Merge branch 'master' of git://git.denx.de/u-boot
diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml
index 6c6b24e49848e064d54a6676e9462956b54f1a32..5d9645451d47385beb7808bf52b9ee7a74fc3499 100644 (file)
@@ -1,5 +1,12 @@
 variables:
-  windows_vm: vs2015-win2012r2
+  windows_vm: vs2017-win2016
+  ubuntu_vm: ubuntu-18.04
+  ci_runner_image: trini/u-boot-gitlab-ci-runner:bionic-20200403-27Apr2020
+  # Add the '-u 0' option for Azure Pipelines, otherwise we get a
+  # "permission denied" error when it tries to "useradd -m -u 1001
+  # vsts_azpcontainer", since our $(ci_runner_image) user is not root.
+  container_option: -u 0
+  work_dir: /u
 
 jobs:
   - job: tools_only_windows
@@ -36,3 +43,375 @@ jobs:
           MSYSTEM: MSYS
           # Tell MSYS2 not to ‘cd’ our startup directory to HOME
           CHERE_INVOKING: yes
+
+  - job: cppcheck
+    displayName: 'Static code analysis with cppcheck'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
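+      # --force makes cppcheck check all preprocessor configurations and
+      # --inline-suppr honours "cppcheck-suppress" comments in the source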
+      - script: cppcheck -j$(nproc) --force --quiet --inline-suppr .
+
+  - job: htmldocs
+    displayName: 'Build HTML documentation'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: make htmldocs
+
+  - job: todo
+    displayName: 'Search for TODO within the source tree'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
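+      # these steps just list the occurrences of each marker; the HACK
+      # search drops HACKKIT matches, which would otherwise be false
+      # positives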
+      - script: grep -r TODO .
+      - script: grep -r FIXME .
+      - script: grep -r HACK . | grep -v HACKKIT
+
+  - job: sloccount
+    displayName: 'Some statistics about the code base'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: sloccount .
+
+  - job: maintainers
+    displayName: 'Ensure all configs have MAINTAINERS entries'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: |
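+          # genboardscfg.py warns about boards whose MAINTAINERS entries are
+          # missing or incomplete, so fail if it produces any output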
+          if [ `./tools/genboardscfg.py -f 2>&1 | wc -l` -ne 0 ]; then exit 1; fi
+
+  - job: tools_only
+    displayName: 'Ensure host tools build'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: |
+          make tools-only_config tools-only -j$(nproc)
+
+  - job: envtools
+    displayName: 'Ensure env tools build'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: |
+          make tools-only_config envtools -j$(nproc)
+
+  - job: utils
+    displayName: 'Run binman, buildman, dtoc, Kconfig and patman testsuites'
+    pool:
+      vmImage: $(ubuntu_vm)
+    steps:
+      - script: |
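+          # the first heredoc uses an unquoted EOF delimiter, so ${WORK_DIR}
+          # is expanded here by the Azure agent; the second uses a quoted
+          # "EOF", so its contents reach build.sh verbatim and only expand
+          # when the script runs inside the container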
+          cat << EOF > build.sh
+          set -ex
+          cd ${WORK_DIR}
+          EOF
+          cat << "EOF" >> build.sh
+          git config --global user.name "Azure Pipelines"
+          git config --global user.email bmeng.cn@gmail.com
+          export USER=azure
+          virtualenv -p /usr/bin/python3 /tmp/venv
+          . /tmp/venv/bin/activate
+          pip install pyelftools pytest
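+          # the binman/dtoc tests need pylibfdt and dtc, so build sandbox_spl
+          # first and put its scripts/dtc output on the relevant paths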
+          export UBOOT_TRAVIS_BUILD_DIR=/tmp/sandbox_spl
+          export PYTHONPATH=${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt
+          export PATH=${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc:${PATH}
+          ./tools/buildman/buildman -o ${UBOOT_TRAVIS_BUILD_DIR} -w sandbox_spl
+          ./tools/binman/binman --toolpath ${UBOOT_TRAVIS_BUILD_DIR}/tools test
+          ./tools/buildman/buildman -t
+          ./tools/dtoc/dtoc -t
+          ./tools/patman/patman --test
+          make O=${UBOOT_TRAVIS_BUILD_DIR} testconfig
+          EOF
+          cat build.sh
+          # We cannot use "container" like the other jobs above, as buildman
+          # seems to hang forever in the pre-configured "container" environment
+          docker run -v $PWD:$(work_dir) $(ci_runner_image) /bin/bash $(work_dir)/build.sh
+
+  - job: test_py
+    displayName: 'test.py'
+    pool:
+      vmImage: $(ubuntu_vm)
+    strategy:
+      matrix:
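+        # TEST_PY_BD selects the board to build and test, TEST_PY_ID supplies
+        # an extra --id argument and TEST_PY_TEST_SPEC becomes a pytest -k
+        # expression; see the test.py invocation below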
+        sandbox:
+          TEST_PY_BD: "sandbox"
+        sandbox_clang:
+          TEST_PY_BD: "sandbox"
+          OVERRIDE: "-O clang-10"
+        sandbox_spl:
+          TEST_PY_BD: "sandbox_spl"
+          TEST_PY_TEST_SPEC: "test_ofplatdata or test_handoff"
+        sandbox_flattree:
+          TEST_PY_BD: "sandbox_flattree"
+        evb_ast2500:
+          TEST_PY_BD: "evb-ast2500"
+          TEST_PY_ID: "--id qemu"
+        vexpress_ca15_tc2:
+          TEST_PY_BD: "vexpress_ca15_tc2"
+          TEST_PY_ID: "--id qemu"
+        vexpress_ca9x4:
+          TEST_PY_BD: "vexpress_ca9x4"
+          TEST_PY_ID: "--id qemu"
+        integratorcp_cm926ejs:
+          TEST_PY_BD: "integratorcp_cm926ejs"
+          TEST_PY_ID: "--id qemu"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_arm:
+          TEST_PY_BD: "qemu_arm"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_arm64:
+          TEST_PY_BD: "qemu_arm64"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_mips:
+          TEST_PY_BD: "qemu_mips"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_mipsel:
+          TEST_PY_BD: "qemu_mipsel"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_mips64:
+          TEST_PY_BD: "qemu_mips64"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_mips64el:
+          TEST_PY_BD: "qemu_mips64el"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_ppce500:
+          TEST_PY_BD: "qemu-ppce500"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_riscv32:
+          TEST_PY_BD: "qemu-riscv32"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_riscv64:
+          TEST_PY_BD: "qemu-riscv64"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_riscv32_spl:
+          TEST_PY_BD: "qemu-riscv32_spl"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_riscv64_spl:
+          TEST_PY_BD: "qemu-riscv64_spl"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_x86:
+          TEST_PY_BD: "qemu-x86"
+          TEST_PY_TEST_SPEC: "not sleep"
+        qemu_x86_64:
+          TEST_PY_BD: "qemu-x86_64"
+          TEST_PY_TEST_SPEC: "not sleep"
+        xilinx_zynq_virt:
+          TEST_PY_BD: "xilinx_zynq_virt"
+          TEST_PY_ID: "--id qemu"
+          TEST_PY_TEST_SPEC: "not sleep"
+        xilinx_versal_virt:
+          TEST_PY_BD: "xilinx_versal_virt"
+          TEST_PY_ID: "--id qemu"
+          TEST_PY_TEST_SPEC: "not sleep"
+        xtfpga:
+          TEST_PY_BD: "xtfpga"
+          TEST_PY_ID: "--id qemu"
+          TEST_PY_TEST_SPEC: "not sleep"
+    steps:
+      - script: |
+          cat << EOF > test.sh
+          set -ex
+          # make the environment variables available, as the tests run inside a container
+          export WORK_DIR="${WORK_DIR}"
+          export TEST_PY_BD="${TEST_PY_BD}"
+          export TEST_PY_ID="${TEST_PY_ID}"
+          export TEST_PY_TEST_SPEC="${TEST_PY_TEST_SPEC}"
+          export OVERRIDE="${OVERRIDE}"
+          EOF
+          cat << "EOF" >> test.sh
+          # the below corresponds to .gitlab-ci.yml "before_script"
+          cd ${WORK_DIR}
+          git clone --depth=1 git://github.com/swarren/uboot-test-hooks.git /tmp/uboot-test-hooks
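+          # uboot-test-hooks selects its scripts by hostname, so alias this
+          # host to the generic travis-ci configuration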
+          ln -s travis-ci /tmp/uboot-test-hooks/bin/`hostname`
+          ln -s travis-ci /tmp/uboot-test-hooks/py/`hostname`
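+          # provide the GRUB EFI binaries used by the UEFI test cases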
+          grub-mkimage --prefix=\"\" -o ~/grub_x86.efi -O i386-efi normal  echo lsefimmap lsefi lsefisystab efinet tftp minicmd
+          grub-mkimage --prefix=\"\" -o ~/grub_x64.efi -O x86_64-efi normal  echo lsefimmap lsefi lsefisystab efinet tftp minicmd
+          cp /opt/grub/grubriscv64.efi ~/grub_riscv64.efi
+          cp /opt/grub/grubriscv32.efi ~/grub_riscv32.efi
+          cp /opt/grub/grubaa64.efi ~/grub_arm64.efi
+          cp /opt/grub/grubarm.efi ~/grub_arm.efi
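+          # U-Boot SPL on the QEMU RISC-V boards boots via OpenSBI, so fetch
+          # the matching fw_dynamic.bin and let the build pick it up through
+          # $OPENSBI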
+          if [[ "${TEST_PY_BD}" == "qemu-riscv32_spl" ]]; then
+              wget -O - https://github.com/riscv/opensbi/releases/download/v0.6/opensbi-0.6-rv32-bin.tar.xz | tar -C /tmp -xJ;
+              export OPENSBI=/tmp/opensbi-0.6-rv32-bin/platform/qemu/virt/firmware/fw_dynamic.bin;
+          fi
+          if [[ "${TEST_PY_BD}" == "qemu-riscv64_spl" ]]; then
+              wget -O - https://github.com/riscv/opensbi/releases/download/v0.6/opensbi-0.6-rv64-bin.tar.xz | tar -C /tmp -xJ;
+              export OPENSBI=/tmp/opensbi-0.6-rv64-bin/platform/qemu/virt/firmware/fw_dynamic.bin;
+          fi
+          # the below corresponds to .gitlab-ci.yml "script"
+          cd ${WORK_DIR}
+          export UBOOT_TRAVIS_BUILD_DIR=/tmp/${TEST_PY_BD};
+          tools/buildman/buildman -o ${UBOOT_TRAVIS_BUILD_DIR} -w -E -W -e --board ${TEST_PY_BD} ${OVERRIDE}
+          virtualenv -p /usr/bin/python3 /tmp/venv
+          . /tmp/venv/bin/activate
+          pip install -r test/py/requirements.txt
+          export PATH=/opt/qemu/bin:/tmp/uboot-test-hooks/bin:${PATH};
+          export PYTHONPATH=/tmp/uboot-test-hooks/py/travis-ci;
+          # "${var:+"-k $var"}" expands to "" if $var is empty, "-k $var" if not
+          ./test/py/test.py --bd ${TEST_PY_BD} ${TEST_PY_ID} ${TEST_PY_TEST_SPEC:+"-k ${TEST_PY_TEST_SPEC}"} --build-dir "$UBOOT_TRAVIS_BUILD_DIR";
+          # the below corresponds to .gitlab-ci.yml "after_script"
+          rm -rf /tmp/uboot-test-hooks /tmp/venv
+          EOF
+          cat test.sh
+          # make the current directory writable by the uboot user inside the
+          # container, as sandbox testing needs to create files such as SPI
+          # flash images (TODO: clean this up in the future)
+          chmod 777 .
+          docker run -v $PWD:$(work_dir) $(ci_runner_image) /bin/bash $(work_dir)/test.sh
+
+  - job: build_the_world
+    displayName: 'Build the World'
+    pool:
+      vmImage: $(ubuntu_vm)
+    strategy:
+      # Use almost the same target division as in .travis.yml, only merging
+      # the 4 small build jobs (arc/microblaze/nds32/xtensa) into one.
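+      # Each BUILDMAN value is a buildman board-selection expression:
+      # space-separated terms are unioned, '&' requires all terms to match
+      # and '-x' excludes boards (see tools/buildman/README).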
+      matrix:
+        arc_microblaze_nds32_xtensa:
+          BUILDMAN: "arc microblaze nds32 xtensa"
+        arm11_arm7_arm920t_arm946es:
+          BUILDMAN: "arm11 arm7 arm920t arm946es"
+        arm926ejs:
+          BUILDMAN: "arm926ejs -x freescale,siemens,at91,kirkwood,spear,omap"
+        at91_non_armv7:
+          BUILDMAN: "at91 -x armv7"
+        at91_non_arm926ejs:
+          BUILDMAN: "at91 -x arm926ejs"
+        boundary_engicam_toradex:
+          BUILDMAN: "boundary engicam toradex"
+        arm_bcm:
+          BUILDMAN: "bcm -x mips"
+        nxp_arm32:
+          BUILDMAN: "freescale -x powerpc,m68k,aarch64,ls101,ls102,ls104,ls108,ls20,lx216"
+        nxp_ls101x:
+          BUILDMAN: "freescale&ls101"
+        nxp_ls102x:
+          BUILDMAN: "freescale&ls102"
+        nxp_ls104x:
+          BUILDMAN: "freescale&ls104"
+        nxp_ls108x:
+          BUILDMAN: "freescale&ls108"
+        nxp_ls20xx:
+          BUILDMAN: "freescale&ls20"
+        nxp_lx216x:
+          BUILDMAN: "freescale&lx216"
+        imx6:
+          BUILDMAN: "mx6 -x boundary,engicam,freescale,technexion,toradex"
+        imx:
+          BUILDMAN: "mx -x mx6,freescale,technexion,toradex"
+        keystone2_keystone3:
+          BUILDMAN: "k2 k3"
+        samsung_socfpga:
+          BUILDMAN: "samsung socfpga"
+        spear:
+          BUILDMAN: "spear"
+        sun4i:
+          BUILDMAN: "sun4i"
+        sun5i:
+          BUILDMAN: "sun5i"
+        sun6i:
+          BUILDMAN: "sun6i"
+        sun7i:
+          BUILDMAN: "sun7i"
+        sun8i_32bit:
+          BUILDMAN: "sun8i&armv7"
+        sun8i_64bit:
+          BUILDMAN: "sun8i&aarch64"
+        sun9i:
+          BUILDMAN: "sun9i"
+        sun50i:
+          BUILDMAN: "sun50i"
+        arm_catch_all:
+          BUILDMAN: "arm -x arm11,arm7,arm9,aarch64,at91,bcm,freescale,kirkwood,mvebu,siemens,tegra,uniphier,mx,samsung,sunxi,am33xx,omap,rk,toradex,socfpga,k2,k3,zynq"
+        sandbox_x86:
+          BUILDMAN: "sandbox x86"
+        technexion:
+          BUILDMAN: "technexion"
+        kirkwood:
+          BUILDMAN: "kirkwood"
+        mvebu:
+          BUILDMAN: "mvebu"
+        m68k:
+          BUILDMAN: "m68k"
+        mips:
+          BUILDMAN: "mips"
+        non_fsl_ppc:
+          BUILDMAN: "powerpc -x freescale"
+        mpc85xx_freescale:
+          BUILDMAN: "mpc85xx&freescale -x t208xrdb -x t4qds -x t102* -x p1_p2_rdb_pc -x p1010rdb -x corenet_ds -x b4860qds -x bsc91*"
+        t208xrdb_corenet_ds:
+          BUILDMAN: "t208xrdb corenet_ds"
+        fsl_ppc:
+          BUILDMAN: "t4qds b4860qds mpc83xx&freescale mpc86xx&freescale"
+        t102x:
+          BUILDMAN: "t102*"
+        p1_p2_rdb_pc:
+          BUILDMAN: "p1_p2_rdb_pc"
+        p1010rdb_bsc91:
+          BUILDMAN: "p1010rdb bsc91"
+        siemens:
+          BUILDMAN: "siemens"
+        tegra:
+          BUILDMAN: "tegra -x toradex"
+        am33xx_no_siemens:
+          BUILDMAN: "am33xx -x siemens"
+        omap:
+          BUILDMAN: "omap"
+        uniphier:
+          BUILDMAN: "uniphier"
+        aarch64_catch_all:
+          BUILDMAN: "aarch64 -x bcm,k3,tegra,ls1,ls2,lx216,mvebu,uniphier,sunxi,samsung,socfpga,rk,versal,zynq"
+        rockchip:
+          BUILDMAN: "rk"
+        sh:
+          BUILDMAN: "sh -x arm"
+        zynq:
+          BUILDMAN: "zynq&armv7"
+        zynqmp_versal:
+          BUILDMAN: "versal|zynqmp&aarch64"
+        riscv:
+          BUILDMAN: "riscv"
+    steps:
+      - script: |
+          cat << EOF > build.sh
+          set -ex
+          cd ${WORK_DIR}
+          # make the environment variables available, as the build runs inside a container
+          export BUILDMAN="${BUILDMAN}"
+          EOF
+          cat << "EOF" >> build.sh
+          if [[ "${BUILDMAN}" != "" ]]; then
+              ret=0;
+              tools/buildman/buildman -o /tmp -P -W ${BUILDMAN} ${OVERRIDE} || ret=$?;
+              if [[ $ret -ne 0 ]]; then
+                  tools/buildman/buildman -o /tmp -seP ${BUILDMAN};
+                  exit $ret;
+              fi;
+          fi
+          EOF
+          cat build.sh
+          docker run -v $PWD:$(work_dir) $(ci_runner_image) /bin/bash $(work_dir)/build.sh