diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command new file mode 100755 index 0000000000000..839d86ed2a5b7 --- /dev/null +++ b/.buildkite/hooks/post-command @@ -0,0 +1,8 @@ +#!/bin/bash +# Copyright (c) 2020 Linaro Limited +# +# SPDX-License-Identifier: Apache-2.0 + +# report disk usage: +echo "--- $0 disk usage" +df -h diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command new file mode 100755 index 0000000000000..04ea9a06b01dc --- /dev/null +++ b/.buildkite/hooks/pre-command @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright (c) 2020 Linaro Limited +# +# SPDX-License-Identifier: Apache-2.0 + +# Save off where we started so we can go back there +WORKDIR=${PWD} + +if [ -n "${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" ]; then + git fetch -v origin ${BUILDKITE_PULL_REQUEST_BASE_BRANCH} + git checkout FETCH_HEAD + git config --local user.email "builds@zephyrproject.org" + git config --local user.name "Zephyr CI" + git merge --no-edit "${BUILDKITE_COMMIT}" || { + local merge_result=$? + echo "Merge failed: ${merge_result}" + git merge --abort + exit $merge_result + } +fi + +mkdir -p /var/lib/buildkite-agent/zephyr-ccache/ + +# create cache dirs, no-op if they already exist +mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/modules +mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/tools +mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/bootloader + +# Clean cache - if it already exists +cd /var/lib/buildkite-agent/zephyr-module-cache +find -type f -not -path "*/.git/*" -not -name ".git" -delete + +# Remove any stale locks +find -name index.lock -delete + +# return from where we started so we can find pipeline files from +# git repo +cd ${WORKDIR} diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml new file mode 100644 index 0000000000000..ffe699e514a90 --- /dev/null +++ b/.buildkite/pipeline.yml @@ -0,0 +1,28 @@ +steps: + - command: + - .buildkite/run.sh + env: + ZEPHYR_TOOLCHAIN_VARIANT: "zephyr" + ZEPHYR_SDK_INSTALL_DIR: "/opt/sdk/zephyr-sdk-0.11.3" + parallelism: 20 + timeout_in_minutes: 120 + retry: + manual: true + plugins: + - docker#v3.5.0: + image: "zephyrprojectrtos/ci:v0.11.8" + propagate-environment: true + volumes: + - "/var/lib/buildkite-agent/git-mirrors:/var/lib/buildkite-agent/git-mirrors" + - "/var/lib/buildkite-agent/zephyr-module-cache:/var/lib/buildkite-agent/zephyr-module-cache" + - "/var/lib/buildkite-agent/zephyr-ccache:/root/.ccache" + workdir: "/workdir/zephyr" + agents: + - "queue=default" + + - wait: ~ + continue_on_failure: true + + - plugins: + - junit-annotate#v1.7.0: + artifacts: sanitycheck-*.xml diff --git a/.buildkite/run.sh b/.buildkite/run.sh new file mode 100755 index 0000000000000..0603771ca3604 --- /dev/null +++ b/.buildkite/run.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Copyright (c) 2020 Linaro Limited +# +# SPDX-License-Identifier: Apache-2.0 + +echo "--- run $0" + +git log -n 5 --oneline --decorate --abbrev=12 + +# Setup module cache +cd /workdir +ln -s /var/lib/buildkite-agent/zephyr-module-cache/modules +ln -s /var/lib/buildkite-agent/zephyr-module-cache/tools +ln -s /var/lib/buildkite-agent/zephyr-module-cache/bootloader +cd /workdir/zephyr + +export JOB_NUM=$((${BUILDKITE_PARALLEL_JOB}+1)) + +# ccache stats +echo "" +echo "--- ccache stats at start" +ccache -s + +if [ -n "${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" ]; then + ./scripts/ci/run_ci.sh -c -b ${BUILDKITE_PULL_REQUEST_BASE_BRANCH} -r origin \ + -m ${JOB_NUM} -M ${BUILDKITE_PARALLEL_JOB_COUNT} -p ${BUILDKITE_PULL_REQUEST} +else + 
./scripts/ci/run_ci.sh -c -b ${BUILDKITE_BRANCH} -r origin \ + -m ${JOB_NUM} -M ${BUILDKITE_PARALLEL_JOB_COUNT}; +fi; + +SANITY_EXIT_STATUS=$? + +# Rename sanitycheck junit xml for use with junit-annotate-buildkite-plugin +# create dummy file if sanitycheck did nothing +if [ ! -f sanity-out/sanitycheck.xml ]; then + touch sanity-out/sanitycheck.xml +fi +mv sanity-out/sanitycheck.xml sanitycheck-${BUILDKITE_JOB_ID}.xml +buildkite-agent artifact upload sanitycheck-${BUILDKITE_JOB_ID}.xml + +# ccache stats +echo "--- ccache stats at finish" +ccache -s + +# disk usage +echo "--- disk usage at finish" +df -h + +exit ${SANITY_EXIT_STATUS} diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml new file mode 100644 index 0000000000000..b1edfa993fcdc --- /dev/null +++ b/.github/workflows/compliance.yml @@ -0,0 +1,114 @@ +name: Compliance + +on: pull_request + +jobs: + compliance_job: + runs-on: ubuntu-latest + name: Run compliance checks on patch series (PR) + steps: + - name: Checkout the code + uses: actions/checkout@v1 + + - name: cache-pip + uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-doc-pip + + - name: Install python dependencies + run: | + pip3 install setuptools + pip3 install wheel + pip3 install python-magic junitparser gitlint pylint pykwalify + pip3 install west + + - name: Run Compliance Tests + id: compliance + env: + BASE_REF: ${{ github.base_ref }} + run: | + export PATH=$PATH:~/.local/bin + export ZEPHYR_BASE=$PWD + git config --global user.email "you@example.com" + git config --global user.name "Your Name" + git rebase origin/${BASE_REF} + ./scripts/ci/check_compliance.py -m Codeowners -m Devicetree -m Gitlint -m Identity -m Nits -m pylint -m checkpatch -m Kconfig -c origin/${BASE_REF}.. 
|| true + + - name: upload-results + uses: actions/upload-artifact@master + continue-on-error: True + with: + name: compliance.xml + path: compliance.xml + + - name: check-warns + run: | + if [ -s Nits.txt ]; then + errors=$(cat Nits.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Nits.txt::$errors" + exit=1 + fi + if [ -s checkpatch.txt ]; then + errors=$(cat checkpatch.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Checkpatch.txt::$errors" + exit=1 + fi + if [ -s Identity.txt ]; then + errors=$(cat Identity.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Identity.txt::$errors" + exit=1 + fi + if [ -s Gitlint.txt ]; then + errors=$(cat Gitlint.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Gitlint.txt::$errors" + exit=1 + fi + if [ -s pylint.txt ]; then + errors=$(cat pylint.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=pylint.txt::$errors" + exit=1 + fi + if [ -s Devicetree.txt ]; then + errors=$(cat Devicetree.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Devicetree.txt::$errors" + exit=1 + fi + if [ -s Kconfig.txt ]; then + errors=$(cat Kconfig.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Kconfig.txt::$errors" + exit=1 + fi + if [ -s Codeowners.txt ]; then + errors=$(cat Codeowners.txt) + errors="${errors//'%'/'%25'}" + errors="${errors//$'\n'/'%0A'}" + errors="${errors//$'\r'/'%0D'}" + echo "::error file=Codeowners.txt::$errors" + exit=1 + fi + + if [ ${exit} == 1 ]; then + exit 1; + fi diff --git a/CODEOWNERS b/CODEOWNERS index 499e7f2360194..76bbf80bdfaac 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -16,6 +16,7 @@ /.known-issues/ @nashif /.github/ @nashif /.github/workflows/ @galak @nashif +/.buildkite/ @galak /arch/arc/ @vonhust @ruuddw /arch/arm/ @MaureenHelm @galak @ioannisg /arch/arm/core/aarch32/cortex_m/cmse/ @ioannisg diff --git a/scripts/ci/check_compliance.py b/scripts/ci/check_compliance.py new file mode 100755 index 0000000000000..ed19c9777b906 --- /dev/null +++ b/scripts/ci/check_compliance.py @@ -0,0 +1,1123 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2018,2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import collections +import sys +import subprocess +import re +import os +from email.utils import parseaddr +import logging +import argparse +from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure, Attr +import tempfile +import traceback +import magic +import shlex +from pathlib import Path + +# '*' makes it italic +EDIT_TIP = "\n\n*Tip: The bot edits this comment instead of posting a new " \ + "one, so you can check the comment's history to see earlier " \ + "messages.*" + +logger = None + +# This ends up as None when we're not running in a Zephyr tree +ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE') + + +def git(*args, cwd=None): + # Helper for running a Git command. Returns the rstrip()ed stdout output. + # Called like git("diff"). Exits with SystemError (raised by sys.exit()) on + # errors. 'cwd' is the working directory to use (default: current + # directory). 
+ + git_cmd = ("git",) + args + try: + git_process = subprocess.Popen( + git_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) + except OSError as e: + err(f"failed to run '{cmd2str(git_cmd)}': {e}") + + stdout, stderr = git_process.communicate() + stdout = stdout.decode("utf-8") + stderr = stderr.decode("utf-8") + if git_process.returncode or stderr: + err(f"""\ +'{cmd2str(git_cmd)}' exited with status {git_process.returncode} and/or wrote +to stderr. + +==stdout== +{stdout} +==stderr== +{stderr}""") + + return stdout.rstrip() + + +def get_shas(refspec): + """ + Returns the list of Git SHAs for 'refspec'. + + :param refspec: + :return: + """ + return git('rev-list', + '--max-count={}'.format(-1 if "." in refspec else 1), + refspec).split() + + +class MyCase(TestCase): + """ + Custom junitparser.TestCase for our tests that adds some extra + XML attributes. These will be preserved when tests are saved and loaded. + """ + classname = Attr() + # Remembers informational messages. These can appear on successful tests + # too, where TestCase.result isn't set. + info_msg = Attr() + + +class ComplianceTest: + """ + Base class for tests. Inheriting classes should have a run() method and set + these class variables: + + name: + Test name + + doc: + Link to documentation related to what's being tested + + path_hint: + The path the test runs itself in. This is just informative and used in + the message that gets printed when running the test. + + The magic string "" refers to the top-level repository + directory. This avoids running 'git' to find the top-level directory + before main() runs (class variable assignments run when the 'class ...' + statement runs). That avoids swallowing errors, because main() reports + them to GitHub. + """ + def __init__(self): + self.case = MyCase(self.name) + self.case.classname = "Guidelines" + + def error(self, msg): + """ + Signals a problem with running the test, with message 'msg'. + + Raises an exception internally, so you do not need to put a 'return' + after error(). + + Any failures generated prior to the error() are included automatically + in the message. Usually, any failures would indicate problems with the + test code. + """ + if self.case.result: + msg += "\n\nFailures before error: " + self.case.result._elem.text + + self.case.result = Error(msg, "error") + + raise EndTest + + def skip(self, msg): + """ + Signals that the test should be skipped, with message 'msg'. + + Raises an exception internally, so you do not need to put a 'return' + after skip(). + + Any failures generated prior to the skip() are included automatically + in the message. Usually, any failures would indicate problems with the + test code. + """ + if self.case.result: + msg += "\n\nFailures before skip: " + self.case.result._elem.text + + self.case.result = Skipped(msg, "skipped") + + raise EndTest + + def add_failure(self, msg): + """ + Signals that the test failed, with message 'msg'. Can be called many + times within the same test to report multiple failures. + """ + if not self.case.result: + # First reported failure + self.case.result = Failure(self.name + " issues", "failure") + self.case.result._elem.text = msg.rstrip() + else: + # If there are multiple Failures, concatenate their messages + self.case.result._elem.text += "\n\n" + msg.rstrip() + + def add_info(self, msg): + """ + Adds an informational message without failing the test. The message is + shown on GitHub, and is shown regardless of whether the test passes or + fails. 
If the test fails, then both the informational message and the + failure message are shown. + + Can be called many times within the same test to add multiple messages. + """ + def escape(s): + # Hack to preserve e.g. newlines and tabs in the attribute when + # tests are saved to .xml and reloaded. junitparser doesn't seem to + # handle it correctly, though it does escape stuff like quotes. + # unicode-escape replaces newlines with \n (two characters), etc. + return s.encode("unicode-escape").decode("utf-8") + + if not self.case.info_msg: + self.case.info_msg = escape(msg) + else: + self.case.info_msg += r"\n\n" + escape(msg) + + +class EndTest(Exception): + """ + Raised by ComplianceTest.error()/skip() to end the test. + + Tests can raise EndTest themselves to immediately end the test, e.g. from + within a nested function call. + """ + + +class CheckPatch(ComplianceTest): + """ + Runs checkpatch and reports found issues + + """ + name = "checkpatch" + doc = "See https://docs.zephyrproject.org/latest/contribute/#coding-style for more details." + path_hint = "" + + def run(self): + # Default to Zephyr's checkpatch if ZEPHYR_BASE is set + checkpatch = os.path.join(ZEPHYR_BASE or GIT_TOP, 'scripts', + 'checkpatch.pl') + if not os.path.exists(checkpatch): + self.skip(checkpatch + " not found") + + # git diff's output doesn't depend on the current (sub)directory + diff = subprocess.Popen(('git', 'diff', COMMIT_RANGE), + stdout=subprocess.PIPE) + try: + subprocess.check_output((checkpatch, '--mailback', '--no-tree', '-'), + stdin=diff.stdout, + stderr=subprocess.STDOUT, + shell=True, cwd=GIT_TOP) + + except subprocess.CalledProcessError as ex: + output = ex.output.decode("utf-8") + if re.search("[1-9][0-9]* errors,", output): + self.add_failure(output) + else: + # No errors found, but warnings. Show them. + self.add_info(output) + + +class KconfigCheck(ComplianceTest): + """ + Checks is we are introducing any new warnings/errors with Kconfig, + for example using undefiend Kconfig variables. + """ + name = "Kconfig" + doc = "See https://docs.zephyrproject.org/latest/guides/kconfig/index.html for more details." + path_hint = ZEPHYR_BASE + + def run(self): + kconf = self.parse_kconfig() + + self.check_top_menu_not_too_long(kconf) + self.check_no_pointless_menuconfigs(kconf) + self.check_no_undef_within_kconfig(kconf) + self.check_no_undef_outside_kconfig(kconf) + + def get_modules(self, modules_file): + """ + Get a list of modules and put them in a file that is parsed by + Kconfig + + This is needed to complete Kconfig sanity tests. + + """ + # Invoke the script directly using the Python executable since this is + # not a module nor a pip-installed Python utility + zephyr_module_path = os.path.join(ZEPHYR_BASE, "scripts", + "zephyr_module.py") + cmd = [sys.executable, zephyr_module_path, + '--kconfig-out', modules_file] + try: + _ = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as ex: + self.error(ex.output) + + def parse_kconfig(self): + """ + Returns a kconfiglib.Kconfig object for the Kconfig files. We reuse + this object for all tests to avoid having to reparse for each test. 
+ """ + if not ZEPHYR_BASE: + self.skip("Not a Zephyr tree (ZEPHYR_BASE unset)") + + # Put the Kconfiglib path first to make sure no local Kconfiglib version is + # used + kconfig_path = os.path.join(ZEPHYR_BASE, "scripts", "kconfig") + if not os.path.exists(kconfig_path): + self.error(kconfig_path + " not found") + + sys.path.insert(0, kconfig_path) + # Import globally so that e.g. kconfiglib.Symbol can be referenced in + # tests + global kconfiglib + import kconfiglib + + # Look up Kconfig files relative to ZEPHYR_BASE + os.environ["srctree"] = ZEPHYR_BASE + + # Parse the entire Kconfig tree, to make sure we see all symbols + os.environ["SOC_DIR"] = "soc/" + os.environ["ARCH_DIR"] = "arch/" + os.environ["BOARD_DIR"] = "boards/*/*" + os.environ["ARCH"] = "*" + os.environ["CMAKE_BINARY_DIR"] = tempfile.gettempdir() + os.environ['DEVICETREE_CONF'] = "dummy" + os.environ['DTS_POST_CPP'] = 'dummy' + + # Older name for DEVICETREE_CONF, for compatibility with older Zephyr + # versions that don't have the renaming + os.environ["GENERATED_DTS_BOARD_CONF"] = "dummy" + + # For multi repo support + self.get_modules(os.path.join(tempfile.gettempdir(), "Kconfig.modules")) + + # Tells Kconfiglib to generate warnings for all references to undefined + # symbols within Kconfig files + os.environ["KCONFIG_WARN_UNDEF"] = "y" + + try: + # Note this will both print warnings to stderr _and_ return + # them: so some warnings might get printed + # twice. "warn_to_stderr=False" could unfortunately cause + # some (other) warnings to never be printed. + return kconfiglib.Kconfig() + except kconfiglib.KconfigError as e: + self.add_failure(str(e)) + raise EndTest + + def check_top_menu_not_too_long(self, kconf): + """ + Checks that there aren't too many items in the top-level menu (which + might be a sign that stuff accidentally got added there) + """ + max_top_items = 50 + + n_top_items = 0 + node = kconf.top_node.list + while node: + # Only count items with prompts. Other items will never be + # shown in the menuconfig (outside show-all mode). + if node.prompt: + n_top_items += 1 + node = node.next + + if n_top_items > max_top_items: + self.add_failure(""" +Expected no more than {} potentially visible items (items with prompts) in the +top-level Kconfig menu, found {} items. If you're deliberately adding new +entries, then bump the 'max_top_items' variable in {}. +""".format(max_top_items, n_top_items, __file__)) + + def check_no_pointless_menuconfigs(self, kconf): + # Checks that there are no pointless 'menuconfig' symbols without + # children in the Kconfig files + + bad_mconfs = [] + for node in kconf.node_iter(): + # 'kconfiglib' is global + # pylint: disable=undefined-variable + + # Avoid flagging empty regular menus and choices, in case people do + # something with 'osource' (could happen for 'menuconfig' symbols + # too, though it's less likely) + if node.is_menuconfig and not node.list and \ + isinstance(node.item, kconfiglib.Symbol): + + bad_mconfs.append(node) + + if bad_mconfs: + self.add_failure("""\ +Found pointless 'menuconfig' symbols without children. Use regular 'config' +symbols instead. See +https://docs.zephyrproject.org/latest/guides/kconfig/tips.html#menuconfig-symbols. 
+ +""" + "\n".join(f"{node.item.name:35} {node.filename}:{node.linenr}" + for node in bad_mconfs)) + + def check_no_undef_within_kconfig(self, kconf): + """ + Checks that there are no references to undefined Kconfig symbols within + the Kconfig files + """ + undef_ref_warnings = "\n\n\n".join(warning for warning in kconf.warnings + if "undefined symbol" in warning) + + if undef_ref_warnings: + self.add_failure("Undefined Kconfig symbols:\n\n" + + undef_ref_warnings) + + def check_no_undef_outside_kconfig(self, kconf): + """ + Checks that there are no references to undefined Kconfig symbols + outside Kconfig files (any CONFIG_FOO where no FOO symbol exists) + """ + # Grep for symbol references. + # + # Example output line for a reference to CONFIG_FOO at line 17 of + # foo/bar.c: + # + # foo/bar.c17#ifdef CONFIG_FOO + # + # 'git grep --only-matching' would get rid of the surrounding context + # ('#ifdef '), but it was added fairly recently (second half of 2018), + # so we extract the references from each line ourselves instead. + # + # The regex uses word boundaries (\b) to isolate the reference, and + # negative lookahead to automatically whitelist the following: + # + # - ##, for token pasting (CONFIG_FOO_##X) + # + # - $, e.g. for CMake variable expansion (CONFIG_FOO_${VAR}) + # + # - @, e.g. for CMakes's configure_file() (CONFIG_FOO_@VAR@) + # + # - {, e.g. for Python scripts ("CONFIG_FOO_{}_BAR".format(...)") + # + # - *, meant for comments like '#endif /* CONFIG_FOO_* */ + + defined_syms = get_defined_syms(kconf) + + # Maps each undefined symbol to a list : strings + undef_to_locs = collections.defaultdict(list) + + # Warning: Needs to work with both --perl-regexp and the 're' module + regex = r"\bCONFIG_[A-Z0-9_]+\b(?!\s*##|[$@{*])" + + # Skip doc/releases, which often references removed symbols + grep_stdout = git("grep", "--line-number", "-I", "--null", + "--perl-regexp", regex, "--", ":!/doc/releases", + cwd=ZEPHYR_BASE) + + # splitlines() supports various line terminators + for grep_line in grep_stdout.splitlines(): + path, lineno, line = grep_line.split("\0") + + # Extract symbol references (might be more than one) within the + # line + for sym_name in re.findall(regex, line): + sym_name = sym_name[7:] # Strip CONFIG_ + if sym_name not in defined_syms and \ + sym_name not in UNDEF_KCONFIG_WHITELIST: + + undef_to_locs[sym_name].append("{}:{}".format(path, lineno)) + + if not undef_to_locs: + return + + # String that describes all referenced but undefined Kconfig symbols, + # in alphabetical order, along with the locations where they're + # referenced. Example: + # + # CONFIG_ALSO_MISSING arch/xtensa/core/fatal.c:273 + # CONFIG_MISSING arch/xtensa/core/fatal.c:264, subsys/fb/cfb.c:20 + undef_desc = "\n".join( + "CONFIG_{:35} {}".format(sym_name, ", ".join(locs)) + for sym_name, locs in sorted(undef_to_locs.items())) + + self.add_failure(""" +Found references to undefined Kconfig symbols. If any of these are false +positives, then add them to UNDEF_KCONFIG_WHITELIST in {} in the ci-tools repo. + +If the reference is for a comment like /* CONFIG_FOO_* */ (or +/* CONFIG_FOO_*_... */), then please use exactly that form (with the '*'). The +CI check knows not to flag it. + +More generally, a reference followed by $, @, {{, *, or ## will never be +flagged. + +{}""".format(os.path.basename(__file__), undef_desc)) + + +def get_defined_syms(kconf): + # Returns a set() with the names of all defined Kconfig symbols (with no + # 'CONFIG_' prefix). 
This is complicated by samples and tests defining + # their own Kconfig trees. For those, just grep for 'config FOO' to find + # definitions. Doing it "properly" with Kconfiglib is still useful for the + # main tree, because some symbols are defined using preprocessor macros. + + # Warning: Needs to work with both --perl-regexp and the 're' module. + # (?:...) is a non-capturing group. + regex = r"^\s*(?:menu)?config\s*([A-Z0-9_]+)\s*(?:#|$)" + + # Grep samples/ and tests/ for symbol definitions + grep_stdout = git("grep", "-I", "-h", "--perl-regexp", regex, "--", + ":samples", ":tests", cwd=ZEPHYR_BASE) + + # Symbols from the main Kconfig tree + grepped definitions from samples and + # tests + return set([sym.name for sym in kconf.unique_defined_syms] + + re.findall(regex, grep_stdout, re.MULTILINE)) + + +# Many of these are symbols used as examples. Note that the list is sorted +# alphabetically, and skips the CONFIG_ prefix. +UNDEF_KCONFIG_WHITELIST = { + "ALSO_MISSING", + "APP_LINK_WITH_", + "CDC_ACM_PORT_NAME_", + "CLOCK_STM32_SYSCLK_SRC_", + "CMU", + "BT_6LOWPAN", # Defined in Linux, mentioned in docs + "COUNTER_RTC_STM32_CLOCK_SRC", + "CRC", # Used in TI CC13x2 / CC26x2 SDK comment + "DEEP_SLEEP", # #defined by RV32M1 in ext/ + "DESCRIPTION", + "ERR", + "ESP_DIF_LIBRARY", # Referenced in CMake comment + "EXPERIMENTAL", + "FFT", # Used as an example in cmake/extensions.cmake + "FLAG", # Used as an example + "FOO", + "FOO_LOG_LEVEL", + "FOO_SETTING_1", + "FOO_SETTING_2", + "LIS2DW12_INT_PIN", + "LSM6DSO_INT_PIN", + "MISSING", + "MODULES", + "MYFEATURE", + "MY_DRIVER_0", + "NORMAL_SLEEP", # #defined by RV32M1 in ext/ + "OPT", + "OPT_0", + "PEDO_THS_MIN", + "REG1", + "REG2", + "SAMPLE_MODULE_LOG_LEVEL", # Used as an example in samples/subsys/logging + "SEL", + "SHIFT", + "SOC_WATCH", # Issue 13749 + "SOME_BOOL", + "SOME_INT", + "SOME_OTHER_BOOL", + "SOME_STRING", + "SRAM2", # Referenced in a comment in samples/application_development + "STACK_SIZE", # Used as an example in the Kconfig docs + "STD_CPP", # Referenced in CMake comment + "TEST1", + "TYPE_BOOLEAN", + "USB_CONSOLE", + "USE_STDC_", + "WHATEVER", +} + + +class DeviceTreeCheck(ComplianceTest): + """ + Runs the dtlib and edtlib test suites in scripts/dts/. + """ + name = "Devicetree" + doc = "See https://docs.zephyrproject.org/latest/guides/dts/index.html for more details" + path_hint = ZEPHYR_BASE + + def run(self): + if not ZEPHYR_BASE: + self.skip("Not a Zephyr tree (ZEPHYR_BASE unset)") + + scripts_path = os.path.join(ZEPHYR_BASE, "scripts", "dts") + + sys.path.insert(0, scripts_path) + import testdtlib + import testedtlib + + # Hack: The test suites expect to be run from the scripts/dts + # directory, because they compare repr() output that contains relative + # paths against an expected string. Temporarily change the working + # directory to scripts/dts/. + # + # Warning: This is not thread-safe, though the test suites run in a + # fraction of a second. + old_dir = os.getcwd() + os.chdir(scripts_path) + try: + logger.info("cd %s && ./testdtlib.py", scripts_path) + testdtlib.run() + logger.info("cd %s && ./testedtlib.py", scripts_path) + testedtlib.run() + except SystemExit as e: + # The dtlib and edtlib test suites call sys.exit() on failure, + # which raises SystemExit. Let any errors in the test scripts + # themselves trickle through and turn into an internal CI error. 
+ self.add_failure(str(e)) + except Exception as e: + # Report other exceptions as an internal test failure + self.error(str(e)) + finally: + # Restore working directory + os.chdir(old_dir) + + +class Codeowners(ComplianceTest): + """ + Check if added files have an owner. + """ + name = "Codeowners" + doc = "See https://help.github.com/articles/about-code-owners/ for more details." + path_hint = "" + + def ls_owned_files(self, codeowners): + """Returns an OrderedDict mapping git patterns from the CODEOWNERS file + to the corresponding list of files found on the filesystem. It + unfortunately does not seem possible to invoke git and re-use + how 'git ignore' and/or 'git attributes' already implement this, + we must re-invent it. + """ + + # TODO: filter out files not in "git ls-files" (e.g., + # sanity-out) _if_ the overhead isn't too high for a clean tree. + # + # pathlib.match() doesn't support **, so it looks like we can't + # recursively glob the output of ls-files directly, only real + # files :-( + + pattern2files = collections.OrderedDict() + top_path = Path(GIT_TOP) + + with open(codeowners, "r") as codeo: + for lineno, line in enumerate(codeo, start=1): + + if line.startswith("#") or not line.strip(): + continue + + match = re.match(r"^([^\s,]+)\s+[^\s]+", line) + if not match: + self.add_failure( + "Invalid CODEOWNERS line %d\n\t%s" % + (lineno, line)) + continue + + git_patrn = match.group(1) + glob = self.git_pattern_to_glob(git_patrn) + files = [] + for abs_path in top_path.glob(glob): + # comparing strings is much faster later + files.append(str(abs_path.relative_to(top_path))) + + if not files: + self.add_failure("Path '{}' not found in the tree but is listed in " + "CODEOWNERS".format(git_patrn)) + + pattern2files[git_patrn] = files + + return pattern2files + + def git_pattern_to_glob(self, git_pattern): + """Appends and prepends '**[/*]' when needed. Result has neither a + leading nor a trailing slash. + """ + + if git_pattern.startswith("/"): + ret = git_pattern[1:] + else: + ret = "**/" + git_pattern + + if git_pattern.endswith("/"): + ret = ret + "**/*" + elif os.path.isdir(os.path.join(GIT_TOP, ret)): + self.add_failure("Expected '/' after directory '{}' " + "in CODEOWNERS".format(ret)) + + return ret + + def run(self): + # TODO: testing an old self.commit range that doesn't end + # with HEAD is most likely a mistake. Should warn, see + # https://github.com/zephyrproject-rtos/ci-tools/pull/24 + codeowners = os.path.join(GIT_TOP, "CODEOWNERS") + if not os.path.exists(codeowners): + self.skip("CODEOWNERS not available in this repo") + + name_changes = git("diff", "--name-only", "--diff-filter=ARCD", + COMMIT_RANGE) + + owners_changes = git("diff", "--name-only", COMMIT_RANGE, + "--", codeowners) + + if not name_changes and not owners_changes: + # TODO: 1. decouple basic and cheap CODEOWNERS syntax + # validation from the expensive ls_owned_files() scanning of + # the entire tree. 2. run the former always. + return + + logging.info("If this takes too long then cleanup and try again") + patrn2files = self.ls_owned_files(codeowners) + + # The way git finds Renames and Copies is not "exact science", + # however if one is missed then it will always be reported as an + # Addition instead. 
+ new_files = git("diff", "--name-only", "--diff-filter=ARC", + COMMIT_RANGE).splitlines() + logging.debug("New files %s", new_files) + + # Convert to pathlib.Path string representation (e.g., + # backslashes 'dir1\dir2\' on Windows) to be consistent + # with self.ls_owned_files() + new_files = [str(Path(f)) for f in new_files] + + new_not_owned = [] + for newf in new_files: + f_is_owned = False + + for git_pat, owned in patrn2files.items(): + logging.debug("Scanning %s for %s", git_pat, newf) + + if newf in owned: + logging.info("%s matches new file %s", git_pat, newf) + f_is_owned = True + # Unlike github, we don't care about finding any + # more specific owner. + break + + if not f_is_owned: + new_not_owned.append(newf) + + if new_not_owned: + self.add_failure("New files added that are not covered in " + "CODEOWNERS:\n\n" + "\n".join(new_not_owned) + + "\n\nPlease add one or more entries in the " + "CODEOWNERS file to cover those files") + + +class Nits(ComplianceTest): + """ + Checks various nits in added/modified files. Doesn't check stuff that's + already covered by e.g. checkpatch.pl and pylint. + """ + name = "Nits" + doc = "See https://docs.zephyrproject.org/latest/contribute/#coding-style for more details." + path_hint = "" + + def run(self): + # Loop through added/modified files + for fname in git("diff", "--name-only", "--diff-filter=d", + COMMIT_RANGE).splitlines(): + if "Kconfig" in fname: + self.check_kconfig_header(fname) + self.check_redundant_zephyr_source(fname) + + if fname.startswith("dts/bindings/"): + self.check_redundant_document_separator(fname) + + if fname.endswith((".c", ".conf", ".cpp", ".dts", ".overlay", + ".h", ".ld", ".py", ".rst", ".txt", ".yaml", + ".yml")) or \ + "Kconfig" in fname or \ + "defconfig" in fname or \ + fname == "README": + + self.check_source_file(fname) + + def check_kconfig_header(self, fname): + # Checks for a spammy copy-pasted header format + + with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f: + contents = f.read() + + # 'Kconfig - yada yada' has a copy-pasted redundant filename at the + # top. This probably means all of the header was copy-pasted. + if re.match(r"\s*#\s*(K|k)config[\w.-]*\s*-", contents): + self.add_failure(""" +Please use this format for the header in '{}' (see +https://docs.zephyrproject.org/latest/guides/kconfig/index.html#header-comments-and-other-nits): + + # + (Blank line) + # Copyright (c) 2019 ... + # SPDX-License-Identifier: + (Blank line) + (Kconfig definitions) + +Skip the "Kconfig - " part of the first line, since it's clear that the comment +is about Kconfig from context. The "# Kconfig - " is what triggers this +failure. +""".format(fname)) + + def check_redundant_zephyr_source(self, fname): + # Checks for 'source "$(ZEPHYR_BASE)/Kconfig[.zephyr]"', which can be + # be simplified to 'source "Kconfig[.zephyr]"' + + with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f: + # Look for e.g. rsource as well, for completeness + match = re.search( + r'^\s*(?:o|r|or)?source\s*"\$\(?ZEPHYR_BASE\)?/(Kconfig(?:\.zephyr)?)"', + f.read(), re.MULTILINE) + + if match: + self.add_failure(""" +Redundant 'source "$(ZEPHYR_BASE)/{0}" in '{1}'. Just do 'source "{0}"' +instead. The $srctree environment variable already points to the Zephyr root, +and all 'source's are relative to it.""".format(match.group(1), fname)) + + def check_redundant_document_separator(self, fname): + # Looks for redundant '...' 
document separators in bindings + + with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f: + if re.search(r"^\.\.\.", f.read(), re.MULTILINE): + self.add_failure(f"""\ +Redundant '...' document separator in {fname}. Binding YAML files are never +concatenated together, so no document separators are needed.""") + + def check_source_file(self, fname): + # Generic nits related to various source files + + with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f: + contents = f.read() + + if not contents.endswith("\n"): + self.add_failure("Missing newline at end of '{}'. Check your text " + "editor settings.".format(fname)) + + if contents.startswith("\n"): + self.add_failure("Please remove blank lines at start of '{}'" + .format(fname)) + + if contents.endswith("\n\n"): + self.add_failure("Please remove blank lines at end of '{}'" + .format(fname)) + + +class GitLint(ComplianceTest): + """ + Runs gitlint on the commits and finds issues with style and syntax + + """ + name = "Gitlint" + doc = "See https://docs.zephyrproject.org/latest/contribute/#commit-guidelines for more details" + path_hint = "" + + def run(self): + # By default gitlint looks for .gitlint configuration only in + # the current directory + proc = subprocess.Popen('gitlint --commits ' + COMMIT_RANGE, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + shell=True, cwd=GIT_TOP) + + msg = "" + if proc.wait() != 0: + msg = proc.stdout.read() + + if msg != "": + self.add_failure(msg.decode("utf-8")) + + +class PyLint(ComplianceTest): + """ + Runs pylint on all .py files, with a limited set of checks enabled. The + configuration is in the pylintrc file. + """ + name = "pylint" + doc = "See https://www.pylint.org/ for more details" + path_hint = "" + + def run(self): + # Path to pylint configuration file + pylintrc = os.path.abspath(os.path.join(os.path.dirname(__file__), + "pylintrc")) + + # List of files added/modified by the commit(s). + files = git( + "diff", "--name-only", "--diff-filter=d", COMMIT_RANGE, "--", + # Skip to work around crash in pylint 2.2.2: + # https://github.com/PyCQA/pylint/issues/2906 + ":!boards/xtensa/intel_s1000_crb/support/create_board_img.py") \ + .splitlines() + + # Filter out everything but Python files. Keep filenames + # relative (to GIT_TOP) to stay farther from any command line + # limit. + py_files = filter_py(GIT_TOP, files) + if not py_files: + return + + pylintcmd = ["pylint", "--rcfile=" + pylintrc] + py_files + logger.info(cmd2str(pylintcmd)) + try: + # Run pylint on added/modified Python files + process = subprocess.Popen( + pylintcmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=GIT_TOP) + except OSError as e: + self.error(f"Failed to run {cmd2str(pylintcmd)}: {e}") + + stdout, stderr = process.communicate() + if process.returncode or stderr: + # Issues found, or a problem with pylint itself + self.add_failure(stdout.decode("utf-8") + stderr.decode("utf-8")) + + +def filter_py(root, fnames): + # PyLint check helper. Returns all Python script filenames among the + # filenames in 'fnames', relative to directory 'root'. Uses the + # python-magic library, so that we can detect Python files that + # don't end in .py as well. python-magic is a frontend to libmagic, + # which is also used by 'file'. + + return [fname for fname in fnames + if fname.endswith(".py") or + magic.from_file(os.path.join(root, fname), + mime=True) == "text/x-python"] + + +class Identity(ComplianceTest): + """ + Checks if Emails of author and signed-off messages are consistent. 
+ """ + name = "Identity" + doc = "See https://docs.zephyrproject.org/latest/contribute/#commit-guidelines for more details" + # git rev-list and git log don't depend on the current (sub)directory + # unless explicited + path_hint = "" + + def run(self): + for shaidx in get_shas(COMMIT_RANGE): + commit = git("log", "--decorate=short", "-n 1", shaidx) + signed = [] + author = "" + sha = "" + parsed_addr = None + for line in commit.split("\n"): + match = re.search(r"^commit\s([^\s]*)", line) + if match: + sha = match.group(1) + match = re.search(r"^Author:\s(.*)", line) + if match: + author = match.group(1) + parsed_addr = parseaddr(author) + match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE) + if match: + signed.append(match.group(1)) + + error1 = "%s: author email (%s) needs to match one of the signed-off-by entries." % ( + sha, author) + error2 = "%s: author email (%s) does not follow the syntax: First Last ." % ( + sha, author) + error3 = "%s: author email (%s) must be a real email and cannot end in @users.noreply.github.com" % ( + sha, author) + failure = None + if author not in signed: + failure = error1 + + if not parsed_addr or len(parsed_addr[0].split(" ")) < 2: + if not failure: + + failure = error2 + else: + failure = failure + "\n" + error2 + elif parsed_addr[1].endswith("@users.noreply.github.com"): + failure = error3 + + if failure: + self.add_failure(failure) + + +def init_logs(cli_arg): + # Initializes logging + + # TODO: there may be a shorter version thanks to: + # logging.basicConfig(...) + + global logger + + level = os.environ.get('LOG_LEVEL', "WARN") + + console = logging.StreamHandler() + console.setFormatter(logging.Formatter('%(levelname)-8s: %(message)s')) + + logger = logging.getLogger('') + logger.addHandler(console) + logger.setLevel(cli_arg if cli_arg else level) + + logging.info("Log init completed, level=%s", + logging.getLevelName(logger.getEffectiveLevel())) + + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Check for coding style and documentation warnings.") + parser.add_argument('-c', '--commits', default="HEAD~1..", + help='''Commit range in the form: a..[b], default is + HEAD~1..HEAD''') + parser.add_argument('-r', '--repo', default=None, + help="GitHub repository") + parser.add_argument('-p', '--pull-request', default=0, type=int, + help="Pull request number") + parser.add_argument('-S', '--sha', default=None, help="Commit SHA") + parser.add_argument('-o', '--output', default="compliance.xml", + help='''Name of outfile in JUnit format, + default is ./compliance.xml''') + + parser.add_argument('-l', '--list', action="store_true", + help="List all checks and exit") + + parser.add_argument("-v", "--loglevel", help="python logging level") + + parser.add_argument('-m', '--module', action="append", default=[], + help="Checks to run. All checks by default.") + + parser.add_argument('-e', '--exclude-module', action="append", default=[], + help="Do not run the specified checks") + + parser.add_argument('-j', '--previous-run', default=None, + help='''Pre-load JUnit results in XML format + from a previous run and combine with new results.''') + + return parser.parse_args() + +def _main(args): + # The "real" main(), which is wrapped to catch exceptions and report them + # to GitHub. Returns the number of test failures. + + # The absolute path of the top-level git directory. Initialize it here so + # that issues running Git can be reported to GitHub. 
+ global GIT_TOP + GIT_TOP = git("rev-parse", "--show-toplevel") + + # The commit range passed in --commit, e.g. "HEAD~3" + global COMMIT_RANGE + COMMIT_RANGE = args.commits + + init_logs(args.loglevel) + + if args.list: + for testcase in ComplianceTest.__subclasses__(): + print(testcase.name) + return 0 + + # Load saved test results from an earlier run, if requested + if args.previous_run: + if not os.path.exists(args.previous_run): + # This probably means that an earlier pass had an internal error + # (the script is currently run multiple times by the ci-pipelines + # repo). Since that earlier pass might've posted an error to + # GitHub, avoid generating a GitHub comment here, by avoiding + # sys.exit() (which gets caught in main()). + print("error: '{}' not found".format(args.previous_run), + file=sys.stderr) + return 1 + + logging.info("Loading previous results from " + args.previous_run) + for loaded_suite in JUnitXml.fromfile(args.previous_run): + suite = loaded_suite + break + else: + suite = TestSuite("Compliance") + + for testcase in ComplianceTest.__subclasses__(): + # "Modules" and "testcases" are the same thing. Better flags would have + # been --tests and --exclude-tests or the like, but it's awkward to + # change now. + + if args.module and testcase.name not in args.module: + continue + + if testcase.name in args.exclude_module: + print("Skipping " + testcase.name) + continue + + test = testcase() + try: + print(f"Running {test.name:16} tests in " + f"{GIT_TOP if test.path_hint == '' else test.path_hint} ...") + test.run() + except EndTest: + pass + + suite.add_testcase(test.case) + + xml = JUnitXml() + xml.add_testsuite(suite) + xml.update_statistics() + xml.write(args.output, pretty=True) + + failed_cases = [] + name2doc = {testcase.name: testcase.doc + for testcase in ComplianceTest.__subclasses__()} + + for case in suite: + if case.result: + if case.result.type == 'skipped': + logging.warning("Skipped %s, %s", case.name, case.result.message) + else: + failed_cases.append(case) + else: + # Some checks like codeowners can produce no .result + logging.info("No JUnit result for %s", case.name) + + n_fails = len(failed_cases) + + if n_fails: + print("{} checks failed".format(n_fails)) + for case in failed_cases: + # not clear why junitxml doesn't clearly expose the most + # important part of its underlying etree.Element + errmsg = case.result._elem.text + logging.error("Test %s failed: %s", case.name, + errmsg.strip() if errmsg else case.result.message) + + with open(f"{case.name}.txt", "w") as f: + docs = name2doc.get(case.name) + f.write(f"{docs}\n\n") + f.write(errmsg.strip() if errmsg else case.result.message) + + print("\nComplete results in " + args.output) + return n_fails + + +def main(): + args = parse_args() + + try: + n_fails = _main(args) + except BaseException: + # Catch BaseException instead of Exception to include stuff like + # SystemExit (raised by sys.exit()) + print(format(__file__, traceback.format_exc())) + + raise + + sys.exit(n_fails) + + +def cmd2str(cmd): + # Formats the command-line arguments in the iterable 'cmd' into a string, + # for error messages and the like + + return " ".join(shlex.quote(word) for word in cmd) + + +def err(msg): + cmd = sys.argv[0] # Empty if missing + if cmd: + cmd += ": " + sys.exit(cmd + "error: " + msg) + + +if __name__ == "__main__": + main() diff --git a/scripts/ci/pylintrc b/scripts/ci/pylintrc new file mode 100644 index 0000000000000..010c8c99b2fff --- /dev/null +++ b/scripts/ci/pylintrc @@ -0,0 +1,249 @@ +# Copyright 
(c) 2019, Nordic Semiconductor ASA +# SPDX-License-Identifier: Apache-2.0 + +# pylint configuration for the PyLint check in check_compliance.py. +# +# To run pylint manually with this configuration from the Zephyr repo, do +# +# pylint3 --rcfile=ci-tools/scripts/pylintrc +# +# This command will check all scripts: +# +# pylint3 --rcfile=ci-tools/scripts/pylintrc $(git ls-files '*.py') + +[MASTER] + +# Use multiple processes +jobs=0 + +# Do not pickle collected data for comparisons +persistent=no + + +[REPORTS] + +# Only show messages, not full report +reports=no + +# Disable score +score=no + + +[MESSAGES CONTROL] + +# Only enable specific (hopefully) uncontroversial warnings. Use +# 'pylint3 --list-msgs' to list messages and their IDs. +# +# These might be nice to check too, but currently trigger false positives: +# +# no-member +# arguments-differ +# redefine-in-handler +# abstract-method +# +# These might be too controversial: +# +# no-else-return +# consider-using-get +# redefined-builtin +# +# These tell you to use logger.warning("foo %d bar", 3) instead of e.g. +# logger.warning("foo {} bar".format(3)), but it's not a clear win in all +# cases. f-strings would be nicer too, and it's easier to convert from format() +# to those. +# +# logging-not-lazy +# logging-format-interpolation +# logging-fstring-interpolation + +disable=all +# Identifiers are in the same order as in 'pylint3 --list-msgs'. Entire +# message "types" (~= severities) like F(atal), E(error),... are listed +# first. +enable= + F, # atal + empty-docstring, + unneeded-not, + singleton-comparison, + misplaced-comparison-constant, + unidiomatic-typecheck, + consider-using-enumerate, + consider-iterating-dictionary, + bad-classmethod-argument, + bad-mcs-method-argument, + bad-mcs-classmethod-argument, + single-string-used-for-slots, + trailing-newlines, + trailing-whitespace, + missing-final-newline, + superfluous-parens, + mixed-line-endings, + unexpected-line-ending-format, + invalid-characters-in-docstring, + useless-import-alias, + len-as-condition, + syntax-error, + init-is-generator, + return-in-init, + function-redefined, + not-in-loop, + return-outside-function, + yield-outside-function, + nonexistent-operator, + duplicate-argument-name, + abstract-class-instantiated, + bad-reversed-sequence, + too-many-star-expressions, + invalid-star-assignment-target, + star-needs-assignment-target, + nonlocal-and-global, + continue-in-finally, + nonlocal-without-binding, + misplaced-format-function, + method-hidden, + access-member-before-definition, + no-method-argument, + no-self-argument, + invalid-slots-object, + assigning-non-slot, + invalid-slots, + inherit-non-class, + inconsistent-mro, + duplicate-bases, + non-iterator-returned, + unexpected-special-method-signature, + invalid-length-returned, + relative-beyond-top-level, + used-before-assignment, + undefined-variable, + undefined-all-variable, + invalid-all-object, + no-name-in-module, + unpacking-non-sequence, + bad-except-order, + raising-bad-type, + bad-exception-context, + misplaced-bare-raise, + raising-non-exception, + notimplemented-raised, + catching-non-exception, + bad-super-call, + not-callable, + assignment-from-no-return, + no-value-for-parameter, + too-many-function-args, + unexpected-keyword-arg, + redundant-keyword-arg, + missing-kwoa, + invalid-sequence-index, + invalid-slice-index, + assignment-from-none, + not-context-manager, + invalid-unary-operand-type, + unsupported-binary-operation, + repeated-keyword, + not-an-iterable, + not-a-mapping, + 
unsupported-membership-test, + unsubscriptable-object, + unsupported-assignment-operation, + unsupported-delete-operation, + invalid-metaclass, + unhashable-dict-key, + logging-unsupported-format, + logging-format-truncated, + logging-too-many-args, + logging-too-few-args, + bad-format-character, + truncated-format-string, + mixed-format-string, + format-needs-mapping, + missing-format-string-key, + too-many-format-args, + too-few-format-args, + bad-string-format-type, + bad-str-strip-call, + invalid-envvar-value, + yield-inside-async-function, + not-async-context-manager, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + literal-comparison, + comparison-with-itself, + no-self-use, + no-classmethod-decorator, + no-staticmethod-decorator, + cyclic-import, + duplicate-code, + consider-merging-isinstance, + simplifiable-if-statement, + redefined-argument-from-local, + trailing-comma-tuple, + stop-iteration-return, + useless-return, + consider-swap-variables, + consider-using-join, + consider-using-in, + chained-comparison, + consider-using-dict-comprehension, + consider-using-set-comprehension, + simplifiable-if-expression, + unreachable, + pointless-statement, + pointless-string-statement, + expression-not-assigned, + unnecessary-pass, + unnecessary-lambda, + duplicate-key, + assign-to-new-keyword, + useless-else-on-loop, + confusing-with-statement, + using-constant-test, + comparison-with-callable, + lost-exception, + assert-on-tuple, + bad-staticmethod-argument, + super-init-not-called, + non-parent-init-called, + useless-super-delegation, + unnecessary-semicolon, + bad-indentation, + mixed-indentation, + deprecated-module, + reimported, + import-self, + misplaced-future, + global-variable-not-assigned, + unused-import, + unused-variable, + undefined-loop-variable, + unbalanced-tuple-unpacking, + possibly-unused-variable, + self-cls-assignment, + bare-except, + duplicate-except, + try-except-raise, + binary-op-exception, + raising-format-tuple, + wrong-exception-operation, + keyword-arg-before-vararg, + bad-format-string-key, + unused-format-string-key, + bad-format-string, + unused-format-string-argument, + format-combined-specification, + missing-format-attribute, + invalid-format-index, + anomalous-backslash-in-string, + anomalous-unicode-escape-in-string, + bad-open-mode, + redundant-unittest-assert, + deprecated-method, + bad-thread-instantiation, + shallow-copy-environ, + invalid-envvar-default, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, diff --git a/scripts/ci/run_ci.sh b/scripts/ci/run_ci.sh index 20cf241c591d5..8002b60926f4b 100755 --- a/scripts/ci/run_ci.sh +++ b/scripts/ci/run_ci.sh @@ -146,7 +146,8 @@ function west_setup() { pushd .. if [ ! 
-d .west ]; then west init -l ${git_dir} - west update + west update 1> west.update.log + west forall -c 'git reset --hard HEAD' fi popd } @@ -268,6 +269,8 @@ if [ -n "$main_ci" ]; then tail -n +2 test_file_1.txt > test_file_1_in.txt cat test_file_3.txt test_file_2_in.txt test_file_1_in.txt > test_file.txt + echo "+++ run sanitycheck" + # Run a subset of tests based on matrix size ${sanitycheck} ${sanitycheck_options} --load-tests test_file.txt \ --subset ${matrix}/${matrix_builds} --retry-failed 3 diff --git a/scripts/ci/sanitycheck_ignore.txt b/scripts/ci/sanitycheck_ignore.txt index 9d15fad2184ab..1b408d77c4598 100644 --- a/scripts/ci/sanitycheck_ignore.txt +++ b/scripts/ci/sanitycheck_ignore.txt @@ -26,3 +26,6 @@ doc/* scripts/ci/sanitycheck_ignore.txt scripts/ci/what_changed.py scripts/requirements* +.github/workflows/compliance.yml +scripts/ci/check_compliance.py +scripts/ci/pylintrc diff --git a/subsys/net/lib/dns/dns_internal.h b/subsys/net/lib/dns/dns_internal.h new file mode 100644 index 0000000000000..2e07a518fb5ac --- /dev/null +++ b/subsys/net/lib/dns/dns_internal.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2020 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include "dns_pack.h" + +int dns_validate_msg(struct dns_resolve_context *ctx, + struct dns_msg_t *dns_msg, + uint16_t *dns_id, + int *query_idx, + struct net_buf *dns_cname, + uint16_t *query_hash); diff --git a/subsys/net/lib/dns/dns_pack.c b/subsys/net/lib/dns/dns_pack.c index 0b25c5cf16f05..f8da094a51711 100644 --- a/subsys/net/lib/dns/dns_pack.c +++ b/subsys/net/lib/dns/dns_pack.c @@ -9,7 +9,9 @@ #include "dns_pack.h" -static inline u16_t dns_strlen(const char *str) +#include "dns_internal.h" + +static inline uint16_t dns_strlen(const char *str) { if (str == NULL) { return 0; @@ -334,7 +336,7 @@ int dns_unpack_response_query(struct dns_msg_t *dns_msg) /* 4 bytes more due to qtype and qclass */ offset += DNS_QTYPE_LEN + DNS_QCLASS_LEN; - if (offset > dns_msg->msg_size) { + if (offset >= dns_msg->msg_size) { return -ENOMEM; } @@ -361,14 +363,10 @@ int dns_copy_qname(u8_t *buf, u16_t *len, u16_t size, u8_t *msg = dns_msg->msg; u16_t lb_size; int rc = -EINVAL; - int i = 0; *len = 0U; - /* Iterate ANCOUNT + 1 to allow the Query's QNAME to be parsed. 
- * This is required to avoid 'alias loops' - */ - while (i++ < dns_header_ancount(dns_msg->msg) + 1) { + while (1) { if (pos >= msg_size) { rc = -ENOMEM; break; diff --git a/subsys/net/lib/dns/resolve.c b/subsys/net/lib/dns/resolve.c index bb83e7d7b9fa2..e72c882f5ee2d 100644 --- a/subsys/net/lib/dns/resolve.c +++ b/subsys/net/lib/dns/resolve.c @@ -24,6 +24,7 @@ LOG_MODULE_REGISTER(net_dns_resolve, CONFIG_DNS_RESOLVER_LOG_LEVEL); #include #include #include "dns_pack.h" +#include "dns_internal.h" #define DNS_SERVER_COUNT CONFIG_DNS_RESOLVER_MAX_SERVERS #define SERVER_COUNT (DNS_SERVER_COUNT + DNS_MAX_MCAST_SERVERS) @@ -359,60 +360,58 @@ static inline int get_slot_by_id(struct dns_resolve_context *ctx, return -ENOENT; } -static int dns_read(struct dns_resolve_context *ctx, - struct net_pkt *pkt, - struct net_buf *dns_data, - u16_t *dns_id, - struct net_buf *dns_cname, - u16_t *query_hash) +int dns_validate_msg(struct dns_resolve_context *ctx, + struct dns_msg_t *dns_msg, + uint16_t *dns_id, + int *query_idx, + struct net_buf *dns_cname, + uint16_t *query_hash) { struct dns_addrinfo info = { 0 }; - /* Helper struct to track the dns msg received from the server */ - struct dns_msg_t dns_msg; - u32_t ttl; /* RR ttl, so far it is not passed to caller */ - u8_t *src, *addr; + uint32_t ttl; /* RR ttl, so far it is not passed to caller */ + uint8_t *src, *addr; const char *query_name; int address_size; /* index that points to the current answer being analyzed */ int answer_ptr; - int data_len; int items; - int ret; - int server_idx, query_idx = -1; + int server_idx; + int ret = 0; - data_len = MIN(net_pkt_remaining_data(pkt), DNS_RESOLVER_MAX_BUF_SIZE); - - /* TODO: Instead of this temporary copy, just use the net_pkt directly. - */ - ret = net_pkt_read(pkt, dns_data->data, data_len); - if (ret < 0) { - ret = DNS_EAI_MEMORY; + /* Make sure that we can read DNS id, flags and rcode */ + if (dns_msg->msg_size < (sizeof(*dns_id) + sizeof(uint16_t))) { + ret = DNS_EAI_FAIL; goto quit; } - dns_msg.msg = dns_data->data; - dns_msg.msg_size = data_len; - /* The dns_unpack_response_header() has design flaw as it expects * dns id to be given instead of returning the id to the caller. * In our case we would like to get it returned instead so that we * can match the DNS query that we sent. When dns_read() is called, * we do not know what the DNS id is yet. */ - *dns_id = dns_unpack_header_id(dns_msg.msg); + *dns_id = dns_unpack_header_id(dns_msg->msg); - if (dns_header_rcode(dns_msg.msg) == DNS_HEADER_REFUSED) { + if (dns_header_rcode(dns_msg->msg) == DNS_HEADER_REFUSED) { ret = DNS_EAI_FAIL; goto quit; } - ret = dns_unpack_response_header(&dns_msg, *dns_id); + /* We might receive a query while we are waiting for a response, in that + * case we just ignore the query instead of making the resolving fail. 
+ */ + if (dns_header_qr(dns_msg->msg) == DNS_QUERY) { + ret = 0; + goto quit; + } + + ret = dns_unpack_response_header(dns_msg, *dns_id); if (ret < 0) { ret = DNS_EAI_FAIL; goto quit; } - if (dns_header_qdcount(dns_msg.msg) != 1) { + if (dns_header_qdcount(dns_msg->msg) != 1) { /* For mDNS (when dns_id == 0) the query count is 0 */ if (*dns_id > 0) { ret = DNS_EAI_FAIL; @@ -420,7 +419,7 @@ static int dns_read(struct dns_resolve_context *ctx, } } - ret = dns_unpack_response_query(&dns_msg); + ret = dns_unpack_response_query(dns_msg); if (ret < 0) { /* Check mDNS like above */ if (*dns_id > 0) { @@ -431,7 +430,7 @@ static int dns_read(struct dns_resolve_context *ctx, /* mDNS responses to do not have the query part so the * answer starts immediately after the header. */ - dns_msg.answer_offset = dns_msg.query_offset; + dns_msg->answer_offset = dns_msg->query_offset; } /* Because in mDNS the DNS id is set to 0 and must be ignored @@ -443,32 +442,32 @@ static int dns_read(struct dns_resolve_context *ctx, answer_ptr = DNS_QUERY_POS; items = 0; server_idx = 0; - while (server_idx < dns_header_ancount(dns_msg.msg)) { - ret = dns_unpack_answer(&dns_msg, answer_ptr, &ttl); + while (server_idx < dns_header_ancount(dns_msg->msg)) { + ret = dns_unpack_answer(dns_msg, answer_ptr, &ttl); if (ret < 0) { ret = DNS_EAI_FAIL; goto quit; } - switch (dns_msg.response_type) { + switch (dns_msg->response_type) { case DNS_RESPONSE_IP: - if (query_idx >= 0) { + if (*query_idx >= 0) { goto query_known; } - query_name = dns_msg.msg + dns_msg.query_offset; + query_name = dns_msg->msg + dns_msg->query_offset; /* Add \0 and query type (A or AAAA) to the hash */ *query_hash = crc16_ansi(query_name, strlen(query_name) + 1 + 2); - query_idx = get_slot_by_id(ctx, *dns_id, *query_hash); - if (query_idx < 0) { + *query_idx = get_slot_by_id(ctx, *dns_id, *query_hash); + if (*query_idx < 0) { ret = DNS_EAI_SYSTEM; goto quit; } - if (ctx->queries[query_idx].query_type == + if (ctx->queries[*query_idx].query_type == DNS_QUERY_TYPE_A) { if (net_sin(&info.ai_addr)->sin_family == AF_INET6) { @@ -483,7 +482,7 @@ static int dns_read(struct dns_resolve_context *ctx, info.ai_addr.sa_family = AF_INET; info.ai_addrlen = sizeof(struct sockaddr_in); - } else if (ctx->queries[query_idx].query_type == + } else if (ctx->queries[*query_idx].query_type == DNS_QUERY_TYPE_AAAA) { if (net_sin6(&info.ai_addr)->sin6_family == AF_INET) { @@ -512,18 +511,25 @@ static int dns_read(struct dns_resolve_context *ctx, goto quit; } - if (dns_msg.response_length < address_size) { + if (dns_msg->response_length < address_size) { /* it seems this is a malformed message */ ret = DNS_EAI_FAIL; goto quit; } - src = dns_msg.msg + dns_msg.response_position; + if ((dns_msg->response_position + address_size) > + dns_msg->msg_size) { + /* Too short message */ + ret = DNS_EAI_FAIL; + goto quit; + } + + src = dns_msg->msg + dns_msg->response_position; memcpy(addr, src, address_size); query_known: - ctx->queries[query_idx].cb(DNS_EAI_INPROGRESS, &info, - ctx->queries[query_idx].user_data); + ctx->queries[*query_idx].cb(DNS_EAI_INPROGRESS, &info, + ctx->queries[*query_idx].user_data); items++; break; @@ -531,7 +537,7 @@ static int dns_read(struct dns_resolve_context *ctx, /* Instead of using the QNAME at DNS_QUERY_POS, * we will use this CNAME */ - answer_ptr = dns_msg.response_position; + answer_ptr = dns_msg->response_position; break; default: @@ -540,22 +546,23 @@ static int dns_read(struct dns_resolve_context *ctx, } /* Update the answer offset to point to the next RR 
-		dns_msg.answer_offset += DNS_ANSWER_PTR_LEN;
-		dns_msg.answer_offset += dns_msg.response_length;
+		dns_msg->answer_offset += dns_msg->response_position -
+						dns_msg->answer_offset;
+		dns_msg->answer_offset += dns_msg->response_length;
 
 		server_idx++;
 	}
 
-	if (query_idx < 0) {
+	if (*query_idx < 0) {
 		/* If the query_idx is still unknown, try to get it here
 		 * and hope it is found.
 		 */
-		query_name = dns_msg.msg + dns_msg.query_offset;
+		query_name = dns_msg->msg + dns_msg->query_offset;
 
 		/* Add \0 and query type (A or AAAA) to the hash */
 		*query_hash = crc16_ansi(query_name,
 					 strlen(query_name) + 1 + 2);
 
-		query_idx = get_slot_by_id(ctx, *dns_id, *query_hash);
-		if (query_idx < 0) {
+		*query_idx = get_slot_by_id(ctx, *dns_id, *query_hash);
+		if (*query_idx < 0) {
 			ret = DNS_EAI_SYSTEM;
 			goto quit;
 		}
@@ -565,18 +572,26 @@ static int dns_read(struct dns_resolve_context *ctx,
 	 * another query. Number of additional queries is controlled via Kconfig
 	 */
 	if (items == 0) {
-		if (dns_msg.response_type == DNS_RESPONSE_CNAME_NO_IP) {
-			u16_t pos = dns_msg.response_position;
+		if (dns_msg->response_type == DNS_RESPONSE_CNAME_NO_IP) {
+			uint16_t pos = dns_msg->response_position;
 
-			ret = dns_copy_qname(dns_cname->data, &dns_cname->len,
-					     dns_cname->size, &dns_msg, pos);
-			if (ret < 0) {
-				ret = DNS_EAI_SYSTEM;
-				goto quit;
+			/* The dns_cname should always be set. As a special
+			 * case, it might not be set for unit tests that call
+			 * this function directly.
+			 */
+			if (dns_cname) {
+				ret = dns_copy_qname(dns_cname->data,
+						     &dns_cname->len,
+						     dns_cname->size,
+						     dns_msg, pos);
+				if (ret < 0) {
+					ret = DNS_EAI_SYSTEM;
+					goto quit;
+				}
 			}
 
 			ret = DNS_EAI_AGAIN;
-			goto finished;
+			goto quit;
 		}
 	}
 
@@ -586,6 +601,46 @@ static int dns_read(struct dns_resolve_context *ctx,
 		ret = DNS_EAI_ALLDONE;
 	}
 
+quit:
+	return ret;
+}
+
+static int dns_read(struct dns_resolve_context *ctx,
+		    struct net_pkt *pkt,
+		    struct net_buf *dns_data,
+		    uint16_t *dns_id,
+		    struct net_buf *dns_cname,
+		    uint16_t *query_hash)
+{
+	/* Helper struct to track the dns msg received from the server */
+	struct dns_msg_t dns_msg;
+	int data_len;
+	int ret;
+	int query_idx = -1;
+
+	data_len = MIN(net_pkt_remaining_data(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
+
+	/* TODO: Instead of this temporary copy, just use the net_pkt directly.
+	 */
+	ret = net_pkt_read(pkt, dns_data->data, data_len);
+	if (ret < 0) {
+		ret = DNS_EAI_MEMORY;
+		goto quit;
+	}
+
+	dns_msg.msg = dns_data->data;
+	dns_msg.msg_size = data_len;
+
+	ret = dns_validate_msg(ctx, &dns_msg, dns_id, &query_idx,
+			       dns_cname, query_hash);
+	if (ret == DNS_EAI_AGAIN) {
+		goto finished;
+	}
+
+	if (ret < 0) {
+		goto quit;
+	}
+
 	if (k_delayed_work_remaining_get(&ctx->queries[query_idx].timer) > 0) {
 		k_delayed_work_cancel(&ctx->queries[query_idx].timer);
 	}
diff --git a/tests/net/lib/dns_packet/src/main.c b/tests/net/lib/dns_packet/src/main.c
index ba1ecc174caaf..c92936854eac1 100644
--- a/tests/net/lib/dns_packet/src/main.c
+++ b/tests/net/lib/dns_packet/src/main.c
@@ -6,8 +6,9 @@
 #include
 #include
-
+#include
 #include
+#include
 
 #define MAX_BUF_SIZE 512
 
 /* RFC 1035, 4.1.1. Header section format */
@@ -19,6 +20,7 @@ static u16_t buf_len;
 
 static u8_t qname[MAX_BUF_SIZE];
 static u16_t qname_len;
+static struct dns_resolve_context dns_ctx;
 
 /* Domain: www.zephyrproject.org
  * Type: standard query (IPv4)
@@ -584,6 +586,655 @@ void test_mdns_response(void)
 		      " at line %d", -rc);
 }
 
+static uint8_t resp_truncated_response_ipv4_1[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer data is missing */
+};
+
+static uint8_t resp_truncated_response_ipv4_2[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Rest of the data is missing */
+};
+
+static uint8_t resp_truncated_response_ipv4_3[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer name */
+	0xc0, 0x1c,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+};
+
+static uint8_t resp_truncated_response_ipv4_4[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer name */
+	0xc0, 0x1c,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+};
+
+static uint8_t resp_truncated_response_ipv4_5[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer name */
+	0xc0, 0x1c,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+};
+
+static uint8_t resp_valid_response_ipv4_6[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer name */
+	0xc0, 0x1c,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x08
+};
+
+static uint8_t resp_valid_response_ipv4_7[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* Answer name (do not use pointer here) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x08
+};
+
+static uint8_t resp_valid_response_ipv4_8[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* 1st answer name (do not use pointer here) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x08,
+
+	/* 2nd answer name (do not use pointer here) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x09
+};
+
+static uint8_t resp_valid_response_ipv4_9[] = {
+	/* DNS msg header (12 bytes) */
+	0xb0, 0x41, 0x81, 0x80, 0x00, 0x01, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string (www.zephyrproject.org) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Query type */
+	0x00, 0x01,
+
+	/* Query class */
+	0x00, 0x01,
+
+	/* 1st answer name (use pointer for 1st answer) */
+	0xc0, 0x1c,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x08,
+
+	/* 2nd answer name (do not use pointer here) */
+	0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+	0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+	0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+
+	/* Answer type */
+	0x00, 0x01,
+
+	/* Answer class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x0b, 0xd4,
+
+	/* Resource data length */
+	0x00, 0x04,
+
+	/* Resource data (IP address) */
+	0x8c, 0xd3, 0xa9, 0x09
+};
+
+static uint8_t resp_valid_response_ipv4_10[] = {
+	/* DNS msg header (12 bytes) */
+	0x74, 0xe1, 0x81, 0x80, 0x00, 0x01, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string */
+	0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
+	0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
+	0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
+	0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
+	0x6d, 0x00,
+
+	/* Type */
+	0x00, 0x01,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* Answer 1 */
+	0xc0, 0x0c,
+
+	/* Answer type (cname) */
+	0x00, 0x05,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x00, 0x04,
+
+	/* RR data length */
+	0x00, 0x26,
+
+	/* Data */
+	0x11, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x63, 0x6e, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x64,
+	0x2d, 0x32, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66,
+	0x69, 0x63, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+	0x72, 0x03, 0x6e, 0x65, 0x74, 0x00,
+
+	/* Answer 2 */
+	0xc0, 0x4e,
+
+	/* cname */
+	0x00, 0x05,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x00, 0x04,
+
+	/* RR data length */
+	0x00, 0x2e,
+
+	/* Data */
+	0x14, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x63, 0x6e, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x64,
+	0x2d, 0x32, 0x2d, 0x31, 0x36, 0x07, 0x77, 0x65,
+	0x73, 0x74, 0x75, 0x73, 0x32, 0x08, 0x63, 0x6c,
+	0x6f, 0x75, 0x64, 0x61, 0x70, 0x70, 0x05, 0x61,
+	0x7a, 0x75, 0x72, 0x65, 0xc0, 0x39,
+};
+
+static uint8_t resp_valid_response_ipv4_11[] = {
+	/* DNS msg header (12 bytes) */
+	0x74, 0xe1, 0x81, 0x80, 0x00, 0x01, 0x00, 0x03,
+	0x00, 0x00, 0x00, 0x00,
+
+	/* Query string */
+	0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
+	0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
+	0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
+	0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
+	0x6d, 0x00,
+
+	/* Type */
+	0x00, 0x01,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* Answer 1 */
+	0xc0, 0x0c,
+
+	/* Answer type (cname) */
+	0x00, 0x05,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x00, 0x04,
+
+	/* RR data length */
+	0x00, 0x26,
+
+	/* Data */
+	0x11, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x63, 0x6e, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x64,
+	0x2d, 0x32, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66,
+	0x69, 0x63, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+	0x72, 0x03, 0x6e, 0x65, 0x74, 0x00,
+
+	/* Answer 2 */
+	0xc0, 0x4e,
+
+	/* cname */
+	0x00, 0x05,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x00, 0x04,
+
+	/* RR data length */
+	0x00, 0x2e,
+
+	/* Data */
+	0x14, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+	0x63, 0x6e, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x64,
+	0x2d, 0x32, 0x2d, 0x31, 0x36, 0x07, 0x77, 0x65,
+	0x73, 0x74, 0x75, 0x73, 0x32, 0x08, 0x63, 0x6c,
+	0x6f, 0x75, 0x64, 0x61, 0x70, 0x70, 0x05, 0x61,
+	0x7a, 0x75, 0x72, 0x65, 0xc0, 0x39,
+
+	/* Answer 3 */
+	0xc0, 0x80,
+
+	/* A record */
+	0x00, 0x01,
+
+	/* Class */
+	0x00, 0x01,
+
+	/* TTL */
+	0x00, 0x00, 0x00, 0x04,
+
+	/* RR length */
+	0x00, 0x04,
+
+	/* IP address */
+	0x34, 0x72, 0x94, 0x90
+};
+
+static void resolve_cb(enum dns_resolve_status status,
+		       struct dns_addrinfo *info,
+		       void *user_data)
+{
+	ARG_UNUSED(status);
+	ARG_UNUSED(info);
+	ARG_UNUSED(user_data);
+}
+
+static void setup_dns_context(struct dns_resolve_context *ctx,
+			      int idx,
+			      uint16_t dns_id,
+			      const uint8_t *query,
+			      size_t query_len,
+			      enum dns_query_type query_type)
+{
+	ctx->queries[idx].cb = resolve_cb;
+	ctx->queries[idx].id = dns_id;
+	ctx->queries[idx].query = query;
+	ctx->queries[idx].query_type = query_type;
+	ctx->queries[idx].query_hash = crc16_ansi(query, query_len);
+	ctx->is_used = true;
+
+}
+
+static void run_dns_malformed_response(const char *test_case,
+				       uint8_t *buf, size_t len)
+{
+	/* Query is used to calculate the hash, it contains the
+	 * labels + query type
+	 */
+	static const uint8_t query[] = {
+		/* Labels */
+		0x03, 0x77, 0x77, 0x77, 0x0d, 0x7a, 0x65, 0x70,
+		0x68, 0x79, 0x72, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+		0x63, 0x74, 0x03, 0x6f, 0x72, 0x67, 0x00,
+		/* Query type */
+		0x00, 0x01
+	};
+	struct dns_msg_t dns_msg = { 0 };
+	uint16_t dns_id = 0;
+	int query_idx = -1;
+	uint16_t query_hash = 0;
+	int ret;
+
+	dns_msg.msg = buf;
+	dns_msg.msg_size = len;
+
+	dns_id = dns_unpack_header_id(dns_msg.msg);
+
+	setup_dns_context(&dns_ctx, 0, dns_id, query, sizeof(query),
+			  DNS_QUERY_TYPE_A);
+
+	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
+			       NULL, &query_hash);
+	zassert_not_equal(ret, DNS_EAI_ALLDONE,
+			  "[%s] DNS message was valid (%d)",
+			  test_case, ret);
+}
+
+static void run_dns_valid_response(const char *test_case,
+				   uint8_t *buf, size_t len)
+{
+	struct dns_msg_t dns_msg = { 0 };
+	uint16_t dns_id = 0;
+	int query_idx = -1;
+	uint16_t query_hash = 0;
+	int ret;
+
+	dns_msg.msg = buf;
+	dns_msg.msg_size = len;
+
+	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
+			       NULL, &query_hash);
+	zassert_equal(ret, DNS_EAI_ALLDONE, "[%s] DNS message failed (%d)",
+		      test_case, ret);
+}
+
+#define DNS_RESOLVER_MAX_BUF_SIZE 512
+#define DNS_RESOLVER_MIN_BUF 1
+#define DNS_RESOLVER_BUF_CTR (DNS_RESOLVER_MIN_BUF + 1)
+#define DNS_MAX_NAME_LEN 255
+
+NET_BUF_POOL_DEFINE(dns_qname_pool, DNS_RESOLVER_BUF_CTR, DNS_MAX_NAME_LEN,
+		    0, NULL);
+
+static void run_dns_valid_cname_response(const char *test_case,
+					 uint8_t *buf, size_t len,
+					 int expected_ret)
+{
+	static const uint8_t query[] = {
+		/* Query string */
+		0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
+		0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
+		0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+		0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
+		0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
+		0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
+		0x6d, 0x00,
+
+		/* Type */
+		0x00, 0x01,
+	};
+	struct dns_msg_t dns_msg = { 0 };
+	struct net_buf *dns_cname;
+	uint16_t dns_id = 0;
+	int query_idx = -1;
+	uint16_t query_hash = 0;
+	int ret;
+
+	dns_msg.msg = buf;
+	dns_msg.msg_size = len;
+
+	dns_cname = net_buf_alloc(&dns_qname_pool, K_MSEC(100));
+	zassert_not_null(dns_cname, "Out of mem");
+
+	dns_id = dns_unpack_header_id(dns_msg.msg);
+
+	setup_dns_context(&dns_ctx, 0, dns_id, query, sizeof(query),
+			  DNS_QUERY_TYPE_A);
+
+	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
+			       dns_cname, &query_hash);
+	zassert_equal(ret, expected_ret, "[%s] DNS message failed (%d)",
+		      test_case, ret);
+}
+
+#define RUN_VALID_TEST(test_name) \
+	run_dns_valid_response(#test_name, test_name, sizeof(test_name))
+
+#define RUN_VALID_CNAME_TEST(test_name, expected_ret) \
+	run_dns_valid_cname_response(#test_name, test_name, \
+				     sizeof(test_name), expected_ret)
+
+static void test_dns_valid_responses(void)
+{
+	RUN_VALID_TEST(resp_valid_response_ipv4_6);
+	RUN_VALID_TEST(resp_valid_response_ipv4_7);
+	RUN_VALID_TEST(resp_valid_response_ipv4_8);
+	RUN_VALID_TEST(resp_valid_response_ipv4_9);
+
+	RUN_VALID_CNAME_TEST(resp_valid_response_ipv4_10, DNS_EAI_AGAIN);
+	RUN_VALID_CNAME_TEST(resp_valid_response_ipv4_11, DNS_EAI_ALLDONE);
+}
+
+#define RUN_MALFORMED_TEST(test_name) \
+	run_dns_malformed_response(#test_name, test_name, sizeof(test_name))
+
+static void test_dns_malformed_responses(void)
+{
+	RUN_MALFORMED_TEST(resp_truncated_response_ipv4_1);
+	RUN_MALFORMED_TEST(resp_truncated_response_ipv4_2);
+	RUN_MALFORMED_TEST(resp_truncated_response_ipv4_3);
+	RUN_MALFORMED_TEST(resp_truncated_response_ipv4_4);
+	RUN_MALFORMED_TEST(resp_truncated_response_ipv4_5);
+}
+
+static void test_dns_id_len(void)
+{
+	struct dns_msg_t dns_msg = { 0 };
+	uint8_t buf[1];
+	uint16_t dns_id = 0;
+	int query_idx = -1;
+	uint16_t query_hash = 0;
+	int ret;
+
+	dns_msg.msg = buf;
+	dns_msg.msg_size = sizeof(buf);
+
+	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
+			       NULL, &query_hash);
+	zassert_equal(ret, DNS_EAI_FAIL,
+		      "DNS message length check failed (%d)", ret);
+}
+
+static void test_dns_flags_len(void)
+{
+	struct dns_msg_t dns_msg = { 0 };
+	uint8_t buf[3];
+	uint16_t dns_id = 0;
+	int query_idx = -1;
+	uint16_t query_hash = 0;
+	int ret;
+
+	dns_msg.msg = buf;
+	dns_msg.msg_size = sizeof(buf);
+
+	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
+			       NULL, &query_hash);
+	zassert_equal(ret, DNS_EAI_FAIL,
+		      "DNS message length check failed (%d)", ret);
+}
+
 void test_main(void)
 {
 	ztest_test_suite(dns_tests,
@@ -591,13 +1242,17 @@ void test_main(void)
 			 ztest_unit_test(test_dns_response),
 			 ztest_unit_test(test_dns_response2),
 			 ztest_unit_test(test_mdns_query),
-			 ztest_unit_test(test_mdns_response)
+			 ztest_unit_test(test_mdns_response),
+			 ztest_unit_test(test_dns_id_len),
+			 ztest_unit_test(test_dns_flags_len),
+			 ztest_unit_test(test_dns_malformed_responses),
+			 ztest_unit_test(test_dns_valid_responses)
 			 );
 
 	ztest_run_test_suite(dns_tests);
 
 	/* TODO:
-	 *  1) add malformed DNS data
+	 *  1) add malformed DNS data (mostly done)
 	 *  2) add validations against buffer overflows
 	 *  3) add debug info to detect the exit point (or split the tests)
 	 *  4) add test data with CNAME and more RR