{"payload":{"allShortcutsEnabled":false,"path":"tests","repo":{"id":873479253,"defaultBranch":"main","name":"circuitpython","ownerLogin":"cezer-io","currentUserCanPush":false,"isFork":true,"isEmpty":false,"createdAt":"2024-10-16T08:37:35.000Z","ownerAvatar":"https://avatars.githubusercontent.com/u/57368693?v=4","public":true,"private":false,"isOrgOwned":true},"currentUser":null,"refInfo":{"name":"ee7ed33c7041e2fb2e1a46db57b44b7a6122d3fd","listCacheKey":"v0:1729067863.0992699","canEdit":false,"refType":"tree","currentOid":"ee7ed33c7041e2fb2e1a46db57b44b7a6122d3fd"},"tree":{"items":[{"name":"basics","path":"tests/basics","contentType":"directory"},{"name":"circuitpython-manual","path":"tests/circuitpython-manual","contentType":"directory"},{"name":"circuitpython","path":"tests/circuitpython","contentType":"directory"},{"name":"cmdline","path":"tests/cmdline","contentType":"directory"},{"name":"cpydiff","path":"tests/cpydiff","contentType":"directory"},{"name":"extmod","path":"tests/extmod","contentType":"directory"},{"name":"feature_check","path":"tests/feature_check","contentType":"directory"},{"name":"float","path":"tests/float","contentType":"directory"},{"name":"frozen","path":"tests/frozen","contentType":"directory"},{"name":"import","path":"tests/import","contentType":"directory"},{"name":"inlineasm","path":"tests/inlineasm","contentType":"directory"},{"name":"internal_bench","path":"tests/internal_bench","contentType":"directory"},{"name":"io","path":"tests/io","contentType":"directory"},{"name":"jni","path":"tests/jni","contentType":"directory"},{"name":"micropython","path":"tests/micropython","contentType":"directory"},{"name":"misc","path":"tests/misc","contentType":"directory"},{"name":"perf_bench","path":"tests/perf_bench","contentType":"directory"},{"name":"stress","path":"tests/stress","contentType":"directory"},{"name":"testlib","path":"tests/testlib","contentType":"directory"},{"name":"thread","path":"tests/thread","contentType":"directory"},{"name":"unicode","path":"tests/unicode","contentType":"directory"},{"name":"unix","path":"tests/unix","contentType":"directory"},{"name":"vectorio","path":"tests/vectorio","contentType":"directory"},{"name":"README.md","path":"tests/README.md","contentType":"file"},{"name":"endorse.py","path":"tests/endorse.py","contentType":"file"},{"name":"run-internalbench.py","path":"tests/run-internalbench.py","contentType":"file"},{"name":"run-multitests.py","path":"tests/run-multitests.py","contentType":"file"},{"name":"run-natmodtests.py","path":"tests/run-natmodtests.py","contentType":"file"},{"name":"run-perfbench-table.py","path":"tests/run-perfbench-table.py","contentType":"file"},{"name":"run-perfbench.py","path":"tests/run-perfbench.py","contentType":"file"},{"name":"run-tests-exp.py","path":"tests/run-tests-exp.py","contentType":"file"},{"name":"run-tests-exp.sh","path":"tests/run-tests-exp.sh","contentType":"file"},{"name":"run-tests.py","path":"tests/run-tests.py","contentType":"file"}],"templateDirectorySuggestionUrl":null,"readme":{"displayName":"README.md","richText":"\u003carticle class=\"markdown-body entry-content container-lg\" itemprop=\"text\"\u003e\u003cdiv class=\"markdown-heading\" dir=\"auto\"\u003e\u003ch1 tabindex=\"-1\" class=\"heading-element\" dir=\"auto\"\u003eMicroPython Test Suite\u003c/h1\u003e\u003ca id=\"user-content-micropython-test-suite\" class=\"anchor\" aria-label=\"Permalink: MicroPython Test Suite\" href=\"#micropython-test-suite\"\u003e\u003csvg class=\"octicon octicon-link\" viewBox=\"0 0 16 16\" 
version=\"1.1\" width=\"16\" height=\"16\" aria-hidden=\"true\"\u003e\u003cpath d=\"m7.775 3.275 1.25-1.25a3.5 3.5 0 1 1 4.95 4.95l-2.5 2.5a3.5 3.5 0 0 1-4.95 0 .751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018 1.998 1.998 0 0 0 2.83 0l2.5-2.5a2.002 2.002 0 0 0-2.83-2.83l-1.25 1.25a.751.751 0 0 1-1.042-.018.751.751 0 0 1-.018-1.042Zm-4.69 9.64a1.998 1.998 0 0 0 2.83 0l1.25-1.25a.751.751 0 0 1 1.042.018.751.751 0 0 1 .018 1.042l-1.25 1.25a3.5 3.5 0 1 1-4.95-4.95l2.5-2.5a3.5 3.5 0 0 1 4.95 0 .751.751 0 0 1-.018 1.042.751.751 0 0 1-1.042.018 1.998 1.998 0 0 0-2.83 0l-2.5 2.5a1.998 1.998 0 0 0 0 2.83Z\"\u003e\u003c/path\u003e\u003c/svg\u003e\u003c/a\u003e\u003c/div\u003e\n\u003cp dir=\"auto\"\u003eThis directory contains tests for various functionality areas of MicroPython.\nTo run all stable tests, run \"run-tests.py\" script in this directory.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eTests of capabilities not supported on all platforms should be written\nto check for the capability being present. If it is not, the test\nshould merely output 'SKIP' followed by the line terminator, and call\nsys.exit() to raise SystemExit, instead of attempting to test the\nmissing capability. The testing framework (run-tests.py in this\ndirectory, test_main.c in qemu_arm) recognizes this as a skipped test.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eThere are a few features for which this mechanism cannot be used to\ncondition a test. The run-tests.py script uses small scripts in the\nfeature_check directory to check whether each such feature is present,\nand skips the relevant tests if not.\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eTests are generally verified by running the test both in MicroPython and\nin CPython and comparing the outputs. If the output differs the test fails\nand the outputs are saved in a .out and a .exp file respectively.\nFor tests that cannot be run in CPython, for example because they use\nthe machine module, a .exp file can be provided next to the test's .py\nfile. A convenient way to generate that is to run the test, let it fail\n(because CPython cannot run it) and then copy the .out file (but not\nbefore checking it manually!)\u003c/p\u003e\n\u003cp dir=\"auto\"\u003eWhen creating new tests, anything that relies on float support should go in the\nfloat/ subdirectory. 
There are a few features for which this mechanism cannot be used to
condition a test. The run-tests.py script uses small scripts in the
feature_check directory to check whether each such feature is present,
and skips the relevant tests if not.
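Each feature_check script is deliberately tiny: it just exercises the feature
and run-tests.py inspects the resulting output. A hypothetical probe for
complex-number support might look like the sketch below (illustrative only;
see the real scripts in feature_check/ for the exact convention the runner
relies on):

```
# Hypothetical feature_check-style probe (not an actual file here).
# If the port lacks complex numbers this fails, and run-tests.py then
# skips the tests that need them.
print(complex("1+2j"))
```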
Tests are generally verified by running the test both in MicroPython and
in CPython and comparing the outputs. If the outputs differ, the test fails
and the outputs are saved in a .out and a .exp file respectively.
For tests that cannot be run in CPython, for example because they use
the machine module, a .exp file can be provided next to the test's .py
file. A convenient way to generate one is to run the test, let it fail
(because CPython cannot run it) and then copy the .out file (but not
before checking it manually!).

When creating new tests, anything that relies on float support should go in
the float/ subdirectory. Anything that relies on `import x`, where x is not a
built-in module, should go in the import/ subdirectory.

## perf_bench

The `perf_bench` directory contains some performance benchmarks that can be
used to benchmark different MicroPython firmwares or host ports.

The runner utility is `run-perfbench.py`. Execute `./run-perfbench.py --help`
for a full list of command line options.

### Benchmarking a target

To run tests on a firmware target using `pyboard.py`, run the command line
like this:

```
./run-perfbench.py -p -d /dev/ttyACM0 168 100
```

- `-p` indicates running on a remote target via pyboard.py, not the host.
- `-d PORTNAME` is the serial port; `/dev/ttyACM0` is the default if not
  provided.
- `168` is value `N`, the approximate CPU frequency in MHz (in this case the
  Pyboard V1.1 is 168MHz). It's possible to choose other values as well: lower
  values like `10` will run the tests much more quickly, higher values like
  `1000` will run them for much longer.
- `100` is value `M`, the approximate heap size in kilobytes (you can get this
  from `import micropython; micropython.mem_info()` or estimate it). It's
  possible to choose other values here too: lower values like `10` will run
  shorter/smaller tests, and higher values will run bigger tests. The maximum
  value of `M` is limited by the available heap, and the tests are written so
  the "recommended" value is approximately the upper limit.
### Benchmarking the host

To benchmark the host build (unix/Windows), run like this:

```
./run-perfbench.py 2000 10000
```

The output of perfbench is a list of tests and times/scores, like this:

```
N=2000 M=10000 n_average=8
perf_bench/bm_chaos.py: SKIP
perf_bench/bm_fannkuch.py: 94550.38 2.9145 84.68 2.8499
perf_bench/bm_fft.py: 79920.38 10.0771 129269.74 8.8205
perf_bench/bm_float.py: 43844.62 17.8229 353219.64 17.7693
perf_bench/bm_hexiom.py: 32959.12 15.0243 775.77 14.8893
perf_bench/bm_nqueens.py: 40855.00 10.7297 247776.15 11.3647
perf_bench/bm_pidigits.py: 64547.75 2.5609 7751.36 2.5996
perf_bench/core_import_mpy_multi.py: 15433.38 14.2733 33065.45 14.2368
perf_bench/core_import_mpy_single.py: 263.00 11.3910 3858.35 12.9021
perf_bench/core_qstr.py: 4929.12 1.8434 8117.71 1.7921
perf_bench/core_yield_from.py: 16274.25 6.2584 12334.13 5.8125
perf_bench/misc_aes.py: 57425.25 5.5226 17888.60 5.7482
perf_bench/misc_mandel.py: 40809.25 8.2007 158107.00 9.8864
perf_bench/misc_pystone.py: 39821.75 6.4145 100867.62 6.5043
perf_bench/misc_raytrace.py: 36293.75 6.8501 26906.93 6.8402
perf_bench/viper_call0.py: 15573.00 14.9931 19644.99 13.1550
perf_bench/viper_call1a.py: 16725.75 9.8205 18099.96 9.2752
perf_bench/viper_call1b.py: 20752.62 8.3372 14565.60 9.0663
perf_bench/viper_call1c.py: 20849.88 5.8783 14444.80 6.6295
perf_bench/viper_call2a.py: 16156.25 11.2956 18818.59 11.7959
perf_bench/viper_call2b.py: 22047.38 8.9484 13725.73 9.6800
```

The numbers across each line are times and scores for the test:

- Runtime average (microseconds, lower is better)
- Runtime standard deviation as a percentage
- Score average (units depend on the benchmark, higher is better)
- Score standard deviation as a percentage
### Comparing performance

Usually you want to know whether something is faster or slower than a
reference. To do this, copy the output of each `run-perfbench.py` run to a
text file.

This can be done in multiple ways, but one way on Linux/macOS is with the
`tee` utility: `./run-perfbench.py -p 168 100 | tee pyb-run1.txt`

Once you have two files with output from two different runs (maybe with
different code or configuration), compare the runtimes with
`./run-perfbench.py -t pyb-run1.txt pyb-run2.txt` or compare the scores with
`./run-perfbench.py -s pyb-run1.txt pyb-run2.txt`:

```
> ./run-perfbench.py -s pyb-run1.txt pyb-run2.txt
diff of scores (higher is better)
N=168 M=100 pyb-run1.txt -> pyb-run2.txt diff diff% (error%)
bm_chaos.py 352.90 -> 352.63 : -0.27 = -0.077% (+/-0.00%)
bm_fannkuch.py 77.52 -> 77.45 : -0.07 = -0.090% (+/-0.01%)
bm_fft.py 2516.80 -> 2519.74 : +2.94 = +0.117% (+/-0.00%)
bm_float.py 5749.27 -> 5749.65 : +0.38 = +0.007% (+/-0.00%)
bm_hexiom.py 42.22 -> 42.30 : +0.08 = +0.189% (+/-0.00%)
bm_nqueens.py 4407.55 -> 4414.44 : +6.89 = +0.156% (+/-0.00%)
bm_pidigits.py 638.09 -> 632.14 : -5.95 = -0.932% (+/-0.25%)
core_import_mpy_multi.py 477.74 -> 477.57 : -0.17 = -0.036% (+/-0.00%)
core_import_mpy_single.py 58.74 -> 58.72 : -0.02 = -0.034% (+/-0.00%)
core_qstr.py 63.11 -> 63.11 : +0.00 = +0.000% (+/-0.01%)
core_yield_from.py 357.57 -> 357.57 : +0.00 = +0.000% (+/-0.00%)
misc_aes.py 397.27 -> 396.47 : -0.80 = -0.201% (+/-0.00%)
misc_mandel.py 3375.70 -> 3375.84 : +0.14 = +0.004% (+/-0.00%)
misc_pystone.py 2265.36 -> 2265.97 : +0.61 = +0.027% (+/-0.01%)
misc_raytrace.py 367.61 -> 368.15 : +0.54 = +0.147% (+/-0.01%)
viper_call0.py 605.92 -> 605.92 : +0.00 = +0.000% (+/-0.00%)
viper_call1a.py 576.78 -> 576.78 : +0.00 = +0.000% (+/-0.00%)
viper_call1b.py 452.45 -> 452.46 : +0.01 = +0.002% (+/-0.01%)
viper_call1c.py 457.39 -> 457.39 : +0.00 = +0.000% (+/-0.00%)
viper_call2a.py 561.37 -> 561.37 : +0.00 = +0.000% (+/-0.00%)
viper_call2b.py 389.49 -> 389.50 : +0.01 = +0.003% (+/-0.01%)
```
Note in particular the error percentages at the end of each line. If these are
high relative to the percentage difference, then they indicate high variability
in the test runs, and the absolute difference value is unreliable. High error
percentages are particularly common on PC builds, where the host OS may
influence test run times. Increasing the `N` value may help average this out
by running each test for longer.

## internal_bench

The `internal_bench` directory contains a set of tests for benchmarking
different internal Python features. By default, tests are run on the (unix or
Windows) host, but the `--pyboard` option allows them to be run on an attached
board instead.

Tests are grouped by the first part of the file name, and the test runner
compares output between each group of tests.

The benchmarks measure the elapsed (wall) time of each test, according to
MicroPython's own time module.
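Conceptually the timing is just a wall-clock measurement around the workload,
as in this stand-alone sketch (a hypothetical example; the real harness is
run-internalbench.py together with the helper module in internal_bench/):

```
# Stand-alone sketch of wall-clock timing (hypothetical, for illustration).
import time

def workload():
    buf = bytearray()
    for _ in range(100000):
        buf += b"x"

start = time.time()
workload()
print("%.3fs" % (time.time() - start))
```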
If run without any arguments, all test groups are run. Otherwise, it's
possible to manually specify which test cases to run.

Example:

```
$ ./run-internalbench.py internal_bench/bytebuf-*.py
internal_bench/bytebuf:
 0.094s (+00.00%) internal_bench/bytebuf-1-inplace.py
 0.471s (+399.24%) internal_bench/bytebuf-2-join_map_bytes.py
 0.177s (+87.78%) internal_bench/bytebuf-3-bytarray_map.py
1 tests performed (3 individual testcases)
```

## Test key/certificates

SSL/TLS tests in `multi_net` and `net_inet` use a self-signed key/cert pair
that is randomly generated and is to be used for testing/demonstration only.
You should always generate your own key/cert.
To generate a new self-signed RSA key/cert pair with openssl do:

```
$ openssl req -x509 -newkey rsa:2048 -keyout rsa_key.pem -out rsa_cert.pem -days 365 -nodes -subj '/CN=micropython.local/O=MicroPython/C=AU'
```

In this case the CN is: micropython.local

Convert them to DER format:

```
$ openssl pkey -in rsa_key.pem -out rsa_key.der -outform DER
$ openssl x509 -in rsa_cert.pem -out rsa_cert.der -outform DER
```

To test elliptic curve key/cert pairs, create a key and then a certificate using:

```
$ openssl ecparam -name prime256v1 -genkey -noout -out ec_key.der -outform DER
$ openssl req -new -x509 -key ec_key.der -out ec_cert.der -outform DER -days 365 -nodes -subj '/CN=micropython.local/O=MicroPython/C=AU'
```
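A freshly generated pair can be sanity-checked outside the test suite with
CPython's standard ssl module, which refuses mismatched key/cert combinations
(an illustrative sketch only, using the PEM files created above):

```
# Quick sanity check of a generated key/cert pair (illustrative, CPython).
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load_cert_chain raises ssl.SSLError if the key and certificate don't match.
ctx.load_cert_chain(certfile="rsa_cert.pem", keyfile="rsa_key.pem")
print("rsa_key.pem / rsa_cert.pem load and match OK")
```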
key/certificates"}],"siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fcezer-io%2Fcircuitpython%2Ftree%2Fee7ed33c7041e2fb2e1a46db57b44b7a6122d3fd%2Ftests"}},"totalCount":33,"showBranchInfobar":false},"fileTree":{"":{"items":[{"name":".codespell","path":".codespell","contentType":"directory"},{"name":".devcontainer","path":".devcontainer","contentType":"directory"},{"name":".github","path":".github","contentType":"directory"},{"name":"LICENSES","path":"LICENSES","contentType":"directory"},{"name":"data","path":"data","contentType":"directory"},{"name":"devices","path":"devices","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"examples","path":"examples","contentType":"directory"},{"name":"extmod","path":"extmod","contentType":"directory"},{"name":"frozen","path":"frozen","contentType":"directory"},{"name":"lib","path":"lib","contentType":"directory"},{"name":"locale","path":"locale","contentType":"directory"},{"name":"logo","path":"logo","contentType":"directory"},{"name":"mpy-cross","path":"mpy-cross","contentType":"directory"},{"name":"ports","path":"ports","contentType":"directory"},{"name":"py","path":"py","contentType":"directory"},{"name":"shared-bindings","path":"shared-bindings","contentType":"directory"},{"name":"shared-module","path":"shared-module","contentType":"directory"},{"name":"shared","path":"shared","contentType":"directory"},{"name":"supervisor","path":"supervisor","contentType":"directory"},{"name":"tests","path":"tests","contentType":"directory"},{"name":"tools","path":"tools","contentType":"directory"},{"name":".codespellrc","path":".codespellrc","contentType":"file"},{"name":".git-blame-ignore-revs","path":".git-blame-ignore-revs","contentType":"file"},{"name":".gitattributes","path":".gitattributes","contentType":"file"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":".gitmodules","path":".gitmodules","contentType":"file"},{"name":".mailmap","path":".mailmap","contentType":"file"},{"name":".pre-commit-config.yaml","path":".pre-commit-config.yaml","contentType":"file"},{"name":".readthedocs.yml","path":".readthedocs.yml","contentType":"file"},{"name":".rosie.yml","path":".rosie.yml","contentType":"file"},{"name":"ACKNOWLEDGEMENTS","path":"ACKNOWLEDGEMENTS","contentType":"file"},{"name":"ACKNOWLEDGEMENTS.license","path":"ACKNOWLEDGEMENTS.license","contentType":"file"},{"name":"BUILDING.md","path":"BUILDING.md","contentType":"file"},{"name":"CODE_OF_CONDUCT.md","path":"CODE_OF_CONDUCT.md","contentType":"file"},{"name":"CONTRIBUTING.md","path":"CONTRIBUTING.md","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"LICENSE_MicroPython","path":"LICENSE_MicroPython","contentType":"file"},{"name":"MANIFEST.in-stubs","path":"MANIFEST.in-stubs","contentType":"file"},{"name":"Makefile","path":"Makefile","contentType":"file"},{"name":"README.rst","path":"README.rst","contentType":"file"},{"name":"README.rst-stubs","path":"README.rst-stubs","contentType":"file"},{"name":"WEBUSB_README.md","path":"WEBUSB_README.md","contentType":"file"},{"name":"conf.py","path":"conf.py","contentType":"file"},{"name":"main.c","path":"main.c","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"},{"name":"requirements-ci.txt","path":"requirements-ci.txt","contentType":"file"},{"name":"requirements-dev.txt","path":"requirements-dev.txt","contentType":"file"},{"name":"requirements-doc.txt","path":"requirements-doc.txt","contentType":"file"},{"name":"r
untime.py","path":"runtime.py","contentType":"file"},{"name":"setup.py-stubs","path":"setup.py-stubs","contentType":"file"}],"totalCount":51}},"fileTreeProcessingTime":2.42048,"foldersToFetch":[],"treeExpanded":true,"symbolsExpanded":false,"csrf_tokens":{"/cezer-io/circuitpython/branches":{"post":"xaIYdzSZQpaLBmrTSDq8NMfl4sxvCN1ESGGPMNl3ZJwA07MndqAoDOao2cqnroRVjiq8gfqKDfvz-Tauv9xe0Q"}}},"title":"circuitpython/tests at ee7ed33c7041e2fb2e1a46db57b44b7a6122d3fd · cezer-io/circuitpython","appPayload":{"helpUrl":"https://docs.github.com","findFileWorkerPath":"/assets-cdn/worker/find-file-worker-263cab1760dd.js","findInFileWorkerPath":"/assets-cdn/worker/find-in-file-worker-1b17b3e7786a.js","githubDevUrl":null,"enabled_features":{"code_nav_ui_events":false,"react_blob_overlay":false,"accessible_code_button":true}}}
0