diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 86847e8ba..f76432f0c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,6 +9,9 @@ repos: hooks: - id: ruff args: [ --fix ] + - id: ruff + files: ^bin/.*\.py$ + args: ["--select=I", "--fix"] - id: ruff-format - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 @@ -30,5 +33,5 @@ repos: - --no-incremental # Fixes ruamel.yaml, see https://stackoverflow.com/a/65223004 - --python-version=3.10 - --scripts-are-modules - #- --strict # TODO #102: Enable flag once everything has type annotations - exclude: ^test/ + - --strict + exclude: ^(test|skel|scripts|bin/misc)/ diff --git a/bin/check_testing_tool.py b/bin/check_testing_tool.py index e0da3cf33..64bebd591 100644 --- a/bin/check_testing_tool.py +++ b/bin/check_testing_tool.py @@ -1,13 +1,22 @@ import shutil import sys +from collections.abc import Sequence from pathlib import Path -from typing import Optional, Sequence +from typing import Optional, TYPE_CHECKING import config import parallel from program import Program from run import Submission -from util import * +from util import ( + command_supports_memory_limit, + default_exec_code_map, + ensure_symlink, + error, + ExecResult, + ExecStatus, + ProgressBar, +) if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 from problem import Problem @@ -28,7 +37,7 @@ class TestInput: - def __init__(self, problem: "Problem", in_path: Path, short_path: Path): + def __init__(self, problem: "Problem", in_path: Path, short_path: Path) -> None: assert in_path.suffix in [".in", ".download", ".statement"] self.problem = problem self.in_path = in_path @@ -43,7 +52,7 @@ def __init__(self, problem: "Problem", in_path: Path, short_path: Path): class WrappedSubmission: - def __init__(self, problem: "Problem", submission: Submission): + def __init__(self, problem: "Problem", submission: Submission) -> None: self.problem = problem self.submission = submission 
self.name = submission.name @@ -156,7 +165,7 @@ def run(self, bar: ProgressBar, testing_tool: "TestingTool", testinput: TestInpu class TestingTool(Program): - def __init__(self, problem: "Problem", path: Path): + def __init__(self, problem: "Problem", path: Path) -> None: super().__init__( problem, path, diff --git a/bin/config.py b/bin/config.py index 8c1b5438b..6e7d70b88 100644 --- a/bin/config.py +++ b/bin/config.py @@ -1,11 +1,17 @@ # Global variables that are constant after the programs arguments have been parsed. -import argparse +import copy import os import re +import sys +from collections.abc import Sequence +from colorama import Fore, Style from pathlib import Path -from collections.abc import Mapping, Sequence -from typing import Any, Final, Literal, Optional +from typing import Any, Final, Literal, Optional, TypeVar + +# Randomly generated uuid4 for BAPCtools +BAPC_UUID: Final[str] = "8ee7605a-d1ce-47b3-be37-15de5acd757e" +BAPC_UUID_PREFIX: Final[int] = 8 SPEC_VERSION: Final[str] = "2025-09" @@ -103,42 +109,6 @@ # Below here is some global state that will be filled in main(). 
-args = argparse.Namespace() - -DEFAULT_ARGS: Final[Mapping[str, Any]] = { - "jobs": (os.cpu_count() or 1) // 2, - "time": 600, # Used for `bt fuzz` - "verbose": 0, - "action": None, - "no_visualizer": True, -} - - -# The list of arguments below is generated using the following command: -""" -for cmd in $(bt --help | grep '^ {' | sed 's/ {//;s/}//;s/,/ /g') ; do bt $cmd --help ; done |& \ -grep '^ [^ ]' | sed 's/^ //' | cut -d ' ' -f 1 | sed -E 's/,//;s/^-?-?//;s/-/_/g' | sort -u | \ -grep -Ev '^(h|jobs|time|verbose)$' | sed 's/^/"/;s/$/",/' | tr '\n' ' ' | sed 's/^/ARGS_LIST: Final[Sequence[str]] = [/;s/, $/]\n/' -""" -# fmt: off -ARGS_LIST: Final[Sequence[str]] = ["1", "add", "all", "answer", "api", "author", "check_deterministic", "clean", "colors", "contest", "contest_id", "contestname", "cp", "defaults", "default_solution", "depth", "directory", "error", "force", "force_build", "generic", "input", "interaction", "interactive", "invalid", "kattis", "lang", "latest_bt", "legacy", "memory", "more", "move_to", "no_bar", "no_generate", "no_solution", "no_solutions", "no_testcase_sanity_checks", "no_time_limit", "no_validators", "no_visualizer", "number", "open", "order", "order_from_ccs", "overview", "password", "post_freeze", "problem", "problemname", "remove", "reorder", "samples", "sanitizer", "skel", "skip", "sort", "submissions", "table", "testcases", "time_limit", "timeout", "token", "tree", "type", "username", "valid_output", "watch", "web", "write"] -# fmt: on - - -def set_default_args() -> list[str]: - # Set default argument values. - missing = [] - for arg in ARGS_LIST: - if not hasattr(args, arg): - setattr(args, arg, None) - missing.append(arg) - for arg, value in DEFAULT_ARGS.items(): - if not hasattr(args, arg): - setattr(args, arg, value) - missing.append(arg) - return missing - - level: Optional[Literal["problem", "problemset"]] = None # The number of warnings and errors encountered. 
@@ -152,6 +122,168 @@ def set_default_args() -> list[str]: TEST_TLE_SUBMISSIONS: bool = False -# Randomly generated uuid4 for BAPCtools -BAPC_UUID: Final[str] = "8ee7605a-d1ce-47b3-be37-15de5acd757e" -BAPC_UUID_PREFIX: Final[int] = 8 +class ARGS: + def __init__(self, source: str | Path, **kwargs: Any) -> None: + self._set = set[str]() + self._source = source + + def warn(msg: Any) -> None: + global n_warn + print(f"{Fore.YELLOW}WARNING: {msg}{Style.RESET_ALL}", file=sys.stderr) + n_warn += 1 + + T = TypeVar("T") + + def normalize_arg(value: Any, t: type[Any]) -> Any: + if isinstance(value, str) and t is Path: + value = Path(value) + if isinstance(value, int) and t is float: + value = float(value) + if isinstance(value, bool) and t is int: + value = int(value) + return value + + def get_optional_arg(key: str, t: type[T], constraint: Optional[str] = None) -> Optional[T]: + if key in kwargs: + value = normalize_arg(kwargs.pop(key), t) + if constraint: + assert isinstance(value, (float, int)) + if not eval(f"{value} {constraint}"): + warn( + f"value for '{key}' in {source} should be {constraint} but is {value}. SKIPPED." + ) + return None + if isinstance(value, t): + self._set.add(key) + return value + warn(f"incompatible value for key '{key}' in {source}. SKIPPED.") + return None + + def get_list_arg( + key: str, t: type[T], constraint: Optional[str] = None + ) -> Optional[list[T]]: + values = get_optional_arg(key, list) + if values is None: + return None + checked = [] + for value in values: + value = normalize_arg(value, t) + if constraint: + assert isinstance(value, (float, int)) + if not eval(f"{value} {constraint}"): + warn( + f"value for '{key}' in {source} should be {constraint} but is {value}. SKIPPED." + ) + continue + if not isinstance(value, t): + warn(f"incompatible value for key '{key}' in {source}. 
SKIPPED.") + continue + checked.append(value) + return checked + + def get_arg(key: str, default: T, constraint: Optional[str] = None) -> T: + value = get_optional_arg(key, type(default), constraint) + result = default if value is None else value + return result + + setattr(self, "1", get_arg("1", False)) + self.action: Optional[str] = get_optional_arg("action", str) + self.add: Optional[list[Path]] = get_list_arg("add", Path) + self.all: int = get_arg("all", 0) + self.answer: bool = get_arg("answer", False) + self.api: Optional[str] = get_optional_arg("api", str) + self.author: Optional[str] = get_optional_arg("author", str) + self.check_deterministic: bool = get_arg("check_deterministic", False) + self.clean: bool = get_arg("clean", False) + self.colors: Optional[str] = get_optional_arg("colors", str) + self.contest: Optional[Path] = get_optional_arg("contest", Path) + self.contest_id: Optional[str] = get_optional_arg("contest_id", str) + self.contestname: Optional[str] = get_optional_arg("contestname", str) + self.cp: bool = get_arg("cp", False) + self.defaults: bool = get_arg("defaults", False) + self.default_solution: Optional[Path] = get_optional_arg("default_solution", Path) + self.depth: Optional[int] = get_optional_arg("depth", int, ">= 0") + self.directory: list[Path] = get_list_arg("directory", Path) or [] + self.error: bool = get_arg("error", False) + self.force: bool = get_arg("force", False) + self.force_build: bool = get_arg("force_build", False) + self.generic: Optional[list[str]] = get_list_arg("generic", str) + self.input: bool = get_arg("input", False) + self.interaction: bool = get_arg("interaction", False) + self.interactive: bool = get_arg("interactive", False) + self.jobs: int = get_arg("jobs", (os.cpu_count() or 1) // 2, ">= 0") + self.invalid: bool = get_arg("invalid", False) + self.kattis: bool = get_arg("kattis", False) + self.lang: Optional[list[str]] = get_list_arg("lang", str) + self.latest_bt: bool = get_arg("latest_bt", False) + 
self.legacy: bool = get_arg("legacy", False) + self.memory: Optional[int] = get_optional_arg("memory", int, "> 0") + + more: Optional[bool] = get_optional_arg("more", bool) + if more is not None: + self.all = int(more) + self._set.add("all") + self._set.discard("more") + warn("--more is deprecated, use --all instead!\n") + + self.move_to: Optional[str] = get_optional_arg("move_to", str) + self.no_bar: bool = get_arg("no_bar", False) + self.no_generate: bool = get_arg("no_generate", False) + self.no_solution: bool = get_arg("no_solution", False) + self.no_solutions: bool = get_arg("no_solutions", False) + self.no_testcase_sanity_checks: bool = get_arg("no_testcase_sanity_checks", False) + self.no_time_limit: bool = get_arg("no_time_limit", False) + self.no_validators: bool = get_arg("no_validators", False) + self.no_visualizer: bool = get_arg("no_visualizer", True, ">= 0") + self.number: Optional[str] = get_optional_arg("number", str) + self.open: Optional[Literal[True] | Path] = get_optional_arg("open", Path)  # FIXME: annotation allows True, but get_optional_arg(Path) would reject it — confirm intent + self.order: Optional[str] = get_optional_arg("order", str) + self.order_from_ccs: Optional[str] = get_optional_arg("order_from_ccs", str) + self.overview: bool = get_arg("overview", False) + self.password: Optional[str] = get_optional_arg("password", str) + self.post_freeze: bool = get_arg("post_freeze", False) + self.problem: Optional[Path] = get_optional_arg("problem", Path) + self.problemname: Optional[str] = get_optional_arg("problemname", str) + self.remove: bool = get_arg("remove", False) + self.reorder: bool = get_arg("reorder", False) + self.samples: bool = get_arg("samples", False) + self.sanitizer: bool = get_arg("sanitizer", False) + self.skel: Optional[str] = get_optional_arg("skel", str) + self.skip: bool = get_arg("skip", False) + self.sort: bool = get_arg("sort", False) + self.submissions: Optional[list[Path]] = get_list_arg("submissions", Path) + self.table: bool = get_arg("table", False) + self.testcases: Optional[list[Path]] = 
get_list_arg("testcases", Path) + self.time: int = get_arg("time", 600, "> 0") + self.time_limit: Optional[float] = get_optional_arg("time_limit", float, "> 0") + self.timeout: Optional[int] = get_optional_arg("timeout", int, "> 0") + self.token: Optional[str] = get_optional_arg("token", str) + self.tree: bool = get_arg("tree", False) + self.type: Optional[str] = get_optional_arg("type", str) + self.username: Optional[str] = get_optional_arg("username", str) + self.valid_output: bool = get_arg("valid_output", False) + self.verbose: int = get_arg("verbose", 0, ">= 0") + self.watch: bool = get_arg("watch", False) + self.web: bool = get_arg("web", False) + self.write: bool = get_arg("write", False) + + for key in kwargs: + # any key left in kwargs was not consumed above; warn() reports it + warn(f"unknown key in {source}: '{key}'") + + def update(self, args: "ARGS", replace: bool = False) -> None: + for key in args._set: + if key not in self._set or replace: + setattr(self, key, getattr(args, key)) + self._set.add(key) + + def mark_set(self, *keys: str) -> None: + self._set.update(list(keys)) + + def copy(self) -> "ARGS": + res = copy.copy(self) + res._set = copy.copy(res._set) + return res + + +args = ARGS("config.py") diff --git a/bin/constraints.py b/bin/constraints.py index 40ad81bcb..5dc9c41ca 100644 --- a/bin/constraints.py +++ b/bin/constraints.py @@ -1,15 +1,12 @@ import re -import sys from collections import defaultdict +from colorama import Fore, Style from typing import Optional import latex import validate -from colorama import Fore, Style from problem import Problem - -# Local imports -from util import * +from util import eprint, error, log, warn """DISCLAIMER: @@ -30,7 +27,7 @@ def check_validators( problem.validate_data(validate.Mode.ANSWER, constraints=ans_constraints) if not problem.settings.ans_is_output and not ans_constraints: log("No constraint validation of answer values found in answer or output validators.") - print(file=sys.stderr) + eprint() validator_values: set[int | float] = 
set() validator_defs: list[str | tuple[int | float, str, int | float]] = [] @@ -273,10 +270,9 @@ def check_constraints(problem: Problem) -> bool: name_len = 8 left_width = 8 + name_len + 2 * value_len - print( + eprint( "{:^{width}}|{:^40}".format("VALIDATORS", "PROBLEM STATEMENT", width=left_width), sep="", - file=sys.stderr, ) while statement_defs or validator_defs: @@ -294,29 +290,28 @@ def check_constraints(problem: Problem) -> bool: if val is not None: validator_defs.remove(val) if isinstance(val, str): - print("{:^{width}}".format(val, width=left_width), sep="", end="", file=sys.stderr) + eprint("{:^{width}}".format(val, width=left_width), sep="", end="") else: - print( + eprint( "{:>{value_len}_} <= {:^{name_len}} <= {:<{value_len}_}".format( *val, name_len=name_len, value_len=value_len ), sep="", end="", - file=sys.stderr, ) else: - print("{:^{width}}".format("", width=left_width), sep="", end="", file=sys.stderr) - print("|", end="", file=sys.stderr) + eprint("{:^{width}}".format("", width=left_width), sep="", end="") + eprint("|", end="") if st is not None: languages = ",".join(statement_defs[st]) - print("{:^40} {}".format(st, languages), sep="", end="", file=sys.stderr) + eprint("{:^40} {}".format(st, languages), sep="", end="") else: - print("{:^40}".format(""), sep="", end="", file=sys.stderr) - print(file=sys.stderr) + eprint("{:^40}".format(""), sep="", end="") + eprint() if st is not None: statement_defs.pop(st) - print(file=sys.stderr) + eprint() warned = False for value in validator_values: @@ -326,20 +321,18 @@ def check_constraints(problem: Problem) -> bool: if not warned: warned = True warn("Values in validators but missing in some statement:") - print( + eprint( f"{Fore.YELLOW}{value}{Style.RESET_ALL} missing in", ",".join(missing), - file=sys.stderr, ) extra_in_statement = set(statement_values.keys()).difference(validator_values) if extra_in_statement: warn("Values in some statement but not in input validators:") for value in 
extra_in_statement: - print( + eprint( f"{Fore.YELLOW}{value}{Style.RESET_ALL} in", ",".join(sorted(statement_values[value])), - file=sys.stderr, ) return True diff --git a/bin/contest.py b/bin/contest.py index 87e9d1783..7953395b4 100644 --- a/bin/contest.py +++ b/bin/contest.py @@ -1,10 +1,11 @@ -import config -import sys - from pathlib import Path -from typing import cast, Any, Optional +from typing import Any, cast, Literal, Optional, TYPE_CHECKING -from util import * +import config +from util import eprint, error, fatal, log, read_yaml, read_yaml_settings, verbose + +if TYPE_CHECKING: + import requests # Read the contest.yaml, if available _contest_yaml: Optional[dict[str, Any]] = None @@ -23,7 +24,7 @@ def contest_yaml() -> dict[str, Any]: return _contest_yaml -_problems_yaml = None +_problems_yaml: Literal[False] | Optional[list[dict[str, Any]]] = None def problems_yaml() -> Optional[list[dict[str, Any]]]: @@ -43,7 +44,7 @@ def problems_yaml() -> Optional[list[dict[str, Any]]]: return None if not isinstance(_problems_yaml, list): fatal("problems.yaml must contain a list of problems") - return cast(list[dict[str, Any]], _problems_yaml) + return _problems_yaml def get_api() -> str: @@ -59,7 +60,7 @@ def get_api() -> str: return api -def get_contest_id(): +def get_contest_id() -> str: contest_id = ( config.args.contest_id if config.args.contest_id @@ -75,24 +76,31 @@ def get_contest_id(): fatal(f"Contest {contest_id} not found.") else: return contest_id - if len(contests) > 1: + if not contests: + fatal("Server has no active contests.") + elif len(contests) > 1: for contest in contests: log(f"{contest['id']}: {contest['name']}") fatal( "Server has multiple active contests. Pass --contest-id or set it in contest.yaml." 
) - if len(contests) == 1: + else: + assert len(contests) == 1 + assert isinstance(contests[0]["id"], str) log(f"The only active contest has id {contests[0]['id']}") return contests[0]["id"] -def get_contests(): +def get_contests() -> list[dict[str, Any]]: contests = call_api_get_json("/contests") assert isinstance(contests, list) return contests -def call_api(method, endpoint, **kwargs): +def call_api(method: str, endpoint: str, **kwargs: Any) -> "requests.Response": + if config.args.username is None or config.args.password is None: + fatal("Username and Password are required to access CCS") + import requests # Slow import, so only import it inside this function. assert endpoint.startswith("/") @@ -110,10 +118,10 @@ def call_api(method, endpoint, **kwargs): return r -def call_api_get_json(url: str): +def call_api_get_json(url: str) -> Any: r = call_api("GET", url) r.raise_for_status() try: return r.json() except Exception as e: - print(f"\nError in decoding JSON:\n{e}\n{r.text()}", file=sys.stderr) + eprint(f"\nError in decoding JSON:\n{e}\n{r.text}") diff --git a/bin/download_submissions.py b/bin/download_submissions.py index 1c3e99dae..41271e45a 100644 --- a/bin/download_submissions.py +++ b/bin/download_submissions.py @@ -3,19 +3,19 @@ import json from os import makedirs from pathlib import Path +from typing import Any import config import parallel from contest import call_api_get_json, get_contest_id -from util import ProgressBar, fatal -from verdicts import Verdict, from_string - +from util import fatal, ProgressBar +from verdicts import from_string, Verdict # Example usage: # bt download_submissions [--user ] [--password ] [--contest ] [--api ] -def download_submissions(): +def download_submissions() -> None: contest_id = get_contest_id() if contest_id is None: fatal("No contest ID found. 
Set in contest.yaml or pass --contest-id .") @@ -51,7 +51,7 @@ def download_submissions(): bar = ProgressBar("Downloading sources", count=len(submissions), max_len=4) - def download_submission(s): + def download_submission(s: dict[str, Any]) -> None: i = int(s["id"]) bar.start(s["id"]) if "judgement_type_id" not in s: diff --git a/bin/export.py b/bin/export.py index fff9503b0..cef58f650 100644 --- a/bin/export.py +++ b/bin/export.py @@ -1,18 +1,38 @@ -import config import datetime import re import shutil -import sys -import util import yaml import zipfile from pathlib import Path from typing import Any, Optional -from contest import * +import config +from contest import call_api, call_api_get_json, contest_yaml, get_contests from latex import PdfType from problem import Problem -from validate import InputValidator, AnswerValidator, OutputValidator +from util import ( + ask_variable_bool, + drop_suffix, + ensure_symlink, + eprint, + error, + fatal, + glob, + has_ryaml, + has_substitute, + inc_label, + log, + normalize_yaml_value, + parse_yaml, + PrintBar, + read_yaml, + ryaml_filter, + substitute, + verbose, + warn, + write_yaml, +) +from validate import AnswerValidator, InputValidator, OutputValidator from visualize import InputVisualizer, OutputVisualizer @@ -62,7 +82,7 @@ def build_samples_zip(problems: list[Problem], output: Path, languages: list[str attachments_dir = problem.path / "attachments" if (problem.interactive or problem.multi_pass) and not attachments_dir.is_dir(): - util.error( + error( f"{problem.settings.type_name()} problem {problem.name} does not have an attachments/ directory." 
) continue @@ -81,13 +101,13 @@ def build_samples_zip(problems: list[Problem], output: Path, languages: list[str if attachments_dir.is_dir(): for f in attachments_dir.iterdir(): if f.is_dir(): - util.error(f"{f} directory attachments are not yet supported.") + error(f"{f} directory attachments are not yet supported.") elif f.is_file() and f.exists(): if f.name.startswith("."): continue # Skip dotfiles destination = outputdir / f.name if destination in contents: - util.error( + error( f"Cannot overwrite {destination} from attachments/" + f" (sourced from {contents[destination]})." + "\n\tDo not include samples in attachments/," @@ -96,16 +116,16 @@ def build_samples_zip(problems: list[Problem], output: Path, languages: list[str else: contents[destination] = f else: - util.error(f"Cannot include broken file {f}.") + error(f"Cannot include broken file {f}.") if contents: for destination, source in contents.items(): zf.write(source, destination) else: - util.error(f"No attachments or samples found for problem {problem.name}.") + error(f"No attachments or samples found for problem {problem.name}.") zf.close() - print("Wrote zip to samples.zip", file=sys.stderr) + eprint("Wrote zip to samples.zip") def build_problem_zip(problem: Problem, output: Path) -> bool: @@ -115,6 +135,8 @@ def build_problem_zip(problem: Problem, output: Path) -> bool: error("zip needs the ruamel.yaml python3 library. 
Install python[3]-ruamel.yaml.") return False + bar = PrintBar("Zip", len(problem.name) + 4, item=problem) + from ruamel.yaml.comments import CommentedMap languages = select_languages([problem]) @@ -144,7 +166,7 @@ def build_problem_zip(problem: Problem, output: Path) -> bool: if problem.custom_output: files.append((f"{OutputValidator.source_dir}/**/*", True)) - message("preparing zip file content", "Zip", problem.path, color_type=MessageType.LOG) + bar.log("preparing zip file content") # prepare files inside dir export_dir = problem.tmpdir / "export" @@ -157,9 +179,7 @@ def build_problem_zip(problem: Problem, output: Path) -> bool: def add_file(path: Path, source: Path) -> None: if source.stat().st_size >= config.ICPC_FILE_LIMIT * 1024**2: - util.warn( - f"{path} is too large for the ICPC Archive (limit {config.ICPC_FILE_LIMIT}MiB)!" - ) + warn(f"{path} is too large for the ICPC Archive (limit {config.ICPC_FILE_LIMIT}MiB)!") path = export_dir / path path.parent.mkdir(parents=True, exist_ok=True) ensure_symlink(path, source) @@ -167,15 +187,15 @@ def add_file(path: Path, source: Path) -> None: # Include all files beside testcases for pattern, required in files: # Only include hidden files if the pattern starts with a '.'. 
- paths = list(util.glob(problem.path, pattern, include_hidden=True)) + paths = list(glob(problem.path, pattern, include_hidden=True)) if required and len(paths) == 0: - util.error(f"No matches for required path {pattern}.") + error(f"No matches for required path {pattern}.") for f in paths: if f.is_file() and not f.name.startswith("."): add_file(f.relative_to(problem.path), f) def add_testcase(in_file: Path) -> None: - base_name = util.drop_suffix(in_file, [".in", ".in.statement", ".in.download"]) + base_name = drop_suffix(in_file, [".in", ".in.statement", ".in.download"]) for ext in config.KNOWN_DATA_EXTENSIONS: f = base_name.with_suffix(ext) if f.is_file(): @@ -184,21 +204,21 @@ def add_testcase(in_file: Path) -> None: # Include all sample test cases and copy all related files. samples = problem.download_samples() if len(samples) == 0: - util.error("No samples found.") + error("No samples found.") for in_file, _ in samples: add_testcase(in_file) # Include all secret test cases and copy all related files. 
pattern = "data/secret/**/*.in" - paths = util.glob(problem.path, pattern) + paths = glob(problem.path, pattern) if len(paths) == 0: - util.error(f"No secret test cases found in {pattern}.") + error(f"No secret test cases found in {pattern}.") for f in paths: if f.is_file(): if f.with_suffix(".ans").is_file(): add_testcase(f) else: - util.warn(f"No answer file found for {f}, skipping.") + warn(f"No answer file found for {f}, skipping.") # handle languages (files and yaml have to be in sync) yaml_path = export_dir / "problem.yaml" @@ -230,17 +250,19 @@ def add_testcase(in_file: Path) -> None: ] for pattern in constants_supported: for f in export_dir.glob(pattern): - if f.is_file() and util.has_substitute(f, config.CONSTANT_SUBSTITUTE_REGEX): + if f.is_file() and has_substitute(f, config.CONSTANT_SUBSTITUTE_REGEX): text = f.read_text() - text = util.substitute( + text = substitute( text, problem.settings.constants, pattern=config.CONSTANT_SUBSTITUTE_REGEX, - bar=util.PrintBar("Zip"), + bar=bar, ) f.unlink() f.write_text(text) + bar = bar.start(output) + # move pdfs if config.args.legacy and languages: for type in PdfType: @@ -256,7 +278,7 @@ def add_testcase(in_file: Path) -> None: if not file.exists(): continue if out.exists(): - util.warn(f"can't add {path} (already exists).") + bar.warn(f"can't add {path} (already exists).") file.unlink() continue out.parent.mkdir(parents=True, exist_ok=True) @@ -288,7 +310,7 @@ def add_testcase(in_file: Path) -> None: # change source: if problem.settings.source: if len(problem.settings.source) > 1: - util.warn(f"Found multiple sources, using '{problem.settings.source[0].name}'.") + warn(f"Found multiple sources, using '{problem.settings.source[0].name}'.") yaml_data["source"] = problem.settings.source[0].name yaml_data["source_url"] = problem.settings.source[0].url # limits.time_multipliers -> time_multiplier / time_safety_margin @@ -337,7 +359,7 @@ def add_testcase(in_file: Path) -> None: f.unlink() f.write_text(t) else: - 
util.error(f"{f}: no name set for language {lang}.") + error(f"{f}: no name set for language {lang}.") # rename statement dirs if (export_dir / "statement").exists(): @@ -345,16 +367,11 @@ def add_testcase(in_file: Path) -> None: for d in ["solution", "problem_slide"]: if not (export_dir / d).is_dir(): continue - for f in list(util.glob(problem.path, f"{d}/*")): + for f in list(glob(problem.path, f"{d}/*")): if f.is_file(): out = Path("problem_statement") / f.relative_to(problem.path / d) if out.exists(): - message( - f"Can not export {f.relative_to(problem.path)} as {out}", - "Zip", - output, - color_type=MessageType.WARN, - ) + bar.warn(f"Cannot export {f.relative_to(problem.path)} as {out}") else: add_file(out, f) shutil.rmtree(export_dir / d) @@ -376,7 +393,7 @@ def add_testcase(in_file: Path) -> None: write_yaml(yaml_data, yaml_path) # Build .ZIP file. - message("writing zip file", "Zip", output, color_type=MessageType.LOG) + bar.log("writing zip file") try: zf = zipfile.ZipFile(output, mode="w", compression=zipfile.ZIP_DEFLATED, allowZip64=False) @@ -389,8 +406,8 @@ def add_testcase(in_file: Path) -> None: # Done. zf.close() - message("done", "Zip", color_type=MessageType.LOG) - print(file=sys.stderr) + bar.log("done") + eprint() except Exception: return False @@ -409,7 +426,7 @@ def build_contest_zip( error("zip needs the ruamel.yaml python3 library. Install python[3]-ruamel.yaml.") return - print(f"writing ZIP file {outfile}", file=sys.stderr) + eprint(f"writing ZIP file {outfile}") if not config.args.kattis: # Kattis does not use problems.yaml. 
update_problems_yaml(problems) @@ -449,8 +466,8 @@ def add_file(file: Path) -> None: for fname in zipfiles: fname.unlink() - print("done", file=sys.stderr) - print(file=sys.stderr) + eprint("done") + eprint() zf.close() @@ -502,7 +519,9 @@ def export_contest(cid: Optional[str]) -> str: fatal(parse_yaml(r.text)["message"]) r.raise_for_status() - new_cid = yaml.load(r.text, Loader=yaml.SafeLoader) + new_cid = normalize_yaml_value(yaml.load(r.text, Loader=yaml.SafeLoader), str) + assert isinstance(new_cid, str) + log(f"Uploaded the contest to contest_id {new_cid}.") if new_cid != cid: if ask_variable_bool("Update contest_id in contest.yaml automatically"): @@ -669,7 +688,8 @@ def export_contest_and_problems(problems: list[Problem], languages: list[str]) - if config.args.contest_id: cid = config.args.contest_id else: - cid = contest_yaml().get("contest_id") + cid = normalize_yaml_value(contest_yaml().get("contest_id"), str) + assert cid is None or isinstance(cid, str) if cid is not None and cid != "": log(f"Reusing contest id {cid} from contest.yaml") if not any(contest["id"] == cid for contest in get_contests()): @@ -704,7 +724,9 @@ def get_problem_id(problem: Problem) -> Optional[str]: nonlocal ccs_problems for p in ccs_problems: if problem.name in [p.get("short_name"), p.get("id"), p.get("externalid")]: - return p["id"] + pid = normalize_yaml_value(p.get("id"), str) + assert isinstance(pid, str) + return pid return None for problem in problems: diff --git a/bin/fuzz.py b/bin/fuzz.py index ac1f86241..73e10416e 100644 --- a/bin/fuzz.py +++ b/bin/fuzz.py @@ -1,23 +1,35 @@ -import config -import problem import random -import generate import shutil import signal -import sys -import time import threading +import time from colorama import Style from pathlib import Path -from typing import Any, Optional, TextIO +from typing import Any, Optional +import config +import generate import parallel -from util import * +import problem from run import Run, Submission from testcase import Testcase 
-from validate import OutputValidator, Mode +from util import ( + eprint, + error, + fatal, + has_ryaml, + PrintBar, + ProgressBar, + read_yaml, + ryaml_get_or_add, + write_yaml, +) +from validate import Mode, OutputValidator from verdicts import Verdict +if has_ryaml: + from ruamel.yaml.comments import CommentedMap, CommentedSeq + # STEPS: # 1. Find generator invocations depending on {seed}. # 2. Generate a testcase + .ans using the rule using a random seed. @@ -28,7 +40,7 @@ class GeneratorTask: - def __init__(self, fuzz: "Fuzz", t: generate.TestcaseRule, i: int, tmp_id: int): + def __init__(self, fuzz: "Fuzz", t: generate.TestcaseRule, i: int, tmp_id: int) -> None: self.fuzz = fuzz self.rule = t generator = t.generator @@ -151,7 +163,7 @@ def __init__( submission: Submission, testcase: Testcase, tmp_id: int, - ): + ) -> None: self.generator_task = generator_task self.submission = submission self.testcase = testcase @@ -165,6 +177,7 @@ def _run(self, bar: ProgressBar) -> None: r = Run(self.generator_task.fuzz.problem, self.submission, self.testcase) localbar = bar.start(f"{self.generator_task.i}: {self.submission.name}") result = r.run(localbar) + assert result.verdict is not None self.generator_task.fuzz.queue.ensure_alive() if result.verdict != Verdict.ACCEPTED: self.generator_task.save_test(bar, self.submission, result.verdict) @@ -174,24 +187,22 @@ def _run(self, bar: ProgressBar) -> None: class FuzzProgressBar(ProgressBar): - def __init__(self, queue: parallel.AbstractQueue, prefix: str, max_len: int): + def __init__( + self, + queue: parallel.AbstractQueue[GeneratorTask | SubmissionTask], + prefix: str, + max_len: int, + ) -> None: super().__init__(prefix, max_len) self.queue = queue - def _print( - self, - *objects, - sep: str = "", - end: str = "\n", - file: TextIO = sys.stderr, - flush: bool = True, - ): + def _print(self, *args: Any, **kwargs: Any) -> None: self.queue.ensure_alive() - super()._print(*objects, sep=sep, end=end, file=file, flush=flush) + 
super()._print(*args, **kwargs) class Fuzz: - def __init__(self, problem: problem.Problem): + def __init__(self, problem: problem.Problem) -> None: self.generators_yaml_mutex = threading.Lock() self.problem = problem self.summary: dict[Submission, set[Verdict]] = {} @@ -249,8 +260,6 @@ def run(self) -> bool: error("No submissions found.") return False - message("Press CTRL+C to stop\n", "Fuzz", color_type=MessageType.LOG) - def runner(task: GeneratorTask | SubmissionTask) -> None: task.run(bar) @@ -274,6 +283,9 @@ def runner(task: GeneratorTask | SubmissionTask) -> None: ], ) max_len += len(f"{self.tmp_ids}: ") + # we use a PrintBar after an abort + printbar = PrintBar("Fuzz", max_len=max_len) + printbar.log("Press CTRL+C to stop\n") bar = FuzzProgressBar(self.queue, "Fuzz", max_len=max_len) def soft_exit(sig: Any, frame: Any) -> None: @@ -282,12 +294,8 @@ def soft_exit(sig: Any, frame: Any) -> None: else: self.queue.abort() with bar: - print(bar.carriage_return, file=sys.stderr) - message( - "Running interrupted (waiting on remaining tasks)\n", - "\nFuzz", - color_type=MessageType.ERROR, - ) + eprint(bar.carriage_return) + printbar.error("Running interrupted (waiting on remaining tasks)\n") old_handler = signal.signal(signal.SIGINT, soft_exit) @@ -303,8 +311,8 @@ def soft_exit(sig: Any, frame: Any) -> None: for submission, verdicts in self.summary.items(): msg = ", ".join(f"{v.color()}{v.short()}{Style.RESET_ALL}" for v in sorted(verdicts)) - message(msg, "Fuzz", submission.name) - message(f"Found {self.added} testcases in total.", "Fuzz") + printbar.start(submission).log(msg, color="") + printbar.log(f"Found {self.added} testcases in total.", color="") if self.queue.aborted: fatal("Running interrupted") @@ -355,13 +363,13 @@ def save_test( if generators_yaml.is_file(): data = read_yaml(generators_yaml) if data is None: - data = ruamel.yaml.comments.CommentedMap() + data = CommentedMap() parent = ryaml_get_or_add(data, "data") parent = ryaml_get_or_add(parent, 
"fuzz") - entry = ryaml_get_or_add(parent, "data", ruamel.yaml.comments.CommentedSeq) + entry = ryaml_get_or_add(parent, "data", CommentedSeq) - entry.append(ruamel.yaml.comments.CommentedMap()) + entry.append(CommentedMap()) entry[-1][""] = command # Overwrite generators.yaml. diff --git a/bin/generate.py b/bin/generate.py index 8279e09f5..73ff12680 100644 --- a/bin/generate.py +++ b/bin/generate.py @@ -1,15 +1,15 @@ import collections +import itertools import random import re import secrets +import shlex import shutil -import sys import time - -from collections.abc import Callable, Sequence +from collections.abc import Callable, Iterable, Iterator, Sequence from colorama import Fore, Style from pathlib import Path, PurePosixPath -from typing import Any, Final, Iterable, Optional, overload +from typing import Any, cast, Final, Literal, Optional, overload import config import parallel @@ -17,21 +17,53 @@ import run import validate import visualize +from problem import Problem from testcase import Testcase +from util import ( + combine_hashes, + combine_hashes_dict, + ensure_symlink, + eprint, + error, + ExecResult, + ExecStatus, + fatal, + get_basedirs, + glob, + has_ryaml, + hash_file_content, + hash_string, + is_relative_to, + log, + path_size, + PrintBar, + ProgressBar, + read_yaml, + ryaml_get_or_add, + shorten_path, + substitute, + warn, + write_yaml, +) from verdicts import Verdict -from problem import Problem -from util import * +if has_ryaml: + import ruamel.yaml + + +YAML_TYPE = Optional[str | dict[str, Any]] class ParseException(Exception): - def __init__(self, message=None, path=None): + def __init__(self, message: Optional[str] = None, path: Optional[Path | str] = None) -> None: super().__init__(message, path) self.message = message self.path = path -def assert_type(name, obj, types, path=None): +def assert_type( + name: str, obj: Any, types: list[type[Any]] | type[Any], path: Optional[Path] = None +) -> None: if not isinstance(types, list): types = 
[types] if any(isinstance(obj, t) for t in types): @@ -51,7 +83,7 @@ def assert_type(name, obj, types, path=None): ] + [e[1:] for e in config.KNOWN_TEXT_DATA_EXTENSIONS] -def is_testcase(yaml): +def is_testcase(yaml: YAML_TYPE) -> bool: return ( yaml is None or isinstance(yaml, str) @@ -59,18 +91,18 @@ def is_testcase(yaml): ) -def is_directory(yaml): +def is_directory(yaml: YAML_TYPE) -> bool: return isinstance(yaml, dict) and not is_testcase(yaml) -def has_count(yaml): +def has_count(yaml: YAML_TYPE) -> bool: return isinstance(yaml, dict) and "count" in yaml and isinstance(yaml["count"], int) # Returns the given path relative to the problem root. -def resolve_path(path, *, allow_absolute, allow_relative): - assert isinstance(path, str) - path = PurePosixPath(path) +def resolve_path(path_str: str, *, allow_absolute: bool, allow_relative: bool) -> Path: + assert isinstance(path_str, str) + path = PurePosixPath(path_str) if not allow_absolute: if path.is_absolute(): raise ParseException(f"Path must not be absolute: {path}") @@ -95,9 +127,10 @@ class Invocation: # `string` is the name of the submission (relative to generators/ or absolute from the problem root) with command line arguments. # A direct path may also be given. - def __init__(self, problem: Problem, string: str, *, allow_absolute: bool, allow_relative=True): - string = str(string) - commands = string.split() + def __init__( + self, problem: Problem, string: str, *, allow_absolute: bool, allow_relative: bool = True + ) -> None: + commands = shlex.split(string) command = commands[0] self.args = commands[1:] self.problem = problem @@ -123,20 +156,21 @@ def __init__(self, problem: Problem, string: str, *, allow_absolute: bool, allow # Automatically set self.program when that program has been built. 
self.program: Optional[program.Generator | run.Submission] = None - def callback(program): - self.program = program + def callback(prog: program.Program) -> None: + assert isinstance(prog, (program.Generator, run.Submission)) + self.program = prog program.Program.add_callback(problem, problem.path / self.program_path, callback) # Return the form of the command used for caching. # This is independent of {name} and the actual run_command. - def cache_command(self, seed=None) -> str: + def cache_command(self, seed: Optional[int] = None) -> str: command_string = self.command_string if seed: command_string = self.SEED_REGEX.sub(str(seed), command_string) return command_string - def hash(self, seed=None): + def hash(self, seed: Optional[int] = None) -> str: list = [] if self.program is not None: assert self.program.hash is not None @@ -145,11 +179,11 @@ def hash(self, seed=None): return combine_hashes(list) # Return the full command to be executed. - def _sub_args(self, *, seed=None): + def _sub_args(self, *, seed: Optional[int] = None) -> Sequence[str]: if self.uses_seed: assert seed is not None - def sub(arg): + def sub(arg: str) -> str: arg = self.NAME_REGEX.sub("testcase", arg) if self.uses_seed: arg = self.SEED_REGEX.sub(str(seed), arg) @@ -159,11 +193,13 @@ def sub(arg): class GeneratorInvocation(Invocation): - def __init__(self, problem, string): + def __init__(self, problem: Problem, string: str) -> None: super().__init__(problem, string, allow_absolute=False) # Try running the generator |retries| times, incrementing seed by 1 each time. - def run(self, bar, cwd, name, seed, retries=1): + def run( + self, bar: ProgressBar, cwd: Path, name: str, seed: int, retries: int = 1 + ) -> ExecResult: assert isinstance(self.program, program.Generator), "Generator program must be built!" 
for retry in range(retries): @@ -190,12 +226,12 @@ def run(self, bar, cwd, name, seed, retries=1): class SolutionInvocation(Invocation): - def __init__(self, problem, string): + def __init__(self, problem: Problem, string: str) -> None: super().__init__(problem, string, allow_absolute=True, allow_relative=False) # Run the submission, reading testcase.in from stdin and piping stdout to testcase.ans. # If the .ans already exists, nothing is done - def run(self, bar, cwd): + def run(self, bar: ProgressBar, cwd: Path) -> ExecResult: assert isinstance(self.program, run.Submission), "Submission program must be built!" in_path = cwd / "testcase.in" @@ -217,7 +253,7 @@ def run(self, bar, cwd): bar.log("stderr", result.err) return result - def generate_interaction(self, bar, cwd, t): + def generate_interaction(self, bar: ProgressBar, cwd: Path, t: "TestcaseRule") -> bool: in_path = cwd / "testcase.in" interaction_path = cwd / "testcase.interaction" interaction_path.unlink(missing_ok=True) @@ -227,9 +263,9 @@ def generate_interaction(self, bar, cwd, t): r = run.Run(self.problem, self.program, testcase) # No {name}/{seed} substitution is done since all IO should be via stdin/stdout. - ret = r.run(bar, interaction=interaction_path, submission_args=self.args) - if ret.verdict != Verdict.ACCEPTED: - bar.error(ret.verdict) + result = r.run(bar, interaction=interaction_path, submission_args=self.args) + if result.verdict != Verdict.ACCEPTED: + bar.error(f"could not generate .interaction, submission got {result.verdict}") return False return True @@ -238,18 +274,15 @@ def generate_interaction(self, bar, cwd, t): # Return absolute path to default submission, starting from the submissions directory. # This function will always prints a message. # Which submission is used is implementation defined, unless one is explicitly given on the command line. 
-def default_solution_path(generator_config): +def default_solution_path(generator_config: "GeneratorConfig") -> Path: problem = generator_config.problem solution = None stored_solution = problem.tmpdir / ".default_solution" + bar = PrintBar("generators.yaml") if config.args.default_solution: if generator_config.has_yaml: - message( - f"""--default-solution Ignored. Set the default solution in the generators.yaml! -solution: /{config.args.default_solution}""", - "generators.yaml", - color_type=MessageType.WARN, - ) + bar.warn(f"""--default-solution Ignored. Set the default solution in the generators.yaml! +solution: /{config.args.default_solution}""") else: solution = problem.path / config.args.default_solution else: @@ -273,10 +306,8 @@ def default_solution_path(generator_config): raw = yaml_path.read_text() raw = f"solution: /{solution.relative_to(problem.path)}\n" + raw yaml_path.write_text(raw) - message( - f"No solution specified. {solution_short_path} added as default solution in the generators.yaml", - "generators.yaml", - color_type=MessageType.LOG, + bar.log( + f"No solution specified. {solution_short_path} added as default solution in the generators.yaml" ) else: log( @@ -289,14 +320,6 @@ def default_solution_path(generator_config): return Path("/") / solution.relative_to(problem.path) -# A wrapper that lazily initializes the underlying SolutionInvocation on first -# usage. This is to prevent instantiating the default solution when it's not -# actually needed. -class DefaultSolutionInvocation(SolutionInvocation): - def __init__(self, generator_config): - super().__init__(generator_config.problem, default_solution_path(generator_config)) - - KNOWN_TESTCASE_KEYS: Final[Sequence[str]] = [ "type", "generate", @@ -324,92 +347,109 @@ def __init__(self, generator_config): # Holds all inheritable configuration options. Currently: # - config.solution # - config.random_salt +# - config.retries class Config: # Used at each directory or testcase level. 
@staticmethod - def parse_solution(p, x, path): - assert_type("Solution", x, [type(None), str], path) + def _parse_solution(p: Problem, x: Any, path: Path) -> Optional[SolutionInvocation]: + assert_type("solution", x, [type(None), str], path) if x is None: return None return SolutionInvocation(p, x) @staticmethod - def parse_random_salt(p, x, path): - assert_type("Random_salt", x, [type(None), str], path) + def _parse_random_salt(x: Any, path: Path) -> str: + assert_type("random_salt", x, [type(None), str], path) if x is None: return "" - return x - - INHERITABLE_KEYS: Final[Sequence] = [ - # True: use an AC submission by default when the solution: key is not present. - ("solution", True, parse_solution), - ("random_salt", "", parse_random_salt), - # Non-portable keys only used by BAPCtools: - # The number of retries to run a generator when it fails, each time incrementing the {seed} - # by 1. - ("retries", 1, lambda p, x, path: int(x)), - ] + return cast(str, x) - solution: SolutionInvocation - random_salt: str - retries: int + @staticmethod + def _parse_retries(x: Any, path: Path) -> int: + assert_type("retries", x, [type(None), int], path) + if x is None: + return 1 + return cast(int, x) - def __init__(self, problem, path, yaml=None, parent_config=None): - assert not yaml or isinstance(yaml, dict) + def __init__( + self, + problem: Problem, + path: Path, + yaml: Optional[dict[str, Any]] = None, + parent_config: Optional["Config"] = None, + ) -> None: + if parent_config is None: + self.needs_default_solution = True + self.solution: Optional[SolutionInvocation] = None + self.random_salt: str = "" + self.retries: int = 1 + else: + self.needs_default_solution = parent_config.needs_default_solution + self.solution = parent_config.solution + self.random_salt = parent_config.random_salt + self.retries = parent_config.retries - for key, default, func in Config.INHERITABLE_KEYS: - if func is None: - func = lambda p, x, path: x # noqa: E731 # TODO this can probably be 
prettier - if yaml and key in yaml: - setattr(self, key, func(problem, yaml[key], path)) - elif parent_config is not None: - setattr(self, key, getattr(parent_config, key)) - else: - setattr(self, key, default) + if yaml is not None: + if "solution" in yaml: + self.needs_default_solution = False + self.solution = Config._parse_solution(problem, yaml["solution"], path) + if "random_salt" in yaml: + self.random_salt = Config._parse_random_salt(yaml["random_salt"], path) + if "retries" in yaml: + self.retries = Config._parse_retries(yaml["retries"], path) class Rule: # key: the dictionary key in the yaml file, i.e. `testcase` # name: the numbered testcase name, i.e. `01-testcase` - def __init__(self, problem, key, name, yaml, parent): - assert parent is not None - + def __init__( + self, + problem: Problem, + key: str, + name: str, + yaml: YAML_TYPE, + parent: "AnyDirectory", + ) -> None: self.parent = parent - if isinstance(yaml, dict): - self.config = Config(problem, parent.path / name, yaml, parent_config=parent.config) - else: - self.config = parent.config - # Yaml key of the current directory/testcase. self.key = key # Filename of the current directory/testcase. - self.name = name + self.name: str = name # Path of the current directory/testcase relative to data/. 
self.path: Path = parent.path / self.name # store Yaml self.yaml = yaml + if parent.config is not None: + self.config: Config = parent.config + else: + self.config = Config(problem, parent.path / name) + if isinstance(yaml, dict): + self.config = Config(problem, parent.path / name, yaml, parent_config=parent.config) + class TestcaseRule(Rule): def __init__( self, problem: Problem, - generator_config, - key, + generator_config: "GeneratorConfig", + key: str, name: str, - yaml: dict[str, Any], - parent, - count_index, - ): + yaml: YAML_TYPE, + parent: "AnyDirectory", + count_index: int, + ) -> None: assert is_testcase(yaml) # if not None rule will be skipped during generation self.parse_error: Optional[str] = None + # root in /data + self.root = (parent.path / name).parts[0] # Whether this testcase is a sample. - self.sample: bool = len(parent.path.parts) > 0 and parent.path.parts[0] == "sample" + self.sample: bool = self.root == "sample" # each test case needs some kind of input self.required_in: list[list[str]] = [[".in"]] if self.sample: @@ -443,23 +483,18 @@ def __init__( # used to handle duplicated testcase rules self.copy_of = None + bar = PrintBar("generators.yaml", item=parent.path / name) + if name.endswith(".in"): - message( - "Testcase names should not end with '.in'", - "generators.yaml", - parent.path / name, - color_type=MessageType.ERROR, - ) + bar.error("Testcase names should not end with '.in'") name = name[:-3] - super().__init__(problem, key, name, yaml, parent) - - # root in /data - self.root = self.path.parts[0] - - # files to consider for hashing - hashes = {} try: + super().__init__(problem, key, name, yaml, parent) + bar = bar.start(self.path) + + # files to consider for hashing + hashes = {} if not config.COMPILED_FILE_NAME_REGEX.fullmatch(name + ".in"): raise ParseException("Test case does not have a valid name.") @@ -477,11 +512,8 @@ def __init__( if isinstance(yaml, str): yaml = {"generate": yaml} if yaml["generate"].endswith(".in"): - 
message( - f"Use the new `copy: path/to/case` key instead of {yaml['generate']}.", - "generators.yaml", - self.path, - color_type=MessageType.WARN, + bar.warn( + f"Use the new `copy: path/to/case` key instead of {yaml['generate']}." ) yaml = {"copy": yaml["generate"][:-3]} @@ -528,10 +560,8 @@ def __init__( "{count}", f"{self.count_index + 1}" ) else: - message( - "Found {count} in generator command but no count in yaml. Ignored.", - self.path, - color_type=MessageType.WARN, + bar.warn( + "Found {count} in generator command but no count in yaml. Ignored." ) self.generator = GeneratorInvocation(problem, command_string) @@ -553,12 +583,7 @@ def __init__( if "copy" in yaml: assert_type("`copy`", yaml["copy"], str) if Path(yaml["copy"]).suffix in config.KNOWN_TEXT_DATA_EXTENSIONS: - message( - f"`copy: {yaml['copy']}` should not include the extension.", - "generators.yaml", - self.path, - color_type=MessageType.WARN, - ) + bar.warn(f"`copy: {yaml['copy']}` should not include the extension.") self.copy = resolve_path( yaml["copy"], allow_absolute=False, allow_relative=True ) @@ -596,12 +621,7 @@ def __init__( raise ParseException(f"Testcase must not contain reserved key {key}.") if key not in KNOWN_TESTCASE_KEYS: if config.args.action == "generate": - message( - f"Unknown testcase level key: {key}", - "generators.yaml", - self.path, - color_type=MessageType.LOG, - ) + bar.log(f"Unknown testcase level key: {key}") # combine hashes self.hash = combine_hashes_dict(hashes) @@ -626,7 +646,9 @@ def _has_required_in(t, infile: Path) -> bool: return True return False - def link(t, problem, generator_config, bar, dst): + def link( + t, problem: Problem, generator_config: "GeneratorConfig", bar: ProgressBar, dst: Path + ) -> None: src_dir = problem.path / "data" / t.path.parent src = src_dir / (t.name + ".in") @@ -657,7 +679,9 @@ def link(t, problem, generator_config, bar, dst): # both source and target do not exist pass - def validate_in(t, problem: Problem, testcase: Testcase, 
meta_yaml: dict, bar: ProgressBar): + def validate_in( + t, problem: Problem, testcase: Testcase, meta_yaml: dict[str, Any], bar: ProgressBar + ) -> bool: infile = problem.tmpdir / "data" / t.hash / "testcase.in" assert infile.is_file() @@ -703,8 +727,8 @@ def validate_in(t, problem: Problem, testcase: Testcase, meta_yaml: dict, bar: P return True def validate_ans_and_out( - t, problem: Problem, testcase: Testcase, meta_yaml: dict, bar: ProgressBar - ): + t, problem: Problem, testcase: Testcase, meta_yaml: dict[str, Any], bar: ProgressBar + ) -> bool: infile = problem.tmpdir / "data" / t.hash / "testcase.in" assert infile.is_file() @@ -757,7 +781,9 @@ def validate_ans_and_out( ) return True - def generate(t, problem: Problem, generator_config, parent_bar): + def generate( + t, problem: Problem, generator_config: "GeneratorConfig", parent_bar: ProgressBar + ) -> None: bar = parent_bar.start(str(t.path)) t.generate_success = False @@ -777,11 +803,6 @@ def generate(t, problem: Problem, generator_config, parent_bar): if t.generator and t.generator.program is None: bar.done(False, "Generator didn't build. Skipping.") return - if t.hash is None: - # Input can only be missing when the `copy:` does not have a corresponding `.in` file. - # (When `generate:` or `in:` is used, the input is always present.) - bar.done(False, f"{t.copy} does not exist. 
Skipping.") - return target_dir = problem.path / "data" / t.path.parent target_infile = target_dir / (t.name + ".in") @@ -793,7 +814,7 @@ def generate(t, problem: Problem, generator_config, parent_bar): ansfile = cwd / "testcase.ans" meta_path = cwd / "meta_.yaml" - def init_meta(): + def init_meta() -> dict[str, Any]: meta_yaml = read_yaml(meta_path) if meta_path.is_file() else None if meta_yaml is None: meta_yaml = { @@ -810,7 +831,7 @@ def init_meta(): meta_yaml = init_meta() - def _check_deterministic(tmp, tmp_infile): + def _check_deterministic(tmp: Path, tmp_infile: Path) -> None: assert t.generator is not None result = t.generator.run(bar, tmp, tmp_infile.stem, t.seed, t.config.retries) if not result.status: @@ -855,7 +876,7 @@ def _check_deterministic(tmp, tmp_infile): # which is also set to True when running `bt all`. # This doesn't do anything for non-generated cases. # It also checks that the input changes when the seed changes. - def check_deterministic(force=False): + def check_deterministic(force: bool = False) -> None: if not force and not config.args.check_deterministic: return if t.generator is None: @@ -871,7 +892,7 @@ def check_deterministic(force=False): # clean up shutil.rmtree(tmp) - def generate_from_rule(): + def generate_from_rule() -> bool: nonlocal meta_yaml # create expected cache entry for generate @@ -939,7 +960,7 @@ def generate_from_rule(): assert t._has_required_in(infile), f"Failed to generate in file: {infile.name}" return True - def generate_from_solution(testcase: Testcase, bar: ProgressBar): + def generate_from_solution(testcase: Testcase, bar: ProgressBar) -> bool: nonlocal meta_yaml if testcase.root in [ @@ -952,7 +973,7 @@ def generate_from_solution(testcase: Testcase, bar: ProgressBar): return True if t.config.solution is not None: - solution_hash = { + solution_hash: dict[str, Optional[str]] = { "solution_hash": t.config.solution.hash(), "solution": t.config.solution.cache_command(), } @@ -962,7 +983,9 @@ def 
generate_from_solution(testcase: Testcase, bar: ProgressBar): "solution": None, } - def needed(ext, interactor_hash=None): + def needed( + ext: str, interactor_hash: Optional[dict[str, dict[str, str]]] = None + ) -> bool: if ext in meta_yaml["generated_extensions"]: return False if not infile.with_suffix(ext).is_file(): @@ -1023,7 +1046,7 @@ def needed(ext, interactor_hash=None): assert ansfile.is_file(), f"Failed to generate ans file: {ansfile}" return True - def generate_visualization(testcase: Testcase, bar: ProgressBar): + def generate_visualization(testcase: Testcase, bar: ProgressBar) -> bool: nonlocal meta_yaml if testcase.root in config.INVALID_CASE_DIRECTORIES: @@ -1126,7 +1149,7 @@ def skip_images(src: str, content: list[str]) -> list[str]: # errors in the visualizer are not critical return True - def generate_empty_interactive_sample_ans(): + def generate_empty_interactive_sample_ans() -> bool: if not t.sample: return True if not problem.interactive and not problem.multi_pass: @@ -1140,7 +1163,7 @@ def generate_empty_interactive_sample_ans(): return True return True - def copy_generated(): + def copy_generated() -> None: identical_exts = set() for ext in config.KNOWN_DATA_EXTENSIONS: @@ -1182,7 +1205,7 @@ def copy_generated(): # both source and target do not exist pass - def add_test_case_to_cache(): + def add_test_case_to_cache() -> None: # Used to identify generated test cases generator_config.hashed_in.add(hash_file_content(infile)) @@ -1279,9 +1302,9 @@ def __init__( problem: Problem, key: str, name: str, - yaml: dict, + yaml: dict[str, Any], parent: "AnyDirectory", - ): + ) -> None: assert is_directory(yaml) # The root Directory object has name ''. 
@@ -1290,29 +1313,21 @@ def __init__( raise ParseException("Directory does not have a valid name.", parent.path / name) super().__init__(problem, key, name, yaml, parent) + bar = PrintBar("generators.yaml", item=self.path) - if name == "": + if isinstance(parent, RootDirectory): for key in yaml: if key in RESERVED_DIRECTORY_KEYS: raise ParseException( f"Directory must not contain reserved key {key}.", self.path ) if key in DEPRECATED_ROOT_KEYS: - message( - f"Deprecated root level key: {key}, ignored", - "generators.yaml", - self.path, - color_type=MessageType.WARN, - ) + bar.warn(f"Deprecated root level key: {key}, ignored") elif key not in [*KNOWN_DIRECTORY_KEYS, *KNOWN_ROOT_KEYS]: if config.args.action == "generate": - message( - f"Unknown root level key: {key}", - "generators.yaml", - self.path, - color_type=MessageType.LOG, - ) + bar.log(f"Unknown root level key: {key}") else: + assert name != "" for key in yaml: if key in [*RESERVED_DIRECTORY_KEYS, *KNOWN_ROOT_KEYS]: raise ParseException( @@ -1320,12 +1335,7 @@ def __init__( ) if key not in KNOWN_DIRECTORY_KEYS: if config.args.action == "generate": - message( - f"Unknown directory level key: {key}", - "generators.yaml", - self.path, - color_type=MessageType.LOG, - ) + bar.log(f"Unknown directory level key: {key}") self.test_group_yaml: Any = yaml.get("test_group.yaml", False) self.numbered = False @@ -1341,8 +1351,6 @@ def __init__( data = yaml["data"] if data is None: return - if data == "": - return assert_type("Data", data, [dict, list]) if isinstance(data, list): @@ -1371,9 +1379,7 @@ def __init__( def walk( self, testcase_f: Optional[Callable[["TestcaseRule | Directory"], Any]], - *, - dir_last=False, - ): ... + ) -> None: ... # This overload takes one function for test cases and a separate function for directories. @overload @@ -1381,35 +1387,37 @@ def walk( self, testcase_f: Optional[Callable[[TestcaseRule], Any]], dir_f: Optional[Callable[["Directory"], Any]], - *, - dir_last=False, - ): ... 
- - # Below is the actual implementation of `walk`, - # no parameter types are needed here because they are already defined by the @overloads. + ) -> None: ... # Map a function over all test cases directory tree. # dir_f by default reuses testcase_f - def walk(self, testcase_f=None, dir_f=True, *, dir_last=False): + def walk( + self, + testcase_f: Optional[ + Callable[["TestcaseRule | Directory"], Any] | Callable[[TestcaseRule], Any] + ] = None, + dir_f: Literal[True] + | Optional[ + Callable[["TestcaseRule | Directory"], Any] | Callable[["Directory"], Any] + ] = True, + ) -> None: if dir_f is True: - dir_f = testcase_f - - if not dir_last and dir_f: + dir_f = cast(Optional[Callable[["TestcaseRule | Directory"], Any]], testcase_f) + if dir_f: dir_f(self) for d in self.data: if isinstance(d, Directory): - d.walk(testcase_f, dir_f, dir_last=dir_last) + d.walk(testcase_f, dir_f) elif isinstance(d, TestcaseRule): if testcase_f: testcase_f(d) else: assert False - if dir_last and dir_f: - dir_f(self) - - def generate(d, problem, generator_config, bar): + def generate( + d, problem: Problem, generator_config: "GeneratorConfig", bar: ProgressBar + ) -> None: # Generate the current directory: # - Create the directory. # - Write test_group.yaml. 
@@ -1441,13 +1449,15 @@ def generate(d, problem, generator_config, bar): # new file -> create it test_group_yaml_path.write_text(yaml_text) bar.log("NEW: test_group.yaml") - elif d.test_group_yaml == "" and test_group_yaml_path.is_file(): + elif d.test_group_yaml is None and test_group_yaml_path.is_file(): # empty -> remove it generator_config.remove(test_group_yaml_path) bar.log("REMOVED: test_group.yaml") bar.done() - def generate_includes(d, problem, generator_config, bar): + def generate_includes( + d, problem: Problem, generator_config: "GeneratorConfig", bar: ProgressBar + ) -> None: for key in d.includes: t = d.includes[key] target = t.path @@ -1494,14 +1504,15 @@ def generate_includes(d, problem, generator_config, bar): # Returns the numbered name -def numbered_test_case_name(base_name, i, n): +def next_numbered_name(base_name: str, i: Iterator[int], n: int) -> Iterator[str]: width = len(str(n)) - number_prefix = f"{i:0{width}}" - if base_name: - return number_prefix + "-" + base_name - else: - assert base_name is None or base_name == "" - return number_prefix + while True: + number_prefix = f"{next(i):0{width}}" + if base_name: + yield f"{number_prefix}-{base_name}" + else: + assert base_name is None or base_name == "" + yield number_prefix AnyDirectory = RootDirectory | Directory @@ -1509,10 +1520,11 @@ def numbered_test_case_name(base_name, i, n): class GeneratorConfig: @staticmethod - def parse_generators(generators_yaml): + def _parse_generators(generators_yaml: YAML_TYPE) -> dict[Path, list[Path]]: assert_type("Generators", generators_yaml, dict) + assert isinstance(generators_yaml, dict) generators = {} - for gen in generators_yaml: + for gen, deps in generators_yaml.items(): if ( gen.startswith("/") or Path(gen).is_absolute() @@ -1522,7 +1534,6 @@ def parse_generators(generators_yaml): path = Path("generators") / gen - deps = generators_yaml[gen] assert_type("Generator dependencies", deps, list) if len(deps) == 0: raise ParseException("Generator 
dependencies must not be empty.", path) @@ -1532,15 +1543,8 @@ def parse_generators(generators_yaml): generators[path] = [Path("generators") / d for d in deps] return generators - # Only used at the root directory level. - ROOT_KEYS: Final[Sequence] = [ - ("generators", dict[Path, list[Path]](), parse_generators), - ] - - generators: dict[Path, list[Path]] - # Parse generators.yaml. - def __init__(self, problem, restriction=None): + def __init__(self, problem: Problem, restriction: Optional[Sequence[Path]] = None) -> None: self.problem = problem yaml_path = self.problem.path / "generators" / "generators.yaml" self.n_parse_error = 0 @@ -1548,26 +1552,28 @@ def __init__(self, problem, restriction=None): # A map of paths `secret/test_group/test_case` to their canonical TestcaseRule. # For generated cases this is the rule itself. # For included cases, this is the 'resolved' location of the test case that is included. - self.known_cases = dict() + self.known_cases = dict[Path, TestcaseRule]() # A map of paths `secret/test_group` to Directory rules. - self.known_directories = dict() + self.known_directories = dict[Path, Directory]() # Used for cleanup - self.known_files = set() + self.known_files = set[Path]() # A map from key to (is_included, list of test cases and directories), # used for `include` statements. self.known_keys = collections.defaultdict[str, tuple[bool, list[TestcaseRule | Directory]]]( lambda: (False, []) ) # A set of testcase rules, including seeds. - self.rules_cache = dict() + self.rules_cache = dict[str, TestcaseRule]() # The set of generated test cases keyed by hash(test_case). 
- self.generated_test_cases = dict() + self.generated_test_cases = dict[str, TestcaseRule]() # Path to the trash directory for this run self.trash_dir: Optional[Path] = None # Set of hash(.in) for all generated testcases - self.hashed_in = set() + self.hashed_in = set[str]() # Files that should be processed self.restriction = restriction + # replaced during parse_yaml + self.generators = dict[Path, list[Path]]() if yaml_path.is_file(): self.yaml = read_yaml(yaml_path) @@ -1580,11 +1586,10 @@ def __init__(self, problem, restriction=None): self.parse_yaml(self.yaml) except ParseException as e: # Handle fatal parse errors - message(e.message, "generators.yaml", e.path, color_type=MessageType.FATAL) - exit() + PrintBar("generators.yaml", item=e.path).fatal(e.message or "") # testcase_short_path: secret/1.in - def process_testcase(self, relative_testcase_path): + def process_testcase(self, relative_testcase_path: Path) -> bool: if not self.restriction: return True absolute_testcase_path = self.problem.path / "data" / relative_testcase_path.with_suffix("") @@ -1594,19 +1599,17 @@ def process_testcase(self, relative_testcase_path): return True return False - def parse_yaml(self, yaml): + def parse_yaml(self, yaml: YAML_TYPE) -> None: assert_type("Root yaml", yaml, [type(None), dict]) if yaml is None: yaml = dict() + assert isinstance(yaml, dict) # Read root level configuration - for key, default, func in GeneratorConfig.ROOT_KEYS: - if yaml and key in yaml: - setattr(self, key, func(yaml[key] if yaml[key] is not None else default)) - else: - setattr(self, key, default) + if "generators" in yaml: + self.generators = self._parse_generators(yaml["generators"]) - def add_known(obj): + def add_known(obj: TestcaseRule | Directory) -> None: path = obj.path name = path.name if isinstance(obj, TestcaseRule): @@ -1619,51 +1622,38 @@ def add_known(obj): is_included, cases_list = self.known_keys[obj.key] cases_list.append(obj) if is_included and len(cases_list) == 2: - message( - 
f"Included key {name} exists more than once as {cases_list[0].path} and {cases_list[1].path}.", - "generators.yaml", - obj.path, - color_type=MessageType.ERROR, + PrintBar("generators.yaml", item=obj.path).error( + f"Included key {name} exists more than once as {cases_list[0].path} and {cases_list[1].path}." ) num_numbered_test_cases = 0 - test_case_id = 0 + next_test_case_id = itertools.count(1) - def parse_count(yaml, warn_for=None): + def parse_count(yaml: YAML_TYPE, warn_for: Optional[Path] = None) -> int: if not has_count(yaml): return 1 + assert isinstance(yaml, dict) count = yaml["count"] + assert isinstance(count, int) + bar = PrintBar("generators.yaml", item=warn_for) if count < 1: if warn_for is not None: - message( - f"Found count: {count}, increased to 1.", - "generators.yaml", - warn_for, - color_type=MessageType.WARN, - ) + bar.warn(f"Found count: {count}, increased to 1.") return 1 if count > 1000: if warn_for is not None: - message( - f"Found count: {count}, limited to 1000.", - "generators.yaml", - warn_for, - color_type=MessageType.ERROR, - ) + bar.error(f"Found count: {count}, limited to 1000.") return 1000 if count > 100 and warn_for is not None: - message( - f"Found large count: {count}.", - "generators.yaml", - warn_for, - color_type=MessageType.LOG, - ) + bar.log(f"Found large count: {count}.") return count # Count the number of testcases in the given directory yaml. # This parser is quite forgiving, - def count(yaml): + def count(yaml: YAML_TYPE) -> None: nonlocal num_numbered_test_cases + if not isinstance(yaml, dict): + return ds = yaml.get("data") if isinstance(ds, dict): ds = [ds] @@ -1686,8 +1676,10 @@ def count(yaml): # key: the yaml key e.g. 'testcase' # name_gen: each call should result in the next (possibly numbered) name e.g. 
'01-testcase' # Returns either a single Rule or a list of Rules - def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirectory): - name = name_gen() + def parse( + key: str, name_gen: Iterator[str], yaml: YAML_TYPE, parent: AnyDirectory + ) -> Directory | list[TestcaseRule]: + name = next(name_gen) assert_type("Testcase/directory", yaml, [type(None), str, dict], parent.path) if not is_testcase(yaml) and not is_directory(yaml): raise ParseException("not parsed as a testcase or directory.", parent.path / name) @@ -1701,7 +1693,7 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector ts = [] for count_index in range(count): if count_index > 0: - name = name_gen() + name = next(name_gen) if has_count(yaml): name += f"-{count_index + 1:0{len(str(count))}}" @@ -1711,11 +1703,8 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector t = TestcaseRule(self.problem, self, key, name, yaml, parent, count_index) if t.path in self.known_cases: - message( - "was already parsed. Skipping.", - "generators.yaml", - t.path, - color_type=MessageType.ERROR, + PrintBar("generators.yaml", item=t.path).error( + "was already parsed. Skipping." 
) continue @@ -1724,6 +1713,7 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector return ts assert is_directory(yaml) + assert isinstance(yaml, dict) d = Directory(self.problem, key, name, yaml, parent) if d.path in self.known_cases or d.path in self.known_directories: @@ -1739,11 +1729,11 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector assert_type("Elements of data", dictionary, dict, d.path) for key in dictionary.keys(): assert_type("Key of data", key, [type(None), str], d.path / str(key)) - for child_name, child_yaml in sorted(dictionary.items()): + for _, child_yaml in sorted(dictionary.items()): if is_directory(child_yaml): num_test_groups += 1 - test_group_id = 0 + next_test_group_id = itertools.count(1) for dictionary in data: for key in dictionary: assert_type("Test case/group name", key, [type(None), str], d.path) @@ -1782,32 +1772,21 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector child_yaml = dictionary[child_key] if d.numbered: if is_directory(child_yaml): - - def next_test_group_name(): - nonlocal test_group_id - test_group_id += 1 - return numbered_test_case_name( - child_key, test_group_id, num_test_groups - ) - - child_name = next_test_group_name + child_name = next_numbered_name( + child_key, next_test_group_id, num_test_groups + ) elif is_testcase(child_yaml): - - def next_test_case_name(): - nonlocal test_case_id - test_case_id += 1 - return numbered_test_case_name( - child_key, test_case_id, num_numbered_test_cases - ) - - child_name = next_test_case_name + child_name = next_numbered_name( + child_key, next_test_case_id, num_numbered_test_cases + ) else: # Use error will be given inside parse(child). 
- child_name = lambda: "" # noqa: E731 # TODO this can probably be prettier + child_name = itertools.repeat("") else: - child_name = lambda: child_key # noqa: E731 # TODO this can probably be prettier - if not child_name(): + assert isinstance(child_key, str) + child_name = itertools.repeat(child_key) + if not next(child_name): raise ParseException( "Unnumbered test cases must not have an empty key", d.path, @@ -1819,25 +1798,18 @@ def next_test_case_name(): d.data.append(c) # Include TestcaseRule t for the current directory. - def add_included_case(t: TestcaseRule): + def add_included_case(t: TestcaseRule) -> None: target = t.path name = target.name p = d.path / name if p in self.known_cases: + bar = PrintBar("generators.yaml", item=p) if target != self.known_cases[p].path: if self.known_cases[p].path == p: - message( - f"conflict with included case {target}.", - "generators.yaml", - p, - color_type=MessageType.ERROR, - ) + bar.error(f"conflict with included case {target}.") else: - message( - f"included with multiple targets {target} and {self.known_cases[p].path}.", - "generators.yaml", - p, - color_type=MessageType.ERROR, + bar.error( + f"included with multiple targets {target} and {self.known_cases[p].path}." 
) return self.known_cases[p] = t @@ -1846,26 +1818,17 @@ def add_included_case(t: TestcaseRule): if "include" in yaml: assert_type("includes", yaml["include"], list, d.path) + bar = PrintBar("generators.yaml", item=d.path) for include in yaml["include"]: assert_type("include", include, str, d.path) if "/" in include: - message( - f"Include {include} should be a test case/group key, not a path.", - "generators.yaml", - d.path, - color_type=MessageType.ERROR, - ) + bar.error(f"Include {include} should be a test case/group key, not a path.") continue if include in self.known_keys: is_included, cases_list = self.known_keys[include] if len(cases_list) != 1: - message( - f"Included key {include} exists more than once.", - "generators.yaml", - d.path, - color_type=MessageType.ERROR, - ) + bar.error(f"Included key {include} exists more than once.") continue self.known_keys[include] = (True, cases_list) @@ -1875,51 +1838,54 @@ def add_included_case(t: TestcaseRule): else: obj.walk( add_included_case, - lambda d: [add_included_case(t) for t in d.includes.values()], + lambda d: list(map(add_included_case, d.includes.values())), ) pass else: - message( - f"Unknown include key {include} does not refer to a previous test case.", - "generators.yaml", - d.path, - color_type=MessageType.ERROR, + bar.error( + f"Unknown include key {include} does not refer to a previous test case." ) continue return d - self.root_dir = parse("", lambda: "", yaml, RootDirectory()) + root_dir = parse("", itertools.repeat(""), yaml, RootDirectory()) + assert isinstance(root_dir, Directory) + self.root_dir = root_dir - def build(self, build_visualizers=True, skip_double_build_warning=False): + def build( + self, build_visualizers: bool = True, skip_double_build_warning: bool = False + ) -> None: generators_used: set[Path] = set() solutions_used: set[Path] = set() # Collect all programs that need building. - # Also, convert the default submission into an actual Invocation. 
- default_solution = None + # Also, set the default submission if needed. + # We only do this now to prevent instantiating + # the default solution when it's not actually needed. + default_solution: Optional[SolutionInvocation] = None - def collect_programs(t): + def collect_programs(t: TestcaseRule) -> None: if isinstance(t, TestcaseRule): if t.generator: generators_used.add(t.generator.program_path) + if config.args.no_solution: + t.config.solution = None + elif t.config.needs_default_solution: + # Initialize the default solution if needed. + nonlocal default_solution + if default_solution is None: + default_path = default_solution_path(self) + default_solution = SolutionInvocation(self.problem, str(default_path)) + t.config.solution = default_solution if t.config.solution: - if config.args.no_solution: - t.config.solution = None - else: - # Initialize the default solution if needed. - if t.config.solution is True: - nonlocal default_solution - if default_solution is None: - default_solution = DefaultSolutionInvocation(self) - t.config.solution = default_solution - solutions_used.add(t.config.solution.program_path) + solutions_used.add(t.config.solution.program_path) self.root_dir.walk(collect_programs, dir_f=None) def build_programs( program_type: type[program.Generator | run.Submission], program_paths: Iterable[Path], - ): + ) -> None: programs = list[program.Generator | run.Submission]() for program_path in program_paths: path = self.problem.path / program_path @@ -1942,7 +1908,7 @@ def build_programs( bar = ProgressBar(f"Build {program_type.__name__.lower()}s", items=programs) - def build_program(p): + def build_program(p: program.Generator | run.Submission) -> None: localbar = bar.start(p) p.build(localbar) localbar.done() @@ -1962,20 +1928,20 @@ def build_program(p): self.problem.validators(validate.AnswerValidator) self.problem.validators(validate.OutputValidator) - def cleanup_build_failures(t): + def cleanup_build_failures(t: TestcaseRule) -> None: if 
t.config.solution and t.config.solution.program is None: t.config.solution = None self.root_dir.walk(cleanup_build_failures, dir_f=None) - def run(self): + def run(self) -> None: self.update_gitignore_file() self.problem.reset_testcase_hashes() item_names = [] self.root_dir.walk(lambda x: item_names.append(x.path)) - def count_dir(d): + def count_dir(d: Directory) -> None: for name in d.includes: item_names.append(d.path / name) @@ -1990,12 +1956,13 @@ def count_dir(d): # included testcases. # 1 - def runner(t: TestcaseRule) -> Any: - return t.copy_of is None and t.generate(self.problem, self, bar) + def runner(t: TestcaseRule) -> None: + if t.copy_of is None: + t.generate(self.problem, self, bar) p = parallel.new_queue(runner) - def generate_dir(d): + def generate_dir(d: Directory) -> None: p.join() d.generate(self.problem, self, bar) @@ -2003,12 +1970,13 @@ def generate_dir(d): p.done() # 2 - def runner_copies(t: TestcaseRule): - return t.copy_of is not None and t.generate(self.problem, self, bar) + def runner_copies(t: TestcaseRule) -> None: + if t.copy_of is not None: + t.generate(self.problem, self, bar) p = parallel.new_queue(runner_copies) - def generate_copies_and_includes(d): + def generate_copies_and_includes(d: Directory) -> None: p.join() d.generate_includes(self.problem, self, bar) @@ -2018,7 +1986,7 @@ def generate_copies_and_includes(d): bar.finalize() # move a file or into the trash directory - def remove(self, src): + def remove(self, src: Path) -> None: if self.trash_dir is None: self.trash_dir = self.problem.tmpdir / "trash" / secrets.token_hex(4) dst = self.trash_dir / src.absolute().relative_to((self.problem.path / "data").absolute()) @@ -2026,7 +1994,7 @@ def remove(self, src): shutil.move(src, dst) - def _remove_unknown(self, path, bar, silent=False): + def _remove_unknown(self, path: Path, bar: ProgressBar, silent: bool = False) -> None: local = path.relative_to(self.problem.path / "data") keep = any( ( @@ -2054,7 +2022,7 @@ def 
_remove_unknown(self, path, bar, silent=False): bar.log(f"REMOVED: {path.name}") # remove all files in data that were not written by the during run - def clean_up(self): + def clean_up(self) -> None: bar = ProgressBar("Clean Up", max_len=-1) self._remove_unknown(self.problem.path / "data", bar) @@ -2063,7 +2031,7 @@ def clean_up(self): bar.finalize() # write a gitignore file to ignore everything in data/ except data/sample/ - def update_gitignore_file(self): + def update_gitignore_file(self) -> None: gitignorefile = self.problem.path / ".gitignore" content = """#GENERATED BY BAPCtools @@ -2086,7 +2054,7 @@ def update_gitignore_file(self): # add all testcases specified as copy keys in the generators.yaml # can handle files and complete directories - def add(self, to_add): + def add(self, to_add: Sequence[Path]) -> bool: if not has_ryaml: error( "generate --add needs the ruamel.yaml python3 library. Install python[3]-ruamel.yaml." @@ -2145,7 +2113,7 @@ def add(self, to_add): return True # reorder all testcases in the given directories - def reorder(self): + def reorder(self) -> bool: if self.n_parse_error > 0: return False @@ -2156,23 +2124,24 @@ def reorder(self): return False directory_rules = set() - for d in config.args.testcases: - path = d.relative_to("data") + assert config.args.testcases is not None # set in tools.py + for t in config.args.testcases: + path = t.relative_to("data") parts = path.parts if not parts: warn("Cannot reorder Root directory. Skipping.") elif parts[0] in config.INVALID_CASE_DIRECTORIES: - warn(f"{d} is used for invalid test data. Skipping.") + warn(f"{t} is used for invalid test data. Skipping.") elif parts[0] == "valid_output": - warn(f"{d} is used for valid test data. Skipping.") + warn(f"{t} is used for valid test data. Skipping.") elif parts[0] == "testing_tool_test": - warn(f"{d} is used to test the testing tool. Skipping.") + warn(f"{t} is used to test the testing tool. 
Skipping.") elif path not in self.known_directories: - warn(f"{d} is not a generated directory. Skipping.") + warn(f"{t} is not a generated directory. Skipping.") elif not self.known_directories[path].numbered: - warn(f"{d} is not numbered. Skipping.") + warn(f"{t} is not numbered. Skipping.") elif not self.known_directories[path].data: - warn(f"{d} is empty. Skipping.") + warn(f"{t} is empty. Skipping.") else: directory_rules.add(self.known_directories[path]) @@ -2206,10 +2175,11 @@ def reorder(self): testcase_paths = {t.in_path.relative_to(data).with_suffix("") for t in testcases} max_testcase_len = max([len(str(t)) for t in testcase_paths]) for d in directory_rules: - print(file=sys.stderr) - print(f"{Fore.CYAN}Reorder{Style.RESET_ALL}: {d.path}", file=sys.stderr) + eprint() + eprint(f"{Fore.CYAN}Reorder{Style.RESET_ALL}: {d.path}") # directory must be numbered + assert isinstance(d.yaml, dict) assert "data" in d.yaml assert isinstance(d.yaml["data"], list) @@ -2222,13 +2192,13 @@ def reorder(self): others = [e for e in d.yaml["data"] if id(next(iter(e.values()))) not in test_nodes] class TestcaseResult: - def __init__(self, yaml): + def __init__(self, yaml: dict[str, Any]) -> None: self.yaml = yaml - self.test_node = test_nodes[id(next(iter(yaml.values())))] + self.name = test_nodes[id(next(iter(yaml.values())))] self.scores = [] self.result = [] for i in range(len(submissions)): - verdict = verdict_table.results[i][self.test_node] + verdict = verdict_table.results[i][self.name] # moving TLE cases to the front is most important to save resources # RTE are less reliable and therefore less important than WA if verdict == Verdict.TIME_LIMIT_EXCEEDED: @@ -2237,15 +2207,15 @@ def __init__(self, yaml): self.scores.append((i, 4)) elif verdict == Verdict.RUNTIME_ERROR: self.scores.append((i, 3)) - self.result.append(verdict_table._get_verdict(i, self.test_node)) + self.result.append(verdict_table._get_verdict(i, self.name)) - def __str__(self): - return 
f"{Fore.CYAN}Reorder{Style.RESET_ALL}: {self.test_node:<{max_testcase_len}} {''.join(self.result)}" + def __str__(self) -> str: + return f"{Fore.CYAN}Reorder{Style.RESET_ALL}: {self.name:<{max_testcase_len}} {''.join(self.result)}" - def score(self, weights): + def score(self, weights: list[int]) -> int: return sum(weights[i] * x for i, x in self.scores) - def update(self, weights): + def update(self, weights: list[int]) -> list[int]: # the weights for each submission that did not fail on this testcase get doubled # up to a limit of 2**16. (The same as halving the weight of all submission that failed) weights = [x * 2 for x in weights] @@ -2265,13 +2235,13 @@ def update(self, weights): if id(next(iter(e.values()))) in test_nodes ] - # TODO: ProgressBar? # Each submission is initially assigned a weight of one. The weight contributes to the score of a testcase if # the submission fails on this testcase. If a testcase is selected the weights for each submission that it fails # get halved (or all other get doubled) to encourage making the remaining submissions fail. We greedily pick the # submission that has the heighest score. Note that we additionally consider the type of failing (WA/TLE/RTE) # see class TestcaseResult. 
# Worstcase runtime testcases^2 * submissions + bar = ProgressBar("Reorder", items=todo) done = [] weights = [1] * len(submissions) while todo: @@ -2281,9 +2251,15 @@ def update(self, weights): break index = scores.index(score) result = todo.pop(index) + localbar = bar.start(result) done.append(result.yaml) weights = result.update(weights) - print(result, file=sys.stderr) + localbar.log("moved to front") + localbar.done() + + for _ in todo: + bar.skip() + bar.finalize() # move all unknown subgroups/testcases to the end (keeping their relative order) d.yaml["data"].clear() @@ -2293,7 +2269,7 @@ def update(self, weights): write_yaml(self.yaml, generators_yaml) # regenerate cases - print(file=sys.stderr) + eprint() new_config = GeneratorConfig(self.problem, config.args.testcases) new_config.build(skip_double_build_warning=True) new_config.run() @@ -2303,21 +2279,21 @@ def update(self, weights): # Delete files in the tmpdir trash directory. By default all files older than 10min are removed # and additionally the oldest files are removed until the trash is less than 1 GiB -def clean_trash(problem, time_limit=10 * 60, size_lim=1024**3): +def clean_trash(problem: Problem, time_limit: int = 10 * 60, size_lim: int = 1024**3) -> None: trashdir = problem.tmpdir / "trash" if trashdir.exists(): dirs = [(d, path_size(d)) for d in trashdir.iterdir()] dirs.sort(key=lambda d: d[0].stat().st_mtime) total_size = sum(x for d, x in dirs) - time_limit = time.time() - time_limit + begin = time.time() - time_limit for d, x in dirs: - if x == 0 or total_size > size_lim or d.stat().st_mtime < time_limit: + if x == 0 or total_size > size_lim or d.stat().st_mtime < begin: total_size -= x shutil.rmtree(d) # Clean data/ and tmpdir/data/ -def clean_data(problem, data=True, cache=True): +def clean_data(problem: Problem, data: bool = True, cache: bool = True) -> None: dirs = [ problem.path / "data" if data else None, problem.tmpdir / "data" if cache else None, @@ -2327,7 +2303,7 @@ def 
clean_data(problem, data=True, cache=True): shutil.rmtree(d) -def generate(problem): +def generate(problem: Problem) -> bool: clean_trash(problem) if config.args.clean: @@ -2355,7 +2331,7 @@ def generate(problem): return True -def testcases(problem): +def testcases(problem: Problem) -> set[Path]: gen_config = GeneratorConfig(problem) if gen_config.has_yaml: return { diff --git a/bin/interactive.py b/bin/interactive.py index 369da89f3..b50df01d1 100644 --- a/bin/interactive.py +++ b/bin/interactive.py @@ -4,14 +4,25 @@ import sys import threading import time - +from collections.abc import Sequence from contextlib import nullcontext from pathlib import Path -from typing import Final, Literal, Optional, TYPE_CHECKING +from typing import Any, Final, IO, Literal, Optional, TYPE_CHECKING import config import validate -from util import * +from util import ( + eprint, + error, + exec_command, + ExecResult, + ExecStatus, + is_bsd, + is_windows, + limit_setter, + PrintBar, + ProgressBar, +) from verdicts import Verdict if TYPE_CHECKING: @@ -32,9 +43,9 @@ def run_interactive_testcase( # True: stderr # else: path interaction: Optional[bool | Path] = False, - submission_args: Optional[list[str]] = None, + submission_args: Optional[Sequence[str | Path]] = None, bar: Optional[ProgressBar] = None, -): +) -> Optional[ExecResult]: output_validators = run.problem.validators(validate.OutputValidator) if not output_validators: return None @@ -49,25 +60,23 @@ def run_interactive_testcase( memory = run.problem.limits.memory # Validator command - def get_validator_command(): + def get_validator_command() -> Sequence[str | Path]: assert output_validator.run_command, "Output validator must be built" - return ( - output_validator.run_command - + [ - run.in_path.absolute(), - run.testcase.ans_path.absolute(), - run.feedbackdir.absolute(), - ] - + run.testcase.test_case_yaml_args( + return [ + *output_validator.run_command, + run.in_path.absolute(), + run.testcase.ans_path.absolute(), + 
run.feedbackdir.absolute(), + *run.testcase.test_case_yaml_args( output_validator, bar or PrintBar("Run interactive test case"), - ) - ) + ), + ] assert run.submission.run_command, "Submission must be built" submission_command = run.submission.run_command if submission_args: - submission_command += submission_args + submission_command = [*submission_command, *submission_args] # Both validator and submission run in their own directory. validator_dir = output_validator.tmpdir @@ -76,8 +85,8 @@ def get_validator_command(): nextpass = run.feedbackdir / "nextpass.in" if run.problem.multi_pass else None if config.args.verbose >= 2: - print("Validator: ", *get_validator_command(), file=sys.stderr) - print("Submission: ", *submission_command, file=sys.stderr) + eprint("Validator: ", *get_validator_command()) + eprint("Submission: ", *submission_command) # On Windows: # - Start the validator @@ -173,7 +182,10 @@ def get_validator_command(): if not validator_err: validator_err = bytes() - if not verdict and not run._continue_with_tle(verdict, max_duration >= timeout): + if verdict == Verdict.TIME_LIMIT_EXCEEDED: + if not run._continue_with_tle(verdict, max_duration >= timeout): + break + elif verdict != Verdict.ACCEPTED: break if not run._prepare_nextpass(nextpass): @@ -312,7 +324,7 @@ def interrupt_handler(sig: Any, frame: Any) -> None: stop_kill_handler = threading.Event() submission_time: Optional[float] = None - def kill_handler_function(): + def kill_handler_function() -> None: if stop_kill_handler.wait(timeout + 1): return nonlocal submission_time @@ -444,7 +456,7 @@ def kill_handler_function(): team_err = submission.stderr.read().decode("utf-8", "replace") finally: # clean up resources - def close_io(stream): + def close_io(stream: Optional[IO[bytes]]) -> None: if stream: stream.close() @@ -478,7 +490,10 @@ def close_io(stream): else: tle_result.timeout_expired |= aborted - if not verdict and not run._continue_with_tle(verdict, aborted): + if verdict == 
Verdict.TIME_LIMIT_EXCEEDED: + if not run._continue_with_tle(verdict, max_duration >= timeout): + break + elif verdict != Verdict.ACCEPTED: break if not run._prepare_nextpass(nextpass): @@ -511,15 +526,12 @@ def close_io(stream): return tle_result -def _feedback(run, err): +def _feedback(run: "Run", err: bytes) -> str: judgemessage = run.feedbackdir / "judgemessage.txt" judgeerror = run.feedbackdir / "judgeerror.txt" - if err is None: - err = "" - else: - err = err.decode("utf-8", "replace") + res = "" if err is None else err.decode("utf-8", "replace") if judgeerror.is_file(): - err = judgeerror.read_text(errors="replace") - if len(err) == 0 and judgemessage.is_file(): - err = judgemessage.read_text(errors="replace") - return err + res = judgeerror.read_text(errors="replace") + if len(res) == 0 and judgemessage.is_file(): + res = judgemessage.read_text(errors="replace") + return res diff --git a/bin/latex.py b/bin/latex.py index 6fd114429..d3a05e7b6 100644 --- a/bin/latex.py +++ b/bin/latex.py @@ -3,22 +3,21 @@ import os import re import shutil -import sys +from collections.abc import Collection +from colorama import Fore, Style from enum import Enum from pathlib import Path -from typing import Optional, TYPE_CHECKING - -from colorama import Fore, Style +from typing import Optional, TextIO, TYPE_CHECKING import config from contest import contest_yaml, problems_yaml from util import ( copy_and_substitute, ensure_symlink, + eprint, exec_command, + ExecResult, fatal, - message, - MessageType, PrintBar, substitute, tail, @@ -59,7 +58,7 @@ def create_samples_file(problem: "Problem", language: str) -> None: samples_file_path.write_text("") return - def build_sample_command(content): + def build_sample_command(content: str) -> str: return f"\\expandafter\\def\\csname Sample{i + 1}\\endcsname{{{content}}}\n" samples_data = [] @@ -83,7 +82,7 @@ def build_sample_command(content): interaction_id = 0 pass_id = 1 - def flush(): + def flush() -> None: assert last in "<>" 
nonlocal current_sample, interaction_id @@ -124,7 +123,7 @@ def flush(): current_sample.append("\\MultipassSampleHeading{}\n") - def flush(): + def flush() -> None: nonlocal current_sample in_path = multi_pass_dir / f"{sample_name}-{pass_id:02}.in" @@ -181,15 +180,12 @@ def create_constants_file(problem: "Problem", language: str) -> None: # Steps needed for both problem and contest compilation. -def prepare_problem(problem: "Problem", language: str): +def prepare_problem(problem: "Problem", language: str) -> None: create_samples_file(problem, language) create_constants_file(problem, language) -def get_tl(problem: "Problem"): - tl = problem.limits.time_limit - tl = int(tl) if abs(tl - int(tl)) < 0.0001 else tl - +def get_tl(problem: "Problem") -> str: if "print_time_limit" in contest_yaml(): print_tl = contest_yaml()["print_time_limit"] elif "print_timelimit" in contest_yaml(): # TODO remove legacy at some point @@ -197,10 +193,14 @@ def get_tl(problem: "Problem"): else: print_tl = not config.args.no_time_limit - return tl if print_tl else "" + if not print_tl: + return "" + tl = problem.limits.time_limit + tl = int(tl) if abs(tl - int(tl)) < 0.0001 else tl + return str(tl) -def problem_data(problem: "Problem", language: str): +def problem_data(problem: "Problem", language: str) -> dict[str, Optional[str]]: background = next( ( p["rgb"][1:] @@ -243,7 +243,7 @@ def make_environment() -> dict[str, str]: ] texinputs = os.pathsep.join(map(str, latex_paths)) if config.args.verbose >= 2: - print(f"export TEXINPUTS='{texinputs}'", file=sys.stderr) + eprint(f"export TEXINPUTS='{texinputs}'") env["TEXINPUTS"] = texinputs return env @@ -254,7 +254,7 @@ def build_latex_pdf( language: str, bar: PrintBar, problem_path: Optional[Path] = None, -): +) -> bool: env = make_environment() if shutil.which("latexmk") is None: @@ -311,7 +311,7 @@ def build_latex_pdf( latexmk_command.append(tex_path.absolute()) - def run_latexmk(stdout, stderr): + def run_latexmk(stdout: Optional[TextIO], 
stderr: Optional[TextIO]) -> ExecResult: logfile.unlink(True) return exec_command( latexmk_command, @@ -345,9 +345,9 @@ def run_latexmk(stdout, stderr): if not ret.status: bar.error("Failure compiling PDF:") if ret.out is not None: - print(ret.out, file=sys.stderr) + eprint(ret.out) if logfile.exists(): - print(logfile, file=sys.stderr) + eprint(logfile) bar.error(f"return code {ret.returncode}") bar.error(f"duration {ret.duration}\n") return False @@ -363,7 +363,9 @@ def run_latexmk(stdout, stderr): # substituting variables. # 2. Create tmpdir//latex//{samples,constants}.tex. # 3. Run latexmk and link the resulting ..pdf into the problem directory. -def build_problem_pdf(problem: "Problem", language: str, build_type=PdfType.PROBLEM, web=False): +def build_problem_pdf( + problem: "Problem", language: str, build_type: PdfType = PdfType.PROBLEM, web: bool = False +) -> bool: """ Arguments: -- language: str, the two-letter language code appearing the file name, such as problem.en.tex @@ -388,19 +390,18 @@ def build_problem_pdf(problem: "Problem", language: str, build_type=PdfType.PROB return build_latex_pdf(builddir, builddir / main_file, language, bar, problem.path) -def build_problem_pdfs(problem: "Problem", build_type=PdfType.PROBLEM, web=False): +def build_problem_pdfs( + problem: "Problem", build_type: PdfType = PdfType.PROBLEM, web: bool = False +) -> bool: """Build PDFs for various languages. If list of languages is specified, (either via config files or --lang arguments), build those. Otherwise build all languages for which there is a statement latex source. 
""" + bar = PrintBar(problem.name) if config.args.lang is not None: for lang in config.args.lang: if lang not in problem.statement_languages: - message( - f"No statement source for language {lang}", - problem.name, - color_type=MessageType.FATAL, - ) + bar.fatal(f"No statement source for language {lang}") languages = config.args.lang else: languages = problem.statement_languages @@ -411,11 +412,7 @@ def build_problem_pdfs(problem: "Problem", build_type=PdfType.PROBLEM, web=False if (problem.path / build_type.path(lang)).exists(): filtered_languages.append(lang) else: - message( - f"{build_type.path(lang)} not found", - problem.name, - color_type=MessageType.WARN, - ) + bar.warn(f"{build_type.path(lang)} not found") languages = filtered_languages if config.args.watch and len(languages) > 1: fatal("--watch does not work with multiple languages. Please use --lang") @@ -436,8 +433,8 @@ def build_contest_pdf( problems: list["Problem"], tmpdir: Path, language: str, - build_type=PdfType.PROBLEM, - web=False, + build_type: PdfType = PdfType.PROBLEM, + web: bool = False, ) -> bool: builddir = tmpdir / contest / "latex" / language builddir.mkdir(parents=True, exist_ok=True) @@ -535,38 +532,36 @@ def build_contest_pdf( return build_latex_pdf(builddir, Path(main_file), language, bar) -def build_contest_pdfs(contest, problems, tmpdir, lang=None, build_type=PdfType.PROBLEM, web=False): +def build_contest_pdfs( + contest: str, + problems: list["Problem"], + tmpdir: Path, + lang: Optional[str] = None, + build_type: PdfType = PdfType.PROBLEM, + web: bool = False, +) -> bool: if lang: return build_contest_pdf(contest, problems, tmpdir, lang, build_type, web) + bar = PrintBar(contest) """Build contest PDFs for all available languages""" statement_languages = set.intersection(*(set(p.statement_languages) for p in problems)) if not statement_languages: - message( - "No statement language present in every problem.", contest, color_type=MessageType.FATAL - ) + bar.fatal("No statement 
language present in every problem.") if config.args.lang is not None: - languages = config.args.lang + languages: Collection[str] = config.args.lang for lang in set(languages) - statement_languages: - message( - f"Unable to build all statements for language {lang}", - contest, - color_type=MessageType.FATAL, - ) + bar.fatal(f"Unable to build all statements for language {lang}") else: languages = statement_languages if config.args.watch and len(languages) > 1: - message( - "--watch does not work with multiple languages. Please use --lang", - contest, - color_type=MessageType.FATAL, - ) + bar.fatal("--watch does not work with multiple languages. Please use --lang") return all( build_contest_pdf(contest, problems, tmpdir, lang, build_type, web) for lang in languages ) -def get_argument_for_command(texfile, command): +def get_argument_for_command(texfile: TextIO, command: str) -> Optional[str]: """Return the (whitespace-normalised) argument for the given command in the given texfile. If texfile contains `\foo{bar baz }`, returns the string 'bar baz'. The command is given without backslash. diff --git a/bin/misc/rejudge_analyser.py b/bin/misc/rejudge_analyser.py index 95f8b455b..4cf7547af 100755 --- a/bin/misc/rejudge_analyser.py +++ b/bin/misc/rejudge_analyser.py @@ -5,8 +5,8 @@ # those that were AC originally and on the latest rejudge. 
import json -from pathlib import Path import matplotlib.pyplot as plt +from pathlib import Path def read_json(path): diff --git a/bin/parallel.py b/bin/parallel.py index af265f9a3..4121f003e 100644 --- a/bin/parallel.py +++ b/bin/parallel.py @@ -12,7 +12,7 @@ class QueueItem(Generic[T]): - def __init__(self, task: T, priority: int, index: int): + def __init__(self, task: T, priority: int, index: int) -> None: self.task = task self.priority = priority self.index = index @@ -29,7 +29,7 @@ def __lt__(self, other: "QueueItem[T]") -> bool: class AbstractQueue(Generic[T]): - def __init__(self, f: Callable[[T], Any], pin: bool): + def __init__(self, f: Callable[[T], Any], pin: bool) -> None: self.f = f self.pin = pin self.num_threads = 1 @@ -70,7 +70,7 @@ def ensure_alive(self) -> None: class SequentialQueue(AbstractQueue[T]): - def __init__(self, f: Callable[[T], Any], pin: bool): + def __init__(self, f: Callable[[T], Any], pin: bool) -> None: super().__init__(f, pin) # Add one task. Higher priority => done first @@ -101,7 +101,7 @@ def done(self) -> None: class ParallelQueue(AbstractQueue[T]): - def __init__(self, f: Callable[[T], Any], pin: bool, num_threads: int): + def __init__(self, f: Callable[[T], Any], pin: bool, num_threads: int) -> None: super().__init__(f, pin) assert num_threads and type(num_threads) is int diff --git a/bin/problem.py b/bin/problem.py index fd19a45de..659470cce 100644 --- a/bin/problem.py +++ b/bin/problem.py @@ -1,34 +1,62 @@ import datetime import re import shutil -import sys import threading - from collections.abc import Callable, Sequence +from colorama import Fore, Style from pathlib import Path -from typing import Any, Final, Literal, Optional, overload, TypeVar, TYPE_CHECKING +from typing import Any, Final, Literal, Optional, overload, TYPE_CHECKING, TypeVar if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 from program import Program +import math + +import check_testing_tool import config import latex 
-import math import parallel import run import testcase -import check_testing_tool import validate import validator_tests import verdicts import visualize -from util import * -from colorama import Fore, Style +from util import ( + BAR_TYPE, + combine_hashes_dict, + drop_suffix, + eprint, + error, + fatal, + generate_problem_uuid, + glob, + has_ryaml, + hash_file_content, + is_relative_to, + is_uuid, + log, + normalize_yaml_value, + parse_yaml, + PrintBar, + ProgressBar, + read_yaml, + read_yaml_settings, + resolve_path_argument, + ryaml_get_or_add, + substitute, + verbose, + warn, + write_yaml, +) + +if has_ryaml: + import ruamel.yaml # The parse_* functions will remove (.pop()) keys from the yaml data during parsing. # We will warn for any unknown keys that remain after this process. -def check_unknown_keys(yaml_data: dict[str, Any], sub_key: Optional[str] = None): +def check_unknown_keys(yaml_data: dict[str, Any], sub_key: Optional[str] = None) -> None: for key in yaml_data: assert isinstance(key, str) warn(f"found unknown problem.yaml key: {key} in {f'`{sub_key}`' if sub_key else 'root'}") @@ -39,14 +67,9 @@ def check_unknown_keys(yaml_data: dict[str, Any], sub_key: Optional[str] = None) def parse_optional_setting(yaml_data: dict[str, Any], key: str, t: type[T]) -> Optional[T]: if key in yaml_data: - value = yaml_data.pop(key) - if isinstance(value, int) and t is float: - value = float(value) + value = normalize_yaml_value(yaml_data.pop(key), t) if isinstance(value, t): return value - if value == "" and (t is list or t is dict): - # handle empty yaml keys - return t() warn(f"incompatible value for key '{key}' in problem.yaml. 
SKIPPED.") return None @@ -193,7 +216,7 @@ def source_from_dict(source_dict: dict[str, str]) -> ProblemSource: self.append(source_from_dict(source)) return if isinstance(yaml_data["source"], list): - sources = parse_setting(yaml_data, "source", list[dict[str, str]]()) + sources = parse_setting(yaml_data, "source", list[Any]()) for i, source in enumerate(sources): if isinstance(source, str): self.append(ProblemSource(source)) @@ -458,7 +481,7 @@ def __init__(self, path: Path, tmpdir: Path, label: Optional[str] = None): if (self.path / "data" / d).is_dir(): warn(f"Found directory: data/{d}, should be: data/{d[:-1]} (singular form).") - def _determine_statement_languages(self): + def _determine_statement_languages(self) -> list[str]: """Determine the languages that are both mentioned in the problem.yaml under name and have a corresponding problem statement. @@ -505,7 +528,7 @@ def _determine_statement_languages(self): ) return sorted(texlangs & yamllangs) - def _read_settings(self): + def _read_settings(self) -> None: # parse problem.yaml yaml_path = self.path / "problem.yaml" if has_ryaml: @@ -534,7 +557,7 @@ def _read_settings(self): self.custom_output: bool = self.settings.custom_output # TODO #102 move to a new TestGroup class - def _parse_test_case_and_groups_yaml(p, path: Path, bar: BAR_TYPE): + def _parse_test_case_and_groups_yaml(p, path: Path, bar: BAR_TYPE) -> None: assert path.is_relative_to(p.path / "data"), f"{path} is not in data" for f in [path] + list(path.parents): # Do not go above the data directory. 
@@ -560,12 +583,6 @@ def _parse_test_case_and_groups_yaml(p, path: Path, bar: BAR_TYPE): flags, "input_validator_flags", validate.InputValidator.args_key ) - # Use variable kwargs so the type checker does not complain when passing them to a PrintBar (nothing happens in that case anyway) - bar_warn_kwargs = {} if isinstance(bar, PrintBar) else {"print_item": False} - bar_error_kwargs = ( - {} if isinstance(bar, PrintBar) else {"resume": True, "print_item": False} - ) - # Verify test_group.yaml for k in flags: match k: @@ -577,17 +594,11 @@ def _parse_test_case_and_groups_yaml(p, path: Path, bar: BAR_TYPE): ): if not isinstance(flags[k], list): bar.error( - f"{k} must be a list of strings", - None, - **bar_error_kwargs, + f"{k} must be a list of strings", resume=True, print_item=False ) case validate.InputValidator.args_key: if not isinstance(flags[k], (list, dict)): - bar.error( - f"{k} must be list or map", - None, - **bar_error_kwargs, - ) + bar.error(f"{k} must be list or map", resume=True, print_item=False) if isinstance(flags[k], dict): input_validator_names = set( val.name for val in p.validators(validate.InputValidator) @@ -595,20 +606,18 @@ def _parse_test_case_and_groups_yaml(p, path: Path, bar: BAR_TYPE): for name in set(flags[k]) - input_validator_names: bar.warn( f"Unknown input validator {name}; expected {input_validator_names}", - None, - **bar_warn_kwargs, + print_item=False, ) case "description" | "hint": pass # We don't do anything with hint or description in BAPCtools, but no need to warn about this case "args" | "full_feedback" | "scoring" | "static_validation": bar.warn( f"{k} in test_group.yaml not implemented in BAPCtools", - None, - **bar_warn_kwargs, + print_item=False, ) case _: path = f.relative_to(p.path / "data") - bar.warn(f'Unknown key "{k}" in {path}', None, **bar_warn_kwargs) + bar.warn(f'Unknown key "{k}" in {path}', print_item=False) def get_test_case_yaml( p, @@ -674,12 +683,13 @@ def get_test_case_yaml( return [] return args elif 
name in args: - if not isinstance(args[name], list) or any( - not isinstance(arg, str) for arg in args[name] + args = args[name] + if not isinstance(args, list) or any( + not isinstance(arg, str) for arg in args ): bar.error(f"{key} must be list of strings or map of lists") return [] - return args[name] + return args elif key in known_args_keys: if not isinstance(args, list) or any(not isinstance(arg, str) for arg in args): bar.error(f"{key} must be a list of strings") @@ -692,7 +702,7 @@ def get_test_case_yaml( # this cache makes sure that some warnings (like malformed test case names) only appear once. _warned_for_test_case = set[str]() - def _warn_once(p, test_name, msg): + def _warn_once(p, test_name: str, msg: str) -> None: if test_name not in p._warned_for_test_case: p._warned_for_test_case.add(test_name) warn(msg) @@ -701,9 +711,9 @@ def testcases( p, *, mode: Optional[validate.Mode] = None, - needans=True, - only_samples=False, - testing_tool_test=False, + needans: bool = True, + only_samples: bool = False, + testing_tool_test: bool = False, ) -> Sequence[testcase.Testcase]: only_samples = config.args.samples or only_samples @@ -716,15 +726,15 @@ def testcases( assert not only_samples # Deduplicate testcases with both .in and .ans. in_paths = [] - for t in config.args.testcases: - t = resolve_path_argument(p, t, "data", suffixes=[".in"]) - if t: + for path in config.args.testcases: + res_path = resolve_path_argument(p, path, "data", suffixes=[".in"]) + if res_path: # When running from contest level, the testcase must be inside the problem. 
- if config.level != "problemset" or is_relative_to(p.path, t): - if t.is_dir(): - in_paths += glob(t, "**/*.in") + if config.level != "problemset" or is_relative_to(p.path, res_path): + if res_path.is_dir(): + in_paths += glob(res_path, "**/*.in") else: - in_paths.append(t) + in_paths.append(res_path) in_paths = list(set(in_paths)) elif mode is not None: @@ -763,14 +773,20 @@ def testcases( and mode in [validate.Mode.INVALID, validate.Mode.VALID_OUTPUT] and t.root in ["invalid_output", "valid_output"] ): - warn( - f"Found file {f} for {mode} validation in {p.settings.type_name()} problem. Skipping." + p._warn_once( + t.name, + f"Found file {f} for {mode} validation in {p.settings.type_name()} problem. Skipping.", ) continue if needans and not t.ans_path.is_file(): if t.root != "invalid_input": p._warn_once(t.name, f"Found input file {f} without a .ans file. Skipping.") continue + if t.root in ["valid_output", "invalid_output"]: + assert t.out_path is not None + if not t.out_path.is_file(): + p._warn_once(t.name, f"Found input file {f} without a .out file. 
Skipping.") + continue if mode == validate.Mode.VALID_OUTPUT: if t.out_path is None: continue @@ -934,7 +950,7 @@ def submissions(problem) -> list[run.Submission] | Literal[False]: paths = [] if config.args.submissions: - def add(s): + def add(s: Path) -> None: if s in paths: warn(f"Ignoring duplicate submission: {s}") return @@ -970,21 +986,21 @@ def add(s): programs = [run.Submission(problem, path) for path in paths] - # - first all submission with just one verdict (sorted by that verdict) + # - first all submission with just one verdict (grouped by that verdict and sorted by the path) # - then by subdir # - then by list of verdicts # - then by name - def submissions_key(x): - if len(x.expected_verdicts) == 1: - return (1, x.expected_verdicts[0], x.name) - else: - return (len(x.expected_verdicts), x.subdir, x.expected_verdicts, x.name) + def submissions_key( + x: run.Submission, + ) -> tuple[int, str, Sequence[verdicts.Verdict], str, str]: + group = "" if len(x.expected_verdicts) == 1 else x.subdir + return (len(x.expected_verdicts), group, x.expected_verdicts, x.subdir, x.name) programs.sort(key=submissions_key) bar = ProgressBar("Build submissions", items=programs) - def build_program(p): + def build_program(p: run.Submission) -> None: localbar = bar.start(p) p.build(localbar) localbar.done() @@ -1030,9 +1046,9 @@ def visualizer( def validators( problem, cls: type[validate.AnyValidator], - check_constraints=False, - strict=False, - print_warn=True, + check_constraints: bool = False, + strict: bool = False, + print_warn: bool = True, ) -> Sequence[validate.AnyValidator]: """ Gets the validators of the given class. 
@@ -1073,7 +1089,7 @@ def validators( return validators if build_ok else [] def _validators( - problem, cls: type[validate.AnyValidator], check_constraints=False + problem, cls: type[validate.AnyValidator], check_constraints: bool = False ) -> list[validate.AnyValidator]: key = (cls, check_constraints) if key in problem._validators_cache: @@ -1252,21 +1268,17 @@ def make_verdict(tc: testcase.Testcase) -> str: scores[t.name] += 1.0 / failures scores_list = sorted(scores.values()) - print( + eprint( "\nVerdict analysis table. Submissions are ordered per column as above. Higher " - "scores indicate they are critical to break some submissions. Only cases breaking at least one submission are listed.", - file=sys.stderr, + "scores indicate they are critical to break some submissions. Only cases breaking at least one submission are listed." ) fail = ( verdicts.to_char(verdicts.Verdict.WRONG_ANSWER) + verdicts.to_char(verdicts.Verdict.TIME_LIMIT_EXCEEDED) + verdicts.to_char(verdicts.Verdict.RUNTIME_ERROR) ) - print(f"{fail}: submission fails testcase", file=sys.stderr) - print( - f"{verdicts.to_char(verdicts.Verdict.ACCEPTED)}: submission passes testcase\n", - file=sys.stderr, - ) + eprint(f"{fail}: submission fails testcase") + eprint(f"{verdicts.to_char(verdicts.Verdict.ACCEPTED)}: submission passes testcase\n") name_col_width = min(50, max([len(testcase.name) for testcase in testcases])) @@ -1284,7 +1296,7 @@ def make_verdict(tc: testcase.Testcase) -> str: if len(name) > name_col_width: name = "..." 
+ name[-name_col_width + 3 :] padding = " " * (name_col_width - len(name)) - print(f"{Fore.CYAN}{name}{Style.RESET_ALL}:{padding}", end=" ", file=sys.stderr) + eprint(f"{Fore.CYAN}{name}{Style.RESET_ALL}:{padding}", end=" ") color = Style.RESET_ALL if len(scores_list) > 6 and scores[case.name] >= scores_list[-6]: @@ -1292,11 +1304,11 @@ def make_verdict(tc: testcase.Testcase) -> str: if len(scores_list) > 3 and scores[case.name] >= scores_list[-3]: color = Fore.RED resultant = make_verdict(case) - print(resultant, end=" ", file=sys.stderr) - print(f"{color}{scores[case.name]:0.3f}{Style.RESET_ALL} ", end="", file=sys.stderr) + eprint(resultant, end=" ") + eprint(f"{color}{scores[case.name]:0.3f}{Style.RESET_ALL} ", end="") if resultant in resultant_id: - print(str.format("(Type {})", resultant_id[resultant]), end="", file=sys.stderr) - print(end="\n", file=sys.stderr) + eprint(f"(Type {resultant_id[resultant]})", end="") + eprint() # called by bt check_testing_tool def check_testing_tool(problem) -> bool: @@ -1349,14 +1361,15 @@ def matches_existing_testcase(self, t: testcase.Testcase) -> Optional[testcase.T return None def validate_data( - problem, mode: validate.Mode, constraints: dict | Literal[True] | None = None + problem, + mode: validate.Mode, + constraints: validate.ConstraintsDict | Literal[True] | None = None, ) -> bool: """Validate aspects of the test data files. Arguments: mode: validate.Mode.INPUT | validate.Mode.ANSWER | validate.Mode.INVALID | validate.Mode.VALID_OUTPUT constraints: True | dict | None. True means "do check constraints but discard the result." - False: TODO is this ever used? Return: True if all validation was successful. Successful validation includes, e.g., correctly rejecting invalid inputs. 
@@ -1375,6 +1388,7 @@ def validate_data( return problem._validate_data(mode, constraints, action, testcases) def validate_invalid_extra_data(p) -> bool: + assert config.args.generic is not None base_path = p.tmpdir / "invalid_data" # pick at most first 3 samples (assuming they are valid and have .ans) # also add a dummy entry to always run generators that don't read or copy anything from a valid testcase @@ -1454,6 +1468,7 @@ def validate_invalid_extra_data(p) -> bool: ) def validate_valid_extra_data(p) -> bool: + assert config.args.generic is not None if "valid_output" not in config.args.generic: return True if p.interactive or p.multi_pass: @@ -1554,7 +1569,7 @@ def _validate_data( # validate the testcases bar = ProgressBar(action, items=[t.name for t in testcases]) - def process_testcase(testcase: testcase.Testcase): + def process_testcase(testcase: testcase.Testcase) -> None: nonlocal success localbar = bar.start(testcase.name) @@ -1596,7 +1611,7 @@ def process_testcase(testcase: testcase.Testcase): return success - def determine_time_limit(problem): + def determine_time_limit(problem) -> bool: ts_pair = problem.prepare_run() if not ts_pair: return False @@ -1607,7 +1622,10 @@ def determine_time_limit(problem): problem.limits.time_limit_is_default = False problem.limits.timeout = problem.limits.time_limit + 1 - def run_all(select_verdict, select): + def run_all( + select_verdict: Callable[[Sequence[verdicts.Verdict]], bool], + select: Callable[[Sequence[float]], float], + ) -> tuple[str, str, float] | tuple[None, None, None]: nonlocal ok cur_submissions = [s for s in submissions if select_verdict(s.expected_verdicts)] @@ -1620,7 +1638,7 @@ def run_all(select_verdict, select): ok = False return None, None, None - def get_slowest(result): + def get_slowest(result: verdicts.Verdicts) -> tuple[str, float]: slowest_pair = result.slowest_test_case() assert slowest_pair is not None return slowest_pair @@ -1637,6 +1655,8 @@ def get_slowest(result): if submission is 
None: error("No AC submissions found") return False + assert testcase is not None + assert duration is not None problem.limits.time_limit = problem.limits.time_resolution * math.ceil( duration * problem.limits.ac_to_time_limit / problem.limits.time_resolution @@ -1656,35 +1676,37 @@ def get_slowest(result): limits["time_limit"] = problem.limits.time_limit write_yaml(problem_yaml, problem.path / "problem.yaml") - print(file=sys.stderr) - message(f"{duration:.3f}s @ {testcase} ({submission})", "slowest AC") - message( - f"{problem.limits.time_limit}s >= {duration:.3f}s * {problem.limits.ac_to_time_limit}", - "time limit", + eprint() + PrintBar("slowest AC").log(f" {duration:.3f}s @ {testcase} ({submission})", color="") + PrintBar("time limit").log( + f" {problem.limits.time_limit:.1f}s >= {duration:.3f}s * {problem.limits.ac_to_time_limit}", + color="", ) - message( - f"{safety_time_limit}s >= {problem.limits.time_limit}s * {problem.limits.time_limit_to_tle}", - "safety limit", + PrintBar("safety limit").log( + f"{safety_time_limit:.1f}s >= {problem.limits.time_limit:.1f}s * {problem.limits.time_limit_to_tle}", + color="", ) - message( - f"{problem.limits.timeout}s >= {problem.limits.time_limit}s * {problem.limits.time_limit_to_tle}²", - "timeout", + PrintBar("timeout").log( + f" {problem.limits.timeout:.1f}s >= {problem.limits.time_limit:.1f}s * {problem.limits.time_limit_to_tle}²", + color="", ) - print(file=sys.stderr) + eprint() submission, testcase, duration = run_all( lambda vs: vs == [verdicts.Verdict.TIME_LIMIT_EXCEEDED], min ) if submission is not None: - print(file=sys.stderr) - message(f"{duration:.3f}s @ {testcase} ({submission})", "fastest TLE") + assert testcase is not None + assert duration is not None + eprint() + PrintBar("fastest TLE").log(f" {duration:.3f}s @ {testcase} ({submission})", color="") if duration <= problem.limits.time_limit: error("TLE submission runs within time limit") elif duration <= safety_time_limit: warn("TLE submission runs 
within safety margin") elif duration >= problem.limits.timeout: log(f"No TLE submission finished within {problem.limits.timeout}s") - print(file=sys.stderr) + eprint() else: log("No TLE submissions found") @@ -1695,6 +1717,8 @@ def get_slowest(result): max, ) if submission is not None: + assert testcase is not None + assert duration is not None if duration > problem.limits.time_limit: warn("Non TLE submission timed out") else: diff --git a/bin/program.py b/bin/program.py index d48235502..967a21d15 100644 --- a/bin/program.py +++ b/bin/program.py @@ -1,22 +1,40 @@ import re +import shlex import shutil import stat -import shlex import subprocess import threading +from collections.abc import Callable, Mapping, Sequence from colorama import Fore from pathlib import Path -from typing import Any, Final, Mapping, Optional, Sequence, TypeVar, TYPE_CHECKING +from typing import Any, Final, Optional, TYPE_CHECKING, TypeVar import config -from util import * +from util import ( + combine_hashes, + copy_and_substitute, + ensure_symlink, + error, + exec_command, + ExecResult, + ExecStatus, + fatal, + glob, + has_substitute, + hash_file, + ProgressBar, + read_yaml, + strip_newline, + warn, + write_yaml, +) if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 from problem import Problem class Language: - def __init__(self, lang_id: str, conf: dict[str, Any]): + def __init__(self, lang_id: str, conf: dict[str, Any]) -> None: self.ok = True self.id = lang_id @@ -201,7 +219,7 @@ def __init__( skip_double_build_warning: bool = False, limits: dict[str, int] = {}, substitute_constants: bool = False, - ): + ) -> None: if deps is not None: assert isinstance(self, Generator) assert isinstance(deps, list) @@ -236,8 +254,8 @@ def __init__( self.name: str = str(relpath) self.tmpdir = problem.tmpdir / self.subdir / self.name - self.compile_command: Optional[list[str]] = None - self.run_command: Optional[list[str]] = None + self.compile_command: Optional[Sequence[str 
| Path]] = None + self.run_command: Optional[Sequence[str | Path]] = None self.hash: Optional[str] = None self.env: dict[str, int | str | Path] = {} self.limits: dict[str, int] = limits @@ -589,14 +607,14 @@ def _exec_command(self, *args: Any, **kwargs: Any) -> ExecResult: return exec_command(*args, **kwargs) @staticmethod - def add_callback(problem: "Problem", path: Path, c: Callable[["Program"], None]) -> None: + def add_callback(problem: "Problem", path: Path, c: Callable[["Program"], Any]) -> None: if path not in problem._program_callbacks: problem._program_callbacks[path] = [] problem._program_callbacks[path].append(c) class Generator(Program): - def __init__(self, problem: "Problem", path: Path, **kwargs: Any): + def __init__(self, problem: "Problem", path: Path, **kwargs: Any) -> None: super().__init__( problem, path, @@ -610,7 +628,7 @@ def __init__(self, problem: "Problem", path: Path, **kwargs: Any): # May write files in |cwd| and stdout is piped to {name}.in if it's not written already. # Returns ExecResult. Success when result.status == ExecStatus.ACCEPTED. 
def run( - self, bar: ProgressBar, cwd: Path, name: str, args: list[str | Path] = [] + self, bar: ProgressBar, cwd: Path, name: str, args: Sequence[str | Path] = [] ) -> ExecResult: assert self.run_command is not None @@ -630,7 +648,7 @@ def run( with stdout_path.open("w") as stdout_file: result = self._exec_command( - self.run_command + args, + [*self.run_command, *args], stdout=stdout_file, cwd=cwd, ) diff --git a/bin/run.py b/bin/run.py index 4707fdd47..5661e8c07 100644 --- a/bin/run.py +++ b/bin/run.py @@ -2,7 +2,7 @@ import shutil import subprocess import sys - +from collections.abc import Sequence from colorama import Fore, Style from contextlib import nullcontext from pathlib import Path @@ -20,6 +20,7 @@ BAR_TYPE, crop_output, ensure_symlink, + eprint, error, ExecResult, ExecStatus, @@ -29,11 +30,13 @@ shorten_path, warn, ) -from verdicts import from_string, from_string_domjudge, RunUntil, Verdict, Verdicts +from verdicts import from_string, from_string_domjudge, RunUntil, Verdict, Verdicts, VerdictTable class Run: - def __init__(self, problem: "problem.Problem", submission: "Submission", testcase: Testcase): + def __init__( + self, problem: "problem.Problem", submission: "Submission", testcase: Testcase + ) -> None: self.problem = problem self.submission = submission self.testcase = testcase @@ -60,7 +63,13 @@ def __init__(self, problem: "problem.Problem", submission: "Submission", testcas ensure_symlink(self.in_path, self.testcase.in_path) # Return an ExecResult object amended with verdict. 
- def run(self, bar, *, interaction=None, submission_args=None): + def run( + self, + bar: ProgressBar, + *, + interaction: Optional[bool | Path] = None, + submission_args: Optional[Sequence[str | Path]] = None, + ) -> ExecResult: if self.problem.interactive: result = interactive.run_interactive_testcase( self, interaction=interaction, submission_args=submission_args, bar=bar @@ -80,12 +89,13 @@ def run(self, bar, *, interaction=None, submission_args=None): Verdict.VALIDATOR_CRASH, ) else: + assert interaction is not True if interaction: assert not interaction.is_relative_to(self.tmpdir) with interaction.open("a") if interaction else nullcontext(None) as interaction_file: nextpass = self.feedbackdir / "nextpass.in" if self.problem.multi_pass else None pass_id = 0 - max_duration = 0 + max_duration = 0.0 tle_result = None while True: pass_id += 1 @@ -186,7 +196,7 @@ def run(self, bar, *, interaction=None, submission_args=None): ): self.out_path.unlink() - if result.verdict and (self.feedbackdir / "nextpass.in").is_file(): + if result.verdict != Verdict.ACCEPTED and (self.feedbackdir / "nextpass.in").is_file(): assert not self.problem.multi_pass bar.warn("Validator created nextpass.in for non multi-pass problem. 
Ignored.") @@ -194,7 +204,7 @@ def run(self, bar, *, interaction=None, submission_args=None): return result # check if we should continue after tle - def _continue_with_tle(self, verdict, timeout_expired): + def _continue_with_tle(self, verdict: Verdict, timeout_expired: bool) -> bool: if not self.problem.multi_pass: return False if config.args.all == 2 or config.args.reorder: @@ -203,10 +213,14 @@ def _continue_with_tle(self, verdict, timeout_expired): return False if timeout_expired: return False - return config.args.verbose or config.args.all or config.args.action in ["all", "time_limit"] + return ( + config.args.verbose > 0 + or config.args.all > 0 + or config.args.action in ["all", "time_limit"] + ) # prepare next pass - def _prepare_nextpass(self, nextpass): + def _prepare_nextpass(self, nextpass: Optional[Path]) -> bool: if not nextpass or not nextpass.is_file(): return False # clear all files outside of feedbackdir @@ -221,7 +235,7 @@ def _prepare_nextpass(self, nextpass): shutil.move(nextpass, self.in_path) return True - def _validate_output(self, bar: BAR_TYPE) -> Optional[ExecResult]: + def _validate_output(self, bar: ProgressBar) -> Optional[ExecResult]: output_validators = self.problem.validators(validate.OutputValidator) if not output_validators: return None @@ -249,7 +263,9 @@ def _visualize_output(self, bar: BAR_TYPE) -> Optional[ExecResult]: class Submission(program.Program): - def __init__(self, problem, path, skip_double_build_warning=False): + def __init__( + self, problem: "problem.Problem", path: Path, skip_double_build_warning: bool = False + ) -> None: super().__init__( problem, path, @@ -263,7 +279,7 @@ def __init__(self, problem, path, skip_double_build_warning=False): skip_double_build_warning=skip_double_build_warning, ) - self.verdict = None + self.verdict: Optional[Verdict] = None self.duration = None # The first element will match the directory the file is in, if possible. 
@@ -345,7 +361,15 @@ def _get_expected_verdicts(self) -> list[Verdict]: # Returns ExecResult # The `generator_timeout` argument is used when a submission is run as a solution when # generating testcases. - def run(self, in_path, out_path, crop=True, args=[], cwd=None, generator_timeout=False): + def run( + self, + in_path: Path, + out_path: Path, + crop: bool = True, + args: Sequence[str | Path] = [], + cwd: Optional[Path] = None, + generator_timeout: bool = False, + ) -> ExecResult: assert self.run_command is not None # Just for safety reasons, change the cwd. if cwd is None: @@ -356,7 +380,7 @@ def run(self, in_path, out_path, crop=True, args=[], cwd=None, generator_timeout ): # Print stderr to terminal is stdout is None, otherwise return its value. result = self._exec_command( - self.run_command + args, + [*self.run_command, *args], crop=crop, stdin=in_file, stdout=out_file, @@ -375,11 +399,11 @@ def run(self, in_path, out_path, crop=True, args=[], cwd=None, generator_timeout def run_testcases( self, max_submission_name_len: int, - verdict_table, - testcases, + verdict_table: VerdictTable, + testcases: Sequence[Testcase], *, - needs_leading_newline, - ): + needs_leading_newline: bool, + ) -> tuple[bool, bool]: runs = [Run(self.problem, self, testcase) for testcase in testcases] max_testcase_len = max(len(run.name) for run in runs) if self.problem.multi_pass: @@ -411,13 +435,14 @@ def run_testcases( needs_leading_newline=needs_leading_newline, ) - def process_run(run: Run): + def process_run(run: Run) -> None: if not verdicts.run_is_needed(run.name): bar.skip() return localbar = bar.start(run) result = run.run(localbar) + assert result.verdict is not None verdict_table.update_verdicts(run.name, result.verdict, result.duration) @@ -485,8 +510,9 @@ def process_run(run: Run): parallel.run_tasks(process_run, runs, pin=True) - self.verdict = verdicts["."] - assert isinstance(self.verdict, Verdict), "Verdict of root must not be empty" + verdict = verdicts["."] + assert 
isinstance(verdict, Verdict), "Verdict of root must not be empty" + self.verdict = verdict # Use a bold summary line if things were printed before. if bar.logged: @@ -533,13 +559,13 @@ def process_run(run: Run): if config.args.tree: verdict_table.print(force=True, new_lines=0) verdict_table.last_printed = [] - print(file=sys.stderr) + eprint() printed_newline = True return self.verdict in self.expected_verdicts, printed_newline - def test(self): - print(ProgressBar.action("Running", str(self.name)), file=sys.stderr) + def test(self) -> None: + eprint(ProgressBar.action("Running", str(self.name))) testcases = self.problem.testcases(needans=False) @@ -548,7 +574,7 @@ def test(self): for testcase in testcases: header = ProgressBar.action("Running " + str(self.name), testcase.name) - print(header, file=sys.stderr) + eprint(header) if not self.problem.interactive: assert self.run_command is not None @@ -568,9 +594,8 @@ def test(self): elif not result.status and result.status != ExecStatus.TIMEOUT: config.n_error += 1 status = None - print( - f"{Fore.RED}Run time error!{Style.RESET_ALL} exit code {result.returncode} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{Fore.RED}Run time error!{Style.RESET_ALL} exit code {result.returncode} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) elif ( result.duration > self.problem.limits.time_limit @@ -582,38 +607,41 @@ def test(self): status = f"{Fore.GREEN}Done:" if status: - print( - f"{status}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{status}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) - print(file=sys.stderr) + eprint() else: # Interactive problem. 
run = Run(self.problem, self, testcase) - result = interactive.run_interactive_testcase( + optional_result = interactive.run_interactive_testcase( run, interaction=True, validator_error=None, team_error=None ) + if optional_result is None: + config.n_error += 1 + eprint( + f"{Fore.RED}No output validator found for testcase {testcase.name}{Style.RESET_ALL}" + ) + continue + result = optional_result if result.verdict != Verdict.ACCEPTED: config.n_error += 1 - print( - f"{Fore.RED}{result.verdict}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{Fore.RED}{result.verdict}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) else: - print( - f"{Fore.GREEN}{result.verdict}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{Fore.GREEN}{result.verdict}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) # Run the submission using stdin as input. 
- def test_interactive(self): + def test_interactive(self) -> None: if not self.problem.validators(validate.OutputValidator): return bar = ProgressBar("Running " + str(self.name), max_len=1, count=1) bar.start() - # print(ProgressBar.action('Running', str(self.name)), file=sys.stderr) is_tty = sys.stdin.isatty() @@ -677,19 +705,17 @@ def test_interactive(self): if not result.status: config.n_error += 1 status = None - print( - f"{Fore.RED}Run time error!{Style.RESET_ALL} exit code {result.returncode} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{Fore.RED}Run time error!{Style.RESET_ALL} exit code {result.returncode} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) else: status = f"{Fore.GREEN}Done:" if status: - print( - f"{status}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{status}{Style.RESET_ALL} {Style.BRIGHT}{result.duration:6.3f}s{Style.RESET_ALL}" ) - print(file=sys.stderr) + eprint() finally: os.close(r) os.close(w) diff --git a/bin/skel.py b/bin/skel.py index 2a17f45fc..d90cb4849 100644 --- a/bin/skel.py +++ b/bin/skel.py @@ -1,5 +1,5 @@ -import os import datetime +import os import re import shutil from pathlib import Path @@ -9,7 +9,23 @@ import contest import latex from problem import Problem -from util import * +from util import ( + ask_variable_bool, + ask_variable_choice, + ask_variable_string, + copytree_and_substitute, + error, + exec_command, + fatal, + generate_problem_uuid, + has_ryaml, + inc_label, + log, + read_yaml, + substitute, + warn, + write_yaml, +) from validate import OutputValidator @@ -247,7 +263,6 @@ def copy_skel_dir(problems: list[Problem]) -> None: skeldir, preserve_symlinks = get_skel_dir(problem.path) for d in config.args.directory: - d = Path(d) sources = [skeldir / d, skeldir / d.parent / (d.name + ".template")] target = problem.path / d @@ -277,14 +292,14 @@ def create_gitlab_jobs(contest: str, problems: 
list[Problem]) -> None: error("git command not found!") return - def git(*args): + def git(*args: str | Path) -> str: res = exec_command( ["git", *args], crop=False, preexec_fn=False, timeout=None, ) - return res.out if res else "" + return res.out if res and res.out else "" if not git("rev-parse", "--is-inside-work-tree").startswith("true"): error("not inside git") diff --git a/bin/slack.py b/bin/slack.py index 0f16426da..e6bad1e86 100644 --- a/bin/slack.py +++ b/bin/slack.py @@ -1,5 +1,11 @@ +from typing import Any, TYPE_CHECKING + import config -from util import * +from problem import Problem +from util import error, fatal, log, verbose + +if TYPE_CHECKING: + import requests # Perform slack actions for the selected problems (all, or the selected/current one). # - create a slack channel @@ -11,7 +17,7 @@ # It can be passed as `--token ` or stored in `.bapctools.yaml` as `token: `. -def call_slack_api(path, **kwargs): +def call_slack_api(path: str, **kwargs: Any) -> "requests.Response": import requests # Slow import, so only import it inside this function. 
verbose(f"Calling slack api {path}") @@ -26,7 +32,7 @@ def call_slack_api(path, **kwargs): return result -def get_channel_ids(): +def get_channel_ids() -> dict[str, str]: r = call_slack_api("conversations.list").json() if not r["ok"]: fatal(r["error"]) @@ -37,24 +43,25 @@ def get_channel_ids(): return channel_ids -def get_user_id(username): +def get_user_id(username: str) -> str: r = call_slack_api("users.list").json() if not r["ok"]: fatal(r["error"]) members = r["members"] for m in members: if m["profile"]["real_name"] == username or m["profile"]["display_name"] == username: + assert isinstance(m["id"], str) return m["id"] fatal(f"User {username} not found") # Function to create a slack channel for each problem -def create_slack_channels(problems): +def create_slack_channels(problems: list[Problem]) -> None: for p in problems: create_slack_channel(p.name) -def create_slack_channel(name): +def create_slack_channel(name: str) -> None: r = call_slack_api("conversations.create", name=name) if not r.ok: error(r.text) @@ -66,7 +73,7 @@ def create_slack_channel(name): log(f"Created channel {name}") -def join_slack_channels(problems, username): +def join_slack_channels(problems: list[Problem], username: str) -> None: userid = get_user_id(username) channel_ids = get_channel_ids() @@ -74,7 +81,7 @@ def join_slack_channels(problems, username): join_slack_channel(p.name, channel_ids[p.name], username, userid) -def join_slack_channel(channel_name, channel_id, username, userid): +def join_slack_channel(channel_name: str, channel_id: str, username: str, userid: str) -> None: # The bot account invites the user to the channel. 
r = call_slack_api("conversations.invite", channel=channel_id, users=userid) if not r.ok: diff --git a/bin/solve_stats.py b/bin/solve_stats.py index 21d9e0c96..c84ce064a 100644 --- a/bin/solve_stats.py +++ b/bin/solve_stats.py @@ -1,11 +1,11 @@ -from os import makedirs from multiprocessing import Pool +from os import makedirs from pathlib import Path -from typing import Optional +from typing import Any, Optional import config import parallel -from contest import get_contest_id, call_api_get_json +from contest import call_api_get_json, get_contest_id from util import ProgressBar # Note on multiprocessing: @@ -22,7 +22,7 @@ # Turns an endpoint list result into an object, mapped by 'id' -def get_json_assoc(url: str) -> dict[str, dict]: +def get_json_assoc(url: str) -> dict[str, dict[str, Any]]: return {o["id"]: o for o in call_api_get_json(url)} @@ -34,8 +34,8 @@ def time_string_to_minutes(time_string: str) -> float: def plot_problem( minutes: list[dict[str, int]], label: str, - judgement_types: dict[str, dict], -): + judgement_types: dict[str, dict[str, Any]], +) -> None: import matplotlib.pyplot as plt # Have to import it separately in multiprocessing worker. 
fig, ax = plt.subplots(figsize=(12, 2)) @@ -63,7 +63,7 @@ def plot_problem( fig.savefig(f"solve_stats/activity/{label}.pdf", bbox_inches="tight", transparent=True) -def generate_solve_stats(post_freeze: bool): +def generate_solve_stats(post_freeze: bool) -> None: # Import takes more than 1000 ms to evaluate, so only import inside function (when it is actually needed) import matplotlib import matplotlib.pyplot as plt @@ -87,12 +87,12 @@ def generate_solve_stats(post_freeze: bool): contest_duration = time_string_to_minutes(contest["duration"]) scale = contest_duration / bins - def get_contest_data(tuple): - i, endpoint = tuple + def get_contest_data(i_endpoint: tuple[int, str]) -> None: + i, endpoint = i_endpoint data[i] = get_json_assoc(url_prefix + endpoint) bar.start("Contest data") - data: list[Optional[dict]] = [None] * 5 + data: list[Optional[dict[str, Any]]] = [None] * 5 parallel.run_tasks( get_contest_data, list( diff --git a/bin/stats.py b/bin/stats.py index eb7671b28..2b9537c18 100644 --- a/bin/stats.py +++ b/bin/stats.py @@ -1,13 +1,11 @@ import shutil import statistics -import sys -from collections.abc import Callable +from collections.abc import Callable, Sequence +from colorama import ansi, Fore, Style from datetime import datetime, timedelta, timezone from dateutil import parser from pathlib import Path -from typing import Any, cast, Literal, Optional, Sequence - -from colorama import ansi, Fore, Style +from typing import Any, cast, Literal, Optional import config import generate @@ -15,7 +13,7 @@ import program import validate from problem import Problem -from util import error, exec_command, glob, log, warn +from util import eprint, error, exec_command, glob, log, warn Selector = ( str | Callable[[Problem], int | float] | list[str] | list[Callable[[set[Path]], set[str]]] @@ -24,9 +22,6 @@ def stats(problems: list[Problem]) -> None: - if config.args.more is not None: - config.args.all = config.args.more - warn("--more is deprecated, use --all 
instead!\n") problem_stats(problems) if config.args.all: more_stats(problems) @@ -140,7 +135,7 @@ def problem_stats(problems: list[Problem]) -> None: format_string += " {:>" + str(width + len(Fore.WHITE) + len(Style.RESET_ALL)) + "}" header = header_string.format(*headers) - print(Style.BRIGHT + header + Style.RESET_ALL, file=sys.stderr) + eprint(Style.BRIGHT + header + Style.RESET_ALL) for problem in problems: generated_testcases = generate.testcases(problem) @@ -212,30 +207,24 @@ def value(x: Stat) -> Optional[int | float]: else: comment = Fore.YELLOW + comment + Style.RESET_ALL - print( + eprint( format_string.format( f"{problem.label} {problem.name}", *[ _get_stat( counts[i], - # mypy does not support variable-length tuples very well: - # https://github.com/python/mypy/pull/16237#:~:text=indirect%20comparisons - True if len(stats[i]) <= 2 else stats[i][2], # type: ignore[misc] - None if len(stats[i]) <= 3 else stats[i][3], # type: ignore[misc] + True if len(stat) <= 2 else stat[2], + None if len(stat) <= 3 else stat[3], ) - for i in range(len(stats)) + for i, stat in enumerate(stats) ], comment, ), - file=sys.stderr, ) # print the cumulative count - print("-" * len(header), file=sys.stderr) - print( - format_string.format("TOTAL", *(_get_stat(x, False) for x in cumulative), ""), - file=sys.stderr, - ) + eprint("-" * len(header)) + eprint(format_string.format("TOTAL", *(_get_stat(x, False) for x in cumulative), "")) try: @@ -248,7 +237,7 @@ def value(x: Stat) -> Optional[int | float]: has_pygments = False -def _is_code(language: str, type, text) -> bool: +def _is_code(language: str, type: Any, text: str) -> bool: if type in pygments.token.Comment and type not in ( pygments.token.Comment.Preproc, # pygments treats preprocessor statements as comments pygments.token.Comment.PreprocFile, @@ -305,7 +294,7 @@ def more_stats(problems: list[Problem]) -> None: return if not Path("submissions").is_dir(): - print(file=sys.stderr) + eprint() log( "No team submissions found, 
try running 'bt download_submissions' to get stats for team submissions." ) @@ -327,10 +316,10 @@ def get_stats(values: Sequence[float | int]) -> list[Optional[float | int]]: + f" {{:>{stat_len + len(Fore.WHITE)}}}{Style.RESET_ALL}" * len(columns) ) - print(file=sys.stderr) + eprint() header = header_string.format("", *columns) - print(Style.BRIGHT + header + Style.RESET_ALL, file=sys.stderr) - print("-" * len(header), file=sys.stderr) + eprint(Style.BRIGHT + header + Style.RESET_ALL) + eprint("-" * len(header)) def format_value( value: Optional[str | float | int | timedelta], default_color: str = Fore.WHITE @@ -403,19 +392,19 @@ def get_submissions_row( # handle jury solutions best_jury = get_submissions_row("Jury", True, False) - print(format_row(*best_jury), file=sys.stderr) + eprint(format_row(*best_jury)) for display_name, names in languages.items(): values = get_submissions_row(display_name, names, False) for i in range(1, 1 + len(problems)): if values[i] == best_jury[i]: values[i] = format_value(values[i], Fore.CYAN) - print(format_row(*values), file=sys.stderr) + eprint(format_row(*values)) # handle team submissions if Path("submissions").is_dir(): - print("-" * len(header), file=sys.stderr) + eprint("-" * len(header)) best_team = get_submissions_row("Teams", True, True) - print(format_row(*best_team), file=sys.stderr) + eprint(format_row(*best_team)) for display_name, names in languages.items(): values = get_submissions_row(display_name, names, True) for i in range(1, 1 + len(problems)): @@ -427,7 +416,7 @@ def get_submissions_row( leq_jury = True if values[i] == best_team[i] and leq_jury: values[i] = format_value(values[i], Fore.CYAN) - print(format_row(*values), file=sys.stderr) + eprint(format_row(*values)) # git stats if shutil.which("git") is None: @@ -450,10 +439,10 @@ def git(*args: str | Path) -> str: def parse_time(date: str) -> Optional[datetime]: return parser.parse(date) if date else None - print("-" * len(header), file=sys.stderr) + eprint("-" 
* len(header)) testcases = [len(generate.testcases(p)) for p in problems] testcase_stats = get_stats(testcases) - print(format_row("Testcases", *testcases, *testcase_stats), file=sys.stderr) + eprint(format_row("Testcases", *testcases, *testcase_stats)) changed: list[Optional[float | int]] = [] for p in problems: times = [ @@ -470,7 +459,7 @@ def parse_time(date: str) -> Optional[datetime]: changed += get_stats([c for c in changed if c is not None]) changed[-4] = None # sum of last changed is meaningless... changed_times = [timedelta(seconds=s) if s is not None else None for s in changed] - print(format_row("└╴changed", *changed_times), file=sys.stderr) + eprint(format_row("└╴changed", *changed_times)) # this is hacky and does not handle all renames properly... # for example: if A is renamed to C and B is renamed to A this will break @@ -492,22 +481,19 @@ def countCommits(problem: Problem) -> int: commits = [countCommits(p) for p in problems] commit_stats = get_stats(commits) commit_stats[-4] = None # one commit can change multiple problems so the sum is meaningless... 
- print(format_row("Commits", *commits, *commit_stats), file=sys.stderr) - print(file=sys.stderr) - print( + eprint(format_row("Commits", *commits, *commit_stats)) + eprint() + eprint( f"{Fore.CYAN}Total Commits{Style.RESET_ALL}:", int(git("rev-list", "--all", "--count")), - file=sys.stderr, ) - print( + eprint( f"{Fore.CYAN}Total Authors{Style.RESET_ALL}:", git("shortlog", "--group=%ae", "-s").count("\n"), - file=sys.stderr, ) duration = datetime.now(timezone.utc) - parser.parse( git("log", "--reverse", "--format=%cI").partition("\n")[0] ) - print( - f"{Fore.CYAN}Preparation{Style.RESET_ALL}: {duration.days}d, {duration.seconds // 3600}h", - file=sys.stderr, + eprint( + f"{Fore.CYAN}Preparation{Style.RESET_ALL}: {duration.days}d, {duration.seconds // 3600}h" ) diff --git a/bin/testcase.py b/bin/testcase.py index 05cf5992a..294a9f54c 100644 --- a/bin/testcase.py +++ b/bin/testcase.py @@ -5,21 +5,21 @@ from pathlib import Path from typing import Optional, TYPE_CHECKING +import config +import validate from util import ( BAR_TYPE, - ExecStatus, combine_hashes_dict, + ExecStatus, fatal, print_name, ProgressBar, shorten_path, ) -import config -import validate if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 - import visualize import problem + import visualize # TODO #102: Consistently separate the compound noun "test case", e.g. 
"TestCase" or "test_case" @@ -282,6 +282,7 @@ def _run_validators( ) -> bool: args = [] results = [] + output_validator_crash = False for validator in validators: name = validator.name if isinstance(validator, validate.OutputValidator) and mode == validate.Mode.ANSWER: @@ -336,11 +337,31 @@ def _run_validators( data = ret.err if expect_rejection: - bar.debug( - message, - data=data, - color=Fore.GREEN if ret.status == ExecStatus.REJECTED else Fore.YELLOW, - ) + warn = False + if ( + isinstance(validator, validate.OutputValidator) + and ret.status == ExecStatus.ERROR + ): + output_validator_crash = True + warn = True + elif ret.status == ExecStatus.TIMEOUT: + warn = True + else: + color = Fore.GREEN if ret.status == ExecStatus.REJECTED else Fore.YELLOW + + if warn: + bar.part_done( + False, + message, + data=data, + warn_instead_of_error=warn_instead_of_error, + ) + else: + bar.debug( + message, + data=data, + color=color, + ) elif ret.status == ExecStatus.ERROR and ret.returncode == 0: bar.part_done( False, @@ -356,7 +377,11 @@ def _run_validators( warn_instead_of_error=warn_instead_of_error, ) - if ret.status or self.root in [*config.INVALID_CASE_DIRECTORIES, "valid_output"]: + if ( + ret.status + or expect_rejection + or self.root in [*config.INVALID_CASE_DIRECTORIES, "valid_output"] + ): continue # Move testcase to destination directory if specified. @@ -384,15 +409,19 @@ def _run_validators( break if expect_rejection: - success = ExecStatus.REJECTED in results - accepted = all(results) + issues = [] + if all(results): + issues.append("All validators accepted.") + elif ExecStatus.REJECTED not in results: + issues.append(f"At least one validator must exit with {config.RTV_WA}.") + elif ExecStatus.TIMEOUT in results: + issues.append("Validator timed out.") + if output_validator_crash: + issues.append("Output Validator crashed.") + + success = not issues if not success: - reason = ( - "All validators accepted." 
- if accepted - else f"At least one validator must exit with {config.RTV_WA}." - ) - msg = f"was not properly rejected by {mode} validation. {reason}" + msg = f"was not properly rejected by {mode} validation. {' '.join(issues)}" if warn_instead_of_error: bar.warn(msg) else: diff --git a/bin/tools.py b/bin/tools.py index 1a7c07ad8..5fd62dba2 100755 --- a/bin/tools.py +++ b/bin/tools.py @@ -15,40 +15,56 @@ """ import argparse +import colorama import hashlib import os +import re +import shutil +import signal import sys import tempfile -import shutil - -import colorama -import re - from collections import Counter from colorama import Style from pathlib import Path -from typing import Any, cast, Optional +from typing import Any, Optional # Local imports import config import constraints import contest +import download_submissions import export -import generate import fuzz +import generate import latex import skel import slack import solve_stats -import download_submissions import stats import upgrade import validate -import signal - +from contest import call_api_get_json, contest_yaml, get_contest_id, problems_yaml from problem import Problem -from contest import * -from util import * +from util import ( + AbortException, + ask_variable_bool, + eprint, + error, + fatal, + glob, + has_ryaml, + inc_label, + is_problem_directory, + is_relative_to, + is_windows, + log, + ProgressBar, + read_yaml, + resolve_path_argument, + verbose, + warn, + write_yaml, +) if not is_windows(): import argcomplete # For automatic shell completions @@ -82,12 +98,10 @@ def change_directory() -> Optional[Path]: problem_dir: Optional[Path] = None config.level = "problemset" if config.args.contest: - # TODO #102: replace cast with typed Namespace field - contest_dir = cast(Path, config.args.contest).absolute() + contest_dir = config.args.contest.absolute() os.chdir(contest_dir) if config.args.problem: - # TODO #102: replace cast with typed Namespace field - problem_dir = cast(Path, 
config.args.problem).absolute() + problem_dir = config.args.problem.absolute() elif is_problem_directory(Path.cwd()): problem_dir = Path.cwd().absolute() if problem_dir is not None: @@ -190,17 +204,17 @@ def fallback_problems() -> list[tuple[Path, str]]: # Sort by position of id in order def get_pos(id: Optional[str]) -> int: - if id in order: + if id and id in order: return order.index(id) else: return len(order) - problems.sort(key=lambda p: (get_pos(p.label), p.label)) + problems.sort(key=lambda p: (get_pos(p.label), p.label, p.name)) if config.args.order_from_ccs: # Sort by increasing difficulty, extracted from the CCS api. class ProblemStat: - def __init__(self): + def __init__(self) -> None: self.solved = 0 self.submissions = 0 self.pending = 0 @@ -307,7 +321,7 @@ def split_submissions_and_testcases(s: list[Path]) -> tuple[list[Path], list[Pat # If we would not do this, it would not be possible to check which keys are explicitly set from the command line. # This check is necessary when loading the personal config file in `read_personal_config`. 
class SuppressingParser(argparse.ArgumentParser): - def __init__(self, **kwargs): + def __init__(self, **kwargs: Any) -> None: super(SuppressingParser, self).__init__(**kwargs, argument_default=argparse.SUPPRESS) @@ -381,9 +395,8 @@ def build_parser() -> SuppressingParser: global_parser.add_argument("--lang", nargs="+", help="Languages to include.") subparsers = parser.add_subparsers( - title="actions", dest="action", parser_class=SuppressingParser + title="actions", dest="action", parser_class=SuppressingParser, required=True ) - subparsers.required = True # upgrade subparsers.add_parser( @@ -437,6 +450,7 @@ def build_parser() -> SuppressingParser: skelparser.add_argument( "directory", nargs="+", + type=Path, help="Directories to copy from skel/problem/, relative to the problem directory.", ) skelparser.add_argument("--skel", help="Skeleton problem directory to copy from.") @@ -678,7 +692,7 @@ def build_parser() -> SuppressingParser: help="Generate random testcases and search for inconsistencies in AC submissions.", ) fuzzparser.add_argument("--time", type=int, help="Number of seconds to run for. Default: 600") - fuzzparser.add_argument("--time-limit", "-t", type=int, help="Time limit for submissions.") + fuzzparser.add_argument("--time-limit", "-t", type=float, help="Time limit for submissions.") fuzzparser.add_argument( "submissions", nargs="*", @@ -746,7 +760,9 @@ def build_parser() -> SuppressingParser: type=int, help="Override the default timeout. Default: 1.5 * time_limit + 1.", ) - runparser.add_argument("--time-limit", "-t", type=int, help="Override the default time-limit.") + runparser.add_argument( + "--time-limit", "-t", type=float, help="Override the default time-limit." 
+ ) runparser.add_argument( "--no-testcase-sanity-checks", action="store_true", @@ -989,6 +1005,7 @@ def build_parser() -> SuppressingParser: ) tmpparser.add_argument( "--clean", + "-C", action="store_true", help="Delete the temporary cache directory for the current problem/contest.", ) @@ -1053,8 +1070,7 @@ def find_personal_config() -> Optional[Path]: ) -def read_personal_config(problem_dir: Optional[Path]) -> dict[str, Any]: - args = {} +def read_personal_config(problem_dir: Optional[Path]) -> None: home_config = find_personal_config() # possible config files, sorted by priority config_files = [] @@ -1067,12 +1083,16 @@ def read_personal_config(problem_dir: Optional[Path]) -> dict[str, Any]: for config_file in config_files: if not config_file.is_file(): continue - config_data = read_yaml(config_file) or {} - for arg, value in config_data.items(): - if arg not in args: - args[arg] = value - return args + config_data = read_yaml(config_file) + if not config_data: + continue + if not isinstance(config_data, dict): + warn(f"invalid data in {config_data}. 
SKIPPED.") + continue + + tmp = config.ARGS(config_file, **config_data) + config.args.update(tmp) def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) -> None: @@ -1081,8 +1101,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) os.environ["MALLOC_PERTURB_"] = str(0b01011001) # Process arguments - config.args = args - missing_args = config.set_default_args() + config.args = config.ARGS("args", **vars(args)) # cd to contest directory call_cwd = Path.cwd().absolute() @@ -1091,10 +1110,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) contest_name = Path.cwd().name if personal_config: - personal_args = read_personal_config(problem_dir) - for arg in missing_args: - if arg in personal_args: - setattr(config.args, arg, personal_args[arg]) + read_personal_config(problem_dir) action = config.args.action @@ -1159,7 +1175,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) checked_paths.append(path) config.args.add = checked_paths - if config.args.reorder is not None: + if config.args.reorder: # default to 'data/secret' if not config.args.testcases: config.args.testcases = [Path("data/secret")] @@ -1187,7 +1203,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) if level_tmpdir.is_file(): level_tmpdir.unlink() else: - print(level_tmpdir) + eprint(level_tmpdir) return @@ -1247,6 +1263,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) return if action == "join_slack_channels": + assert config.args.username is not None slack.join_slack_channels(problems, config.args.username) return @@ -1261,7 +1278,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) and not config.args.all ): continue - print(Style.BRIGHT, "PROBLEM ", problem.name, Style.RESET_ALL, sep="", file=sys.stderr) + eprint(Style.BRIGHT, "PROBLEM ", problem.name, Style.RESET_ALL, sep="") 
if action in ["generate"]: success &= generate.generate(problem) @@ -1270,7 +1287,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) and not config.args.no_generate ): # Call `generate` with modified arguments. - old_args = argparse.Namespace(**vars(config.args)) + old_args = config.args.copy() config.args.jobs = (os.cpu_count() or 1) // 2 config.args.add = None config.args.verbose = 0 @@ -1307,7 +1324,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) if action == "all" or not specified or config.args.invalid: success &= problem.validate_data(validate.Mode.INVALID) if action == "all" or not specified or config.args.generic is not None: - if not config.args.generic: + if config.args.generic is None: config.args.generic = [ "invalid_input", "invalid_answer", @@ -1340,7 +1357,7 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) if not config.args.skip: if not config.args.no_generate: # Set up arguments for generate. 
- old_args = argparse.Namespace(**vars(config.args)) + old_args = config.args.copy() config.args.check_deterministic = not config.args.force config.args.add = None config.args.verbose = 0 @@ -1370,14 +1387,14 @@ def run_parsed_arguments(args: argparse.Namespace, personal_config: bool = True) success &= export.build_problem_zip(problem, output) if len(problems) > 1: - print(file=sys.stderr) + eprint() if action in ["export"]: languages = export.select_languages(problems) export.export_contest_and_problems(problems, languages) if level == "problemset": - print(f"{Style.BRIGHT}CONTEST {contest_name}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"{Style.BRIGHT}CONTEST {contest_name}{Style.RESET_ALL}") # build pdf for the entire contest if action in ["pdf"]: diff --git a/bin/upgrade.py b/bin/upgrade.py index 5362e9c4f..3bf6ae358 100644 --- a/bin/upgrade.py +++ b/bin/upgrade.py @@ -1,17 +1,29 @@ -import config -import generate -import shlex -from collections import defaultdict -from util import * -from validate import InputValidator, AnswerValidator, OutputValidator - import secrets +import shlex import shutil +from collections import defaultdict +from collections.abc import Callable from pathlib import Path -from typing import Any, cast +from typing import Any, cast, Optional + +import config +import generate +from util import ( + error, + fatal, + has_ryaml, + is_problem_directory, + ProgressBar, + read_yaml, + ryaml_filter, + ryaml_get_or_add, + ryaml_replace, + warn, + write_yaml, +) +from validate import AnswerValidator, InputValidator, OutputValidator if has_ryaml: - # TODO #102 The conditional import in util.py isn't picked up properly from ruamel.yaml.comments import CommentedMap, CommentedSeq @@ -73,7 +85,7 @@ def movetree(src: Path, dst: Path) -> None: movetree(src_base, dst_base) -def args_split(args: str): +def args_split(args: str) -> "CommentedSeq": splitted = CommentedSeq(shlex.split(args)) splitted.fa.set_flow_style() return splitted @@ -269,7 +281,7 @@ 
def move_testcase(name: str, value: Any, new_parent: str) -> None: changed = True def apply_recursively( - operation: Callable[[dict[str, Any], str], bool], data: dict[str, Any], path="" + operation: Callable[[dict[str, Any], str], bool], data: dict[str, Any], path: str = "" ) -> bool: changed = operation(data, path) if "data" in data and data["data"]: diff --git a/bin/util.py b/bin/util.py index 57376599f..50e0a908e 100644 --- a/bin/util.py +++ b/bin/util.py @@ -13,29 +13,27 @@ import tempfile import threading import time -from enum import Enum +import yaml as yamllib from collections.abc import Callable, Mapping, Sequence +from colorama import Fore, Style +from enum import Enum +from io import StringIO from pathlib import Path from typing import ( - cast, Any, + cast, Iterable, Literal, NoReturn, Optional, overload, Protocol, - TextIO, + TYPE_CHECKING, TypeAlias, TypeVar, - TYPE_CHECKING, ) from uuid import UUID -import yaml as yamllib -from colorama import Fore, Style -from io import StringIO - import config try: @@ -108,37 +106,43 @@ def exit1(force: bool = False) -> NoReturn: sys.exit(1) +# we almost always want to print to stderr +def eprint(*args: Any, **kwargs: Any) -> None: + kwargs.setdefault("file", sys.stderr) + print(*args, **kwargs) + + def debug(*msg: Any) -> None: - print(Fore.CYAN, end="", file=sys.stderr) - print("DEBUG:", *msg, end="", file=sys.stderr) - print(Style.RESET_ALL, file=sys.stderr) + eprint(Fore.CYAN, end="") + eprint("DEBUG:", *msg, end="") + eprint(Style.RESET_ALL) def log(msg: Any) -> None: - print(f"{Fore.GREEN}LOG: {msg}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"{Fore.GREEN}LOG: {msg}{Style.RESET_ALL}") def verbose(msg: Any) -> None: if config.args.verbose >= 1: - print(f"{Fore.CYAN}VERBOSE: {msg}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"{Fore.CYAN}VERBOSE: {msg}{Style.RESET_ALL}") def warn(msg: Any) -> None: - print(f"{Fore.YELLOW}WARNING: {msg}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"{Fore.YELLOW}WARNING: 
{msg}{Style.RESET_ALL}") config.n_warn += 1 def error(msg: Any) -> None: if config.RUNNING_TEST: fatal(msg) - print(f"{Fore.RED}ERROR: {msg}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"{Fore.RED}ERROR: {msg}{Style.RESET_ALL}") config.n_error += 1 def fatal(msg: Any, *, force: Optional[bool] = None) -> NoReturn: if force is None: force = threading.active_count() > 1 - print(f"\n{Fore.RED}FATAL ERROR: {msg}{Style.RESET_ALL}", file=sys.stderr) + eprint(f"\n{Fore.RED}FATAL ERROR: {msg}{Style.RESET_ALL}") exit1(force) @@ -164,44 +168,6 @@ class Named(Protocol): ITEM_TYPE: TypeAlias = str | Path | Named -def message( - msg: Any, - task: Optional[str | Path] = None, - item: Optional[ITEM_TYPE] = None, - *, - color_type: Any = "", -) -> None: - if task is not None: - print(f"{Fore.CYAN}{task}{Style.RESET_ALL}: ", end="", file=sys.stderr) - if item is not None: - print(item, end=" ", file=sys.stderr) - print(f"{color_type}{msg}{Style.RESET_ALL}", file=sys.stderr) - if color_type == MessageType.WARN: - config.n_warn += 1 - if color_type == MessageType.ERROR: - config.n_error += 1 - if color_type == MessageType.FATAL: - exit1() - - -# A simple bar that only holds a task prefix -class PrintBar: - def __init__(self, task: Optional[str | Path] = None): - self.task = task - - def log(self, msg: Any, item: Optional[ITEM_TYPE] = None) -> None: - message(msg, self.task, item, color_type=MessageType.LOG) - - def warn(self, msg: Any, item: Optional[ITEM_TYPE] = None) -> None: - message(msg, self.task, item, color_type=MessageType.WARN) - - def error(self, msg: Any, item: Optional[ITEM_TYPE] = None) -> None: - message(msg, self.task, item, color_type=MessageType.ERROR) - - def fatal(self, msg: Any, item: Optional[ITEM_TYPE] = None) -> None: - message(msg, self.task, item, color_type=MessageType.FATAL) - - # A class that draws a progressbar. # Construct with a constant prefix, the max length of the items to process, and # the number of items to process. 
@@ -227,12 +193,18 @@ def update_columns(_: Any, __: Any) -> None: signal.signal(signal.SIGWINCH, update_columns) @staticmethod - def item_len(item: ITEM_TYPE) -> int: + def item_text(item: Optional[ITEM_TYPE]) -> str: + if item is None: + return "" if isinstance(item, str): - return len(item) + return item if isinstance(item, Path): - return len(str(item)) - return len(item.name) + return str(item) + return item.name + + @staticmethod + def item_len(item: ITEM_TYPE) -> int: + return len(ProgressBar.item_text(item)) def _is_locked(self) -> bool: return ProgressBar.lock_depth > 0 @@ -246,7 +218,7 @@ def __init__( *, items: Optional[Sequence[ITEM_TYPE]] = None, needs_leading_newline: bool = False, - ): + ) -> None: assert ProgressBar.current_bar is None, ProgressBar.current_bar.prefix ProgressBar.current_bar = self @@ -284,15 +256,10 @@ def __exit__(self, *args: Any) -> None: ProgressBar.lock_depth -= 1 ProgressBar.lock.__exit__(*args) - def _print( - self, - *objects: Any, - sep: str = "", - end: str = "\n", - file: TextIO = sys.stderr, - flush: bool = True, - ) -> None: - print(*objects, sep=sep, end=end, file=file, flush=flush) + def _print(self, *args: Any, **kwargs: Any) -> None: + kwargs.setdefault("sep", "") + kwargs.setdefault("flush", True) + eprint(*args, **kwargs) def total_width(self) -> int: cols = ProgressBar.columns @@ -300,9 +267,7 @@ def total_width(self) -> int: cols -= 1 return cols - def bar_width(self) -> Optional[int]: - if self.item_width is None: - return None + def bar_width(self) -> int: return self.total_width() - len(self.prefix) - 2 - self.item_width def update(self, count: int, max_len: int) -> None: @@ -323,23 +288,25 @@ def clearline(self) -> None: @staticmethod def action( - prefix: str, + prefix: Optional[str], item: Optional[ITEM_TYPE], width: Optional[int] = None, total_width: Optional[int] = None, print_item: bool = True, ) -> str: - if width is not None and total_width is not None and len(prefix) + 2 + width > total_width: - 
width = total_width - len(prefix) - 2 - item = "" if item is None else (item if isinstance(item, str) else item.name) - if width is not None and len(item) > width: - item = item[:width] + if width is not None and total_width is not None: + if prefix is None and width > total_width: + width = total_width + if prefix is not None and len(prefix) + 2 + width > total_width: + width = total_width - len(prefix) - 2 + text = ProgressBar.item_text(item) + if width is not None and len(text) > width: + text = text[:width] if width is None or width <= 0: width = 0 - if print_item: - return f"{Fore.CYAN}{prefix}{Style.RESET_ALL}: {item:<{width}}" - else: - return f"{Fore.CYAN}{prefix}{Style.RESET_ALL}: {' ' * width}" + prefix = "" if prefix is None else f"{Fore.CYAN}{prefix}{Style.RESET_ALL}: " + suffix = f"{text:<{width}}" if print_item else " " * width + return prefix + suffix def get_prefix(self, print_item: bool = True) -> str: return ProgressBar.action( @@ -431,8 +398,6 @@ def log( print_item: bool = True, ) -> None: with self: - if message is None: - message = "" self.clearline() self.logged = True @@ -472,7 +437,7 @@ def debug( print_item: bool = True, ) -> None: if config.args.verbose: - self.log(message, data, color=color, resume=resume, print_item=print_item) + self.log(message, data, color, resume=resume, print_item=print_item) def warn(self, message: str, data: Optional[str] = None, *, print_item: bool = True) -> None: with self.lock: @@ -598,6 +563,76 @@ def finalize( return self.global_logged and not suppress_newline +# A simple bar that only holds a task prefix +class PrintBar: + def __init__( + self, + prefix: Optional[str | Path] = None, + max_len: Optional[int] = None, + *, + item: Optional[ITEM_TYPE] = None, + ) -> None: + self.prefix = str(prefix) if prefix else None + self.item_width = max_len + 1 if max_len is not None else None + self.item = item + + def start(self, item: Optional[ITEM_TYPE] = None) -> "PrintBar": + bar_copy = copy.copy(self) + 
bar_copy.item = item + return bar_copy + + def log( + self, + message: str, + data: Optional[str] = None, + color: str = Fore.GREEN, + *, + resume: bool = True, + print_item: bool = True, + ) -> None: + prefix = ProgressBar.action(self.prefix, self.item, self.item_width, None, print_item) + eprint(prefix, color, message, ProgressBar._format_data(data), Style.RESET_ALL, sep="") + + def debug( + self, + message: str, + data: Optional[str] = None, + color: str = Fore.GREEN, + *, + resume: bool = True, + print_item: bool = True, + ) -> None: + if config.args.verbose: + self.log(message, data, color, resume=resume, print_item=print_item) + + def warn(self, message: str, data: Optional[str] = None, *, print_item: bool = True) -> None: + config.n_warn += 1 + self.log(message, data, Fore.YELLOW, print_item=print_item) + + def error( + self, + message: str, + data: Optional[str] = None, + *, + resume: bool = False, + print_item: bool = True, + ) -> None: + config.n_error += 1 + self.log(message, data, Fore.RED, print_item=print_item) + + def fatal( + self, + message: str, + data: Optional[str] = None, + *, + resume: bool = False, + print_item: bool = True, + ) -> None: + config.n_error += 1 + self.log(message, data, Fore.RED, resume=resume, print_item=print_item) + exit1() + + BAR_TYPE = PrintBar | ProgressBar @@ -689,7 +724,7 @@ def parse_yaml(data: str, path: Optional[Path] = None, plain: bool = False) -> A return yaml.safe_load(data) except Exception as e: - print(f"{Fore.YELLOW}{e}{Style.RESET_ALL}", end="", file=sys.stderr) + eprint(f"{Fore.YELLOW}{e}{Style.RESET_ALL}", end="") fatal(f"Failed to parse {path}.") @@ -712,6 +747,14 @@ def read_yaml_settings(path: Path) -> Any: return settings +def normalize_yaml_value(value: Any, t: type[Any]) -> Any: + if isinstance(value, str) and t is Path: + value = Path(value) + if isinstance(value, int) and t is float: + value = float(value) + return value + + if has_ryaml: U = TypeVar("U") @@ -731,7 +774,7 @@ def ryaml_get_or_add( 
yaml[key] = t() value = yaml[key] assert isinstance(value, t) - return value # type: ignore + return cast(ruamel.yaml.comments.CommentedMap | U, value) # This tries to preserve the correct comments. def ryaml_filter(data: Any, remove: str) -> Any: @@ -777,6 +820,13 @@ def ryaml_replace(data: Any, old_key: str, new_key: str, new_value: Any = None) write_yaml_lock = threading.Lock() +# The @overload definitions are purely here for static typing reasons. +@overload +def write_yaml(data: Any, path: None = None, allow_yamllib: bool = False) -> str: ... +@overload +def write_yaml(data: Any, path: Path, allow_yamllib: bool = False) -> None: ... + + # Writing a yaml file (or return as string) only works when ruamel.yaml is loaded. Check if `has_ryaml` is True before using. def write_yaml( data: Any, path: Optional[Path] = None, allow_yamllib: bool = False @@ -1169,7 +1219,7 @@ def __init__( out: Optional[str], verdict: Optional["Verdict"] = None, pass_id: Optional[int] = None, - ): + ) -> None: self.returncode = returncode self.status = status self.duration = duration @@ -1241,7 +1291,7 @@ def setlimits() -> None: # Subclass Popen to get rusage information. class ResourcePopen(subprocess.Popen[bytes]): - rusage: Any # TODO #102: use stricter type than `Any` + rusage: "Optional[resource.struct_rusage]" # If wait4 is available, store resource usage information. 
if "wait4" in dir(os): @@ -1318,13 +1368,13 @@ def exec_command( if config.args.verbose >= 2: if "cwd" in kwargs: - print("cd", kwargs["cwd"], "; ", end="", file=sys.stderr) + eprint("cd", kwargs["cwd"], "; ", end="") else: - print("cd", Path.cwd(), "; ", end="", file=sys.stderr) - print(*command, end="", file=sys.stderr) + eprint("cd", Path.cwd(), "; ", end="") + eprint(*command, end="") if "stdin" in kwargs: - print(" < ", kwargs["stdin"].name, end="", file=sys.stderr) - print(file=sys.stderr) + eprint(" < ", kwargs["stdin"].name, end="") + eprint() timeout: Optional[int] = None if "timeout" in kwargs: @@ -1392,7 +1442,7 @@ def maybe_crop(s: str) -> str: err = maybe_crop(stderr.decode("utf-8", "replace")) if stderr is not None else None out = maybe_crop(stdout.decode("utf-8", "replace")) if stdout is not None else None - if hasattr(process, "rusage"): + if hasattr(process, "rusage") and process.rusage: duration = process.rusage.ru_utime + process.rusage.ru_stime # It may happen that the Rusage is low, even though a timeout was raised, i.e. when calling sleep(). # To prevent under-reporting the duration, we take the max with wall time in this case. 
diff --git a/bin/validate.py b/bin/validate.py index 3e640c604..86bacbd54 100644 --- a/bin/validate.py +++ b/bin/validate.py @@ -1,16 +1,17 @@ import re -from util import * -from enum import Enum from collections.abc import Sequence +from enum import Enum from pathlib import Path -from typing import Final, Optional, TYPE_CHECKING +from typing import Any, Final, Optional, TYPE_CHECKING import config import program +from util import ExecResult, ExecStatus, fatal, ProgressBar, validator_exec_code_map if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 import run import testcase + from problem import Problem class Mode(Enum): @@ -21,7 +22,7 @@ class Mode(Enum): INVALID = 3 VALID_OUTPUT = 4 - def __str__(self): + def __str__(self) -> str: return { Mode.INPUT: "input", Mode.ANSWER: "answer", @@ -43,7 +44,7 @@ def _to_number(s: str) -> int | float: return float(s) -def _merge_constraints(constraints_path: Path, constraints: ConstraintsDict): +def _merge_constraints(constraints_path: Path, constraints: ConstraintsDict) -> None: # Merge with previous constraints. 
if constraints_path.is_file(): for line in constraints_path.read_text().splitlines(): @@ -93,17 +94,17 @@ class Validator(program.Program): program.VIVA, ] - def __repr__(self): + def __repr__(self) -> str: return type(self).__name__ + ": " + str(self.path) def __init__( self, - problem, - path, - subdir, - skip_double_build_warning=False, - check_constraints=False, - ): + problem: "Problem", + path: Path, + subdir: str, + skip_double_build_warning: bool = False, + check_constraints: bool = False, + ) -> None: super().__init__( problem, path, @@ -121,7 +122,12 @@ def __init__( self.tmpdir: Path = self.tmpdir.parent / (self.tmpdir.name + "_check_constraints") self.check_constraints = check_constraints - def _run_helper(self, testcase, constraints, args): + def _run_helper( + self, + testcase: "testcase.Testcase", + constraints: Optional[ConstraintsDict], + args: Optional[Sequence[str | Path]], + ) -> tuple[Path, Optional[Path], Sequence[str | Path]]: """Helper method for the run method in subclasses. Return: cwd: a current working directory for this testcase @@ -156,7 +162,7 @@ def _run_helper(self, testcase, constraints, args): # .ctd, .viva, or otherwise called as: ./validator [arguments] < inputfile. # It may not read/write files. - def _run_format_validator(self, testcase, cwd): + def _run_format_validator(self, testcase: "testcase.Testcase", cwd: Path) -> ExecResult: assert self.language in Validator.FORMAT_VALIDATOR_LANGUAGES assert self.run_command is not None, "Validator should be built before running it" @@ -167,7 +173,7 @@ def _run_format_validator(self, testcase, cwd): else: assert False # now also catches OutputValidator - def format_exec_code_map(returncode): + def format_exec_code_map(returncode: int) -> ExecStatus: if returncode == 0: return ExecStatus.ACCEPTED if returncode == 1: @@ -187,14 +193,15 @@ def format_exec_code_map(returncode): if self.language == program.VIVA: # Called as `viva validator.viva testcase.in`. 
- result = self._exec_command( - self.run_command + [main_path.absolute()], + return self._exec_command( + [*self.run_command, main_path.absolute()], exec_code_map=format_exec_code_map, cwd=cwd, ) - return result - def _exec_helper(self, *args, cwd, **kwargs): + assert False + + def _exec_helper(self, *args: Any, cwd: Path, **kwargs: Any) -> ExecResult: ret = self._exec_command(*args, **kwargs) judgemessage = cwd / "judgemessage.txt" judgeerror = cwd / "judgeerror.txt" @@ -215,7 +222,7 @@ def run( testcase: "testcase.Testcase", mode: Mode, constraints: Optional[ConstraintsDict] = None, - args: Optional[list[str]] = None, + args: Optional[Sequence[str | Path]] = None, ) -> ExecResult: raise Exception("Abstract method") @@ -235,7 +242,7 @@ class InputValidator(Validator): args_key: Final[str] = "input_validator_args" - def __init__(self, problem, path, **kwargs): + def __init__(self, problem: "Problem", path: Path, **kwargs: Any) -> None: super().__init__(problem, path, InputValidator.source_dir, **kwargs) def run( @@ -243,7 +250,7 @@ def run( testcase: "testcase.Testcase", mode: Mode = Mode.INPUT, constraints: Optional[ConstraintsDict] = None, - args: Optional[list[str]] = None, + args: Optional[Sequence[str | Path]] = None, ) -> ExecResult: """ Arguments @@ -266,17 +273,16 @@ def run( if self.language in Validator.FORMAT_VALIDATOR_LANGUAGES: return Validator._run_format_validator(self, testcase, cwd) - invocation = self.run_command.copy() - with testcase.in_path.open("rb") as in_file: ret = self._exec_helper( - invocation + arglist, + [*self.run_command, *arglist], exec_code_map=validator_exec_code_map, stdin=in_file, cwd=cwd, ) if constraints is not None: + assert constraints_path is not None _merge_constraints(constraints_path, constraints) return ret @@ -298,7 +304,7 @@ class AnswerValidator(Validator): # use output_validator_args as well args_key: Final[str] = "output_validator_args" - def __init__(self, problem, path, **kwargs): + def __init__(self, problem: 
"Problem", path: Path, **kwargs: Any) -> None: super().__init__(problem, path, AnswerValidator.source_dir, **kwargs) def run( @@ -306,7 +312,7 @@ def run( testcase: "testcase.Testcase", mode: Mode = Mode.ANSWER, constraints: Optional[ConstraintsDict] = None, - args: Optional[list[str]] = None, + args: Optional[Sequence[str | Path]] = None, ) -> ExecResult: assert self.run_command is not None, "Validator should be built before running it" @@ -322,17 +328,16 @@ def run( if self.language in Validator.FORMAT_VALIDATOR_LANGUAGES: return Validator._run_format_validator(self, testcase, cwd) - invocation = self.run_command + [testcase.in_path.absolute()] - with testcase.ans_path.open("rb") as ans_file: ret = self._exec_helper( - invocation + arglist, + [*self.run_command, testcase.in_path.absolute(), *arglist], exec_code_map=validator_exec_code_map, stdin=ans_file, cwd=cwd, ) if constraints is not None: + assert constraints_path is not None _merge_constraints(constraints_path, constraints) return ret @@ -351,7 +356,7 @@ class OutputValidator(Validator): args_key: Final[str] = "output_validator_args" - def __init__(self, problem, path, **kwargs): + def __init__(self, problem: "Problem", path: Path, **kwargs: Any) -> None: super().__init__(problem, path, OutputValidator.source_dir, **kwargs) def run( @@ -359,7 +364,7 @@ def run( testcase: "testcase.Testcase", mode: "Mode | run.Run", constraints: Optional[ConstraintsDict] = None, - args: Optional[list[str]] = None, + args: Optional[Sequence[str | Path]] = None, ) -> ExecResult: """ Run this validator on the given testcase. 
@@ -395,7 +400,6 @@ def run( assert testcase.out_path is not None path = testcase.out_path.absolute() else: - assert mode != Mode.INPUT # mode is actually a Run path = mode.out_path in_path = mode.in_path # relevant for multipass @@ -406,17 +410,17 @@ def run( cwd, constraints_path, arglist = self._run_helper(testcase, constraints, args) if not isinstance(mode, Mode): cwd = mode.feedbackdir - invocation = self.run_command + [in_path, ans_path, cwd] with path.open("rb") as file: ret = self._exec_helper( - invocation + arglist, + [*self.run_command, in_path, ans_path, cwd, *arglist], exec_code_map=validator_exec_code_map, stdin=file, cwd=cwd, ) if constraints is not None: + assert constraints_path is not None _merge_constraints(constraints_path, constraints) return ret @@ -430,7 +434,7 @@ def run( INVALID_BYTES: Final[re.Pattern[bytes]] = re.compile(b"[^\n\x20-\x7e]") -def _has_invalid_byte(bytes, *, other_whitespaces=False): +def _has_invalid_byte(bytes: bytes, *, other_whitespaces: bool = False) -> bool: if other_whitespaces: return INVALID_BYTES_WITH_OTHER.search(bytes) is not None else: @@ -439,14 +443,16 @@ def _has_invalid_byte(bytes, *, other_whitespaces=False): # assumes that the only possible whitespaces are space and newline # allows \n\n -def _has_consecutive_whitespaces(bytes): +def _has_consecutive_whitespaces(bytes: bytes) -> bool: for bad in [b" \n", b" ", b"\n "]: if bytes.find(bad) >= 0: return True return False -def sanity_check(problem, path, bar, strict_whitespace=True): +def sanity_check( + problem: "Problem", path: Path, bar: ProgressBar, strict_whitespace: bool = True +) -> None: """ Does some generic checks on input, answer, or output files of a testcase, including diff --git a/bin/validator_tests.py b/bin/validator_tests.py index c2a724dac..e1d6b8b56 100644 --- a/bin/validator_tests.py +++ b/bin/validator_tests.py @@ -1,5 +1,6 @@ from collections.abc import Callable, Sequence from typing import Final, Optional, TypeVar + from validate import 
AnswerValidator, AnyValidator, InputValidator, OutputValidator diff --git a/bin/verdicts.py b/bin/verdicts.py index 4a2ecd3b9..8833742da 100644 --- a/bin/verdicts.py +++ b/bin/verdicts.py @@ -2,18 +2,18 @@ import shutil import sys import threading +from collections.abc import Sequence +from colorama import Fore, Style from enum import Enum from pathlib import Path -from typing import Any, Literal, Optional, Sequence, TextIO, TYPE_CHECKING - -from colorama import Fore, Style +from typing import Any, Literal, Optional, TYPE_CHECKING import config import testcase -from util import ITEM_TYPE, ProgressBar +from util import eprint, ITEM_TYPE, ProgressBar if TYPE_CHECKING: - pass + import run class Verdict(Enum): @@ -171,7 +171,7 @@ def __init__( test_cases_list: Sequence[testcase.Testcase], timeout: int, run_until: RunUntil = RunUntil.FIRST_ERROR, - ): + ) -> None: test_cases: set[str] = set(t.name for t in test_cases_list) test_groups: set[str] = set(str(path) for tc in test_cases for path in Path(tc).parents) @@ -202,7 +202,7 @@ def __init__( def __enter__(self) -> None: self.lock.__enter__() - def __exit__(self, *args) -> None: + def __exit__(self, *args: Any) -> None: self.lock.__exit__(*args) def is_test_group(self, node: str) -> bool: @@ -370,7 +370,7 @@ def run_is_needed(self, test_case: str) -> bool: class VerdictTable: class Group: - def __init__(self, length: int, text: str): + def __init__(self, length: int, text: str) -> None: self.length = length self.text = text @@ -379,12 +379,12 @@ def tuple(self) -> tuple[int, str]: def __init__( self, - submissions, + submissions: Sequence["run.Submission"], test_cases: Sequence[testcase.Testcase], width: int = ProgressBar.columns, height: int = shutil.get_terminal_size().lines, max_name_width: int = 50, - ): + ) -> None: self.submissions: list[str] = [s.name for s in submissions] self.test_cases: list[str] = [t.name for t in test_cases] self.samples: set[str] = set(t.name for t in test_cases if t.root == "sample") @@ 
-431,17 +431,15 @@ def __init__( # dont print table if it fills too much of the screen self.print_without_force = len(lines) * len(self.submissions) + 5 < height if not self.print_without_force: - print( - f"{Fore.YELLOW}WARNING: Overview too large for terminal, skipping live updates{Style.RESET_ALL}", - file=sys.stderr, + eprint( + f"{Fore.YELLOW}WARNING: Overview too large for terminal, skipping live updates{Style.RESET_ALL}" ) - print( + eprint( *lines, f"[times {len(self.submissions)}...]", Style.RESET_ALL, sep="\n", end="\n", - file=sys.stderr, ) def next_submission(self, verdicts: Verdicts) -> None: @@ -464,11 +462,10 @@ def _clear(self, *, force: bool = True) -> None: for printed in self.last_printed ) - print( - f"\033[{lines - 1}A\r\033[0J", + eprint( + f"\033[{lines - 1}A\r", end="", flush=True, - file=sys.stderr, ) self.last_printed = [] @@ -497,10 +494,10 @@ def _print_tree( if printed_lengths is None: printed_lengths = [] if force or self.print_without_force: - printed_text = ["\n" * new_lines] + printed_text = ["\n\033[2K" * new_lines] printed_lengths += [0] * new_lines - max_depth = None + max_depth = config.args.depth show_root = False stack = [(".", "", "", True)] @@ -516,7 +513,7 @@ def _print_tree( ) verdict_len = 1 if verdict in [None, False] else len(str(verdict)) printed_text.append( - f"{Style.DIM}{indent}{prefix}{Style.RESET_ALL}{name}: {verdict_str}\n" + f"{Style.DIM}{indent}{prefix}{Style.RESET_ALL}{name}: {verdict_str}\n\033[K" ) printed_lengths.append(len(indent) + len(prefix) + len(name) + 2 + verdict_len) if max_depth is not None and len(indent) >= 2 * max_depth: @@ -556,7 +553,7 @@ def _print_tree( length, group = grouped_value.tuple() if width >= 0 and printed + 1 + length > width: printed_text.append( - f"\n{Style.DIM}{indent}{pipe} {pipe2} {Style.RESET_ALL}" + f"\n\033[K{Style.DIM}{indent}{pipe} {pipe2} {Style.RESET_ALL}" ) printed_lengths.append(printed) printed = pref_len @@ -567,7 +564,7 @@ def _print_tree( space = " " 
printed_lengths.append(printed) - printed_text.append("\n") + printed_text.append("\n\033[K") self._clear(force=True) @@ -576,16 +573,15 @@ def _print_tree( (w + ProgressBar.columns - 1) // ProgressBar.columns for w in printed_lengths ) if self.checked_height < height + 5: - print( + eprint( f"\033[0J{Fore.YELLOW}WARNING: Overview too large for terminal, skipping live updates{Style.RESET_ALL}\n", - file=sys.stderr, ) self.print_without_force = False self.checked_height = True if not force and not self.print_without_force: return - print("".join(printed_text), end="", flush=True, file=sys.stderr) + eprint(*printed_text, "\033[0J", sep="", end="", flush=True) self.last_printed = printed_lengths def _print_table( @@ -598,7 +594,7 @@ def _print_table( if printed_lengths is None: printed_lengths = [] if force or self.print_without_force: - printed_text = ["\n" * new_lines] + printed_text = ["\n\033[2K" * new_lines] printed_lengths += [0] * new_lines for s, submission in enumerate(self.submissions): # pad/truncate submission names to not break table layout @@ -620,7 +616,7 @@ def _print_table( for verdict_value in verdicts: length, tmp = verdict_value.tuple() if self.width >= 0 and printed + 1 + length > self.width: - printed_text.append(f"\n{str():{self.name_width + 1}}") + printed_text.append(f"\n\033[K{str():{self.name_width + 1}}") printed_lengths.append(printed) printed = self.name_width + 1 @@ -628,9 +624,9 @@ def _print_table( printed += length + 1 printed_lengths.append(printed) - printed_text.append("\n") + printed_text.append("\n\033[K") self._clear(force=True) - print("".join(printed_text), end="", flush=True, file=sys.stderr) + eprint(*printed_text, "\033[0J", sep="", end="", flush=True) self.last_printed = printed_lengths def ProgressBar( @@ -662,7 +658,7 @@ def __init__( *, items: Optional[Sequence[ITEM_TYPE]], needs_leading_newline: bool, - ): + ) -> None: super().__init__( prefix, max_len, @@ -689,20 +685,14 @@ def __exit__(self, *args: Any) -> None: 
self.table.print(force=False, printed_lengths=[ProgressBar.columns]) if isinstance(sys.stderr, io.TextIOWrapper): sys.stderr.reconfigure(line_buffering=self.reset_line_buffering) - print(end="", flush=True, file=sys.stderr) + eprint(end="", flush=True) super().__exit__(*args) - def _print( - self, - *objects: Any, - sep: str = "", - end: str = "\n", - file: TextIO = sys.stderr, - flush: bool = True, - ) -> None: + def _print(self, *args: Any, **kwargs: Any) -> None: assert self._is_locked() - # drop all flushes... - print(*objects, sep=sep, end=end, file=file, flush=False) + kwargs.setdefault("sep", "") + kwargs["flush"] = False # drop all flushes... + eprint(*args, **kwargs) def start(self, item: ITEM_TYPE = "") -> "TableProgressBar": from run import Run diff --git a/bin/visualize.py b/bin/visualize.py index 627512bda..6275c1830 100644 --- a/bin/visualize.py +++ b/bin/visualize.py @@ -1,9 +1,9 @@ +from collections.abc import Sequence from pathlib import Path from typing import Any, Final, Optional, TYPE_CHECKING import program - -from util import * +from util import ExecResult if TYPE_CHECKING: # Prevent circular import: https://stackoverflow.com/a/39757388 from problem import Problem @@ -23,7 +23,7 @@ class InputVisualizer(program.Program): args_key: Final[str] = "input_visualizer_args" - def __init__(self, problem: "Problem", path: Path, **kwargs: Any): + def __init__(self, problem: "Problem", path: Path, **kwargs: Any) -> None: super().__init__( problem, path, @@ -35,12 +35,12 @@ def __init__(self, problem: "Problem", path: Path, **kwargs: Any): # Run the visualizer (should create a testcase. file). 
     def run(
-        self, in_path: Path, ans_path: Path, cwd: Path, args: Optional[list[str]] = None
+        self, in_path: Path, ans_path: Path, cwd: Path, args: Optional[Sequence[str | Path]] = None
     ) -> ExecResult:
         assert self.run_command is not None, "Input Visualizer should be built before running it"
 
         return self._exec_command(
-            self.run_command + [in_path, ans_path] + (args or []),
+            [*self.run_command, in_path, ans_path, *(args or [])],
             cwd=cwd,
         )
 
 
@@ -59,7 +59,7 @@ class OutputVisualizer(program.Program):
 
     args_key: Final[str] = "output_visualizer_args"
 
-    def __init__(self, problem: "Problem", path: Path, **kwargs: Any):
+    def __init__(self, problem: "Problem", path: Path, **kwargs: Any) -> None:
         super().__init__(
             problem,
             path,
@@ -77,14 +77,14 @@ def run(
         ans_path: Path,
         out_path: Optional[Path],
         cwd: Path,
-        args: Optional[list[str]] = None,
+        args: Optional[Sequence[str | Path]] = None,
     ) -> ExecResult:
         assert self.run_command is not None, "Output Visualizer should be built before running it"
         assert (out_path is None) == self.problem.interactive, (
             "out_path should be None if and only if problem is interactive"
         )
 
-        command = self.run_command + [in_path, ans_path, cwd] + (args or [])
+        command = [*self.run_command, in_path, ans_path, cwd, *(args or [])]
         if out_path is not None:
             with out_path.open("rb") as out_file:
                 return self._exec_command(command, stdin=out_file, cwd=cwd)
diff --git a/doc/workflow.md b/doc/workflow.md
new file mode 100644
index 000000000..2892fed20
--- /dev/null
+++ b/doc/workflow.md
@@ -0,0 +1,225 @@
+# Workflow
+This document aims to show the typical workflow of preparing a problem with BAPCtools and might be useful as a guide.
+We start with the creation of a new problem and end after uploading it to DOMjudge.
+Along the way, all commands that are used for various stages of problem preparation are explained.
+
+> [!CAUTION]
+> Do not use BAPCtools on problem packages from untrusted sources.
+> Programs are **not** run inside a sandbox.
+> Malicious submissions, validators, visualizers, and generators can harm your system. + +## Topics +- [Problem Directory](#problem-directory) + - [Required Files](#required-files) + - [Optional Files](#optional-files) + - [`bt new_problem`](#bt-new_problem) +- [Overview](#overview) + - [`bt stats`](#bt-stats) + - [`bt run -oa[a]`](#bt-run--o--aa-submissions-data) +- [Problem Preparation](#problem-preparation) + - [Submissions](#submissions) + - [Test cases/Generators](#test-cases/generators) + - [Input and Answer Validators](#input-and-answer-validators) + - [Output Validators](#output-validators) + - [Statement and Solution](#statement-and-solution) +- [Finalize](#finalize) +- [Upload](#upload) + + +## Problem Directory +A problem directory is specified by the existence of a `problem.yaml`. +However, to set up a proper problem we need some more sub directories and files. + +#### Required Files +```ini +Problem +├─╴answer_validators/ +│ └─╴... +├─╴data/ +│ ├─╴sample/ +│ └─╴secret/ +├─╴input_validators/ +│ └─╴... +├─╴output_validator/ ; for custom output checking +│ └─╴... +├─╴solution/ +│ └─╴solution..tex +├─╴statement/ +│ └─╴problem..tex +├─╴submissions/ +│ ├─╴accepted/ +│ ├─╴run_time_error/ +│ ├─╴time_limit_exceeded/ +│ └─╴wrong_answer/ +└─╴problem.yaml +``` +> [!IMPORTANT] +> There can be many input/answer validator*s* but only one output validator. +> Therefore, its the only of those directories which does not end with a plural s. +#### Optional Files +```ini +Problem +├─╴data/ +│ ├─╴invalid_input/ +│ ├─╴invalid_answer/ +│ ├─╴invalid_output/ +│ └─╴valid_output/ +┆ +├─╴generators/ +│ ├─╴... +│ └─╴generators.yaml +├─╴input_visualizer/ +│ └─╴... +└─╴output_visualizer/ + └─╴... +``` + +#### `bt new_problem` + +This command will generate a new problem with the right structure. +The command will also generate some example files and write a `problem.yaml` with sensible defaults. 
+The command will request some information from you:
+
+- **problem name (en):** the problem name, in English
+- **dirname:** the name of the subdirectory that gets created (must have only lowercase letters in [a-z])
+- **author:** your name
+- **validation type:**
+  - **default:** compare output per token (ignoring case and whitespace changes)
+  - **float:** same as default, but compare numbers with an epsilon (default: 10^-6)
+  - **custom:** your own output validator (has a custom output validator)
+  - **interactive:** an interactive problem (has a custom output validator)
+  - **multi-pass:** a multi-pass problem (has a custom output validator)
+  - **interactive multi-pass:** an interactive multi-pass problem (has a custom output validator)
+- **source:** typically, the contest name (optional)
+- **source url:** typically, a link to the contest (optional)
+- **license:** the license, we encourage making problems public (cc by-sa)
+- **rights owner:** owner of the copyright (if this is not provided, the author is the rights owner)
+
+> [!TIP]
+> For more information regarding these options and their meaning, you can also look at the [problem specification](https://icpc.io/problem-package-format/spec/2025-09.html#problem-metadata).
+
+## Overview
+For any problem and any stage of preparation, it is useful to get an overview of the current state of the problem.
+BAPCtools offers two commands that provide such an overview.
+
+#### `bt stats`
+This shows a summary of files and programs that have been added to the problem.
+The output should look similiar to this: +```ini +problem time yaml tex sol val: I A O sample secret bad good AC WA TLE subs c(++) py java kt comment +A 1.0 Y 0 0 N N 0 0 0 0 0 0 0 0 0 0 0 0 +------------------------------------------------------------------------------------------------------------------- +TOTAL 1.0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +``` +Most of the columns should be self explanatory, but here is a description of what is displayed: +- **problem:** the problem label followed by the problem directory name +- **time:** the time limit in seconds +- **yaml:** `Y` if `problem.yaml` exists (should always be true) +- **tex:** the number of (LaTeX) problem statement languages +- **sol:** the number of (LaTeX) solution slide languages +- **val I:** `Y` if at least one input validator was found +- **val A:** `Y` if at least one answer validator was found (note that interactive and multi-pass problems do not need such a validator) +- **val O:** `Y` if the output validator was found (note that this must exist if the problem is interactive and/or multi-pass) +- **sample:** the number of sample test cases (BAPCtools encourages to give at least two examples) +- **secret:** the number of secret test cases (BAPCtools encourages to use 30-100 test cases) +- **bad:** the number of invalid test cases (those test cases are intentionally wrong to check that the validators correctly rejects them) +- **AC, WA, TLE:** the number of submissions in the corresponding `accepted`, `time_limit_exceeded`, and `wrong_answer` directories +- **subs:** The total number of submissions (files) in the `submissions/` directory +- **c(++), py, java, kt:** the number of *accpeted* submissions in the corresponding language +- **comment:** the content of the `comment` entry in `problem.yaml` + +#### `bt run -o -a[a] [submissions/...] [data/...]` +This command runs submission and presents their verdict on the testcases. 
+The output should look similar to this: +```ini +accepted/solution.py: aaaAAAAAAA AAAAAAA +wrong_answer/wrong.py: aaaAAWAAAW WAAAAAA +time_limit_exceeded/brute.cpp: aaaAAAAATT TT----- +run_time_error/bug.java: aaaAARA--- ------- +``` +Each row represents a submission, each column represents a test case. +To make the table easier to read, the test cases are grouped in multiples of 10 and samples are marked with a lowercase letter. + +The entries correspond to the verdict that a submission got on a test case: +- **A:** accepted +- **W:** wrong answer +- **T:** time limit exceeded +- **R:** run time error +- **-:** skipped because of lazy judging + +> [!NOTE] +> Here is a short explanation of what the given command-line parameters do: +> - **-o:** enable the overview table (if possible, printed with live updates) +> - **-a:** disable lazy judging for WA/RTE submissions +> - **-aa:** completely disable lazy judging +> - **[submissions/...]:** a list of directories/submissions to run +> - **[data/...]:** a list of directories/test cases to use + +## Problem Preparation +Every problem needs the following things: +- [Submissions](#submissions) +- [Test cases/Generators](#test-casesgenerators) +- [Input and Answer Validators](#input-and-answer-validators) +- [Output Validators](#output-validators) +- [Statement and Solution](#statement-and-solution) + +> [!TIP] +> The order in which you add these things is up to you. +> However, this guide will use the mentioned order. + +### Submissions +--- +Strictly speaking, only one accepted submission is really required. +However, multiple accepted submissions in various languages help determine a good time limit. +Additionally, adding WA submissions and TLE submissions helps improve the test cases and the time limit. + +The following commands can be used to run a submission: + +#### `bt test submissions/... [data/...|-i]` +This command will run the selected submission on a given input. 
+As input to the submission, you can either specify a test case/directory in `data/`, +or you can run the program in interactive mode with `-i`, in which case the console input is passed to the submission. +After running the submission, its output and running time is printed. + +> [!IMPORTANT] +> Note that the output is only printed, it is **not** validated! + +#### `bt run [-G] [submissions/...] [data/...]` +This command will run the selected submission on a given test case. +This will also validate the output of the submission but will not display the output. + +> [!TIP] +> By default `bt run` will try to keep the `data/` directory up to date, see [Test cases/Generators](#test-casesgenerators) for more information. +> If you just want to run the submission, you can add `-G` (short for `--no-generate`) to disable this behaviour. + +### Test cases/Generators +--- + - [output validator] + - `bt generate` + +### Input and Answer Validators +--- + - `bt validate` + +### Output Validators +--- + +### Statement and Solution +--- + - `bt pdf` + - `bt solutions` + +## Finalize +--- + - `bt time_limit` + - `bt fuzz` + - `bt generate --reorder` + - `bt constraints` + - `bt validate` + - `bt stats --all` + +## Upload +--- + - `bt zip` + - `bt samplezip` + - `bt export` diff --git a/pyproject.toml b/pyproject.toml index d3abaa40d..08f1bd9e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,12 +3,15 @@ line-length = 100 # Assume Python 3.10 target-version = "py310" +# Source +src = ["bin"] -[tool.ruff.lint] -# Ignore star-imports -# TODO #102: This is definitely fixable, but for now, postponing the clean-up of all imports -ignore = ["F403", "F405"] - +[tool.ruff.lint.isort] +# organize imports in two blocks, non local and local +section-order = ["standard-library", "first-party"] +default-section = "standard-library" +# X,y,Z case insensitive in from ? 
import X, y, Z +order-by-type = false [tool.pyright] include = ["bin"] diff --git a/support/default_output_validator.cpp b/support/default_output_validator.cpp index 89c12f033..1467878e1 100644 --- a/support/default_output_validator.cpp +++ b/support/default_output_validator.cpp @@ -241,7 +241,7 @@ diff check(const std::filesystem::path& ans_path) { bool equal = false; if (float_absolute_tolerance >= 0) { long double abs = std::abs(given-expected); - diff += "Absolute difference: " + std::to_string(abs); + diff += "Absolute error: " + std::to_string(abs); if (abs <= float_absolute_tolerance) { equal = true; } @@ -249,7 +249,7 @@ diff check(const std::filesystem::path& ans_path) { if (float_relative_tolerance >= 0) { long double rel = std::abs((given-expected)/expected); if (diff != "") diff += ", "; - diff += "Relative difference: " + std::to_string(rel); + diff += "Relative error: " + std::to_string(rel); if (rel <= float_relative_tolerance) { equal = true; } diff --git a/test/problems/alternativeencryption/problem.yaml b/test/problems/alternativeencryption/problem.yaml index 6d535fb82..2f1a35dc8 100644 --- a/test/problems/alternativeencryption/problem.yaml +++ b/test/problems/alternativeencryption/problem.yaml @@ -10,3 +10,6 @@ source: uuid: 8ee7605a-95a3-86c2-3995-0a10d365de46 license: cc by-sa rights_owner: author + +limits: + time_limit: 2 diff --git a/test/test_default_output_validator.py b/test/test_default_output_validator.py index a24d47f76..a904ad8c8 100644 --- a/test/test_default_output_validator.py +++ b/test/test_default_output_validator.py @@ -17,7 +17,7 @@ config.args.verbose = 2 config.args.error = True -config.set_default_args() +config.args.mark_set("verbose", "error") # return list of (flags, ans, out, expected result) diff --git a/test/test_generators_yaml.py b/test/test_generators_yaml.py index 480742a31..6b7be3a62 100644 --- a/test/test_generators_yaml.py +++ b/test/test_generators_yaml.py @@ -7,7 +7,6 @@ import config config.RUNNING_TEST = 
True -config.set_default_args() class MockSettings: diff --git a/test/test_problem_yaml.py b/test/test_problem_yaml.py index f7716e790..8107d2ea4 100644 --- a/test/test_problem_yaml.py +++ b/test/test_problem_yaml.py @@ -11,7 +11,7 @@ config.args.verbose = 2 config.args.error = True -config.set_default_args() +config.args.mark_set("verbose", "error") # return list of {yaml: {...}, ...} documents diff --git a/test/test_problems.py b/test/test_problems.py index 1a3723847..7c0e12cd3 100644 --- a/test/test_problems.py +++ b/test/test_problems.py @@ -22,14 +22,14 @@ "multipass", "constants", "alternativeencryption", -] + ["hellounix" if not util.is_mac() and not util.is_windows() else []] +] +if not util.is_mac() and not util.is_windows(): + PROBLEMS += ["hellounix"] RUN_DIR = Path.cwd().absolute() -@pytest.fixture(scope="class", params=PROBLEMS) -def setup_problem(request): - problemname = request.param +def _setup_problem(problemname): problem_dir = RUN_DIR / "test/problems" / problemname os.chdir(problem_dir) yield @@ -37,6 +37,11 @@ def setup_problem(request): os.chdir(RUN_DIR) +@pytest.fixture(scope="class", params=PROBLEMS) +def setup_problem(request): + yield from _setup_problem(request.param) + + @pytest.mark.usefixtures("setup_problem") class TestProblem: def test_problem(self): @@ -45,14 +50,7 @@ def test_problem(self): @pytest.fixture(scope="class") def setup_alternativeencryption_problem(request): - problem_dir = RUN_DIR / "test/problems/alternativeencryption" - os.chdir(problem_dir) - try: - tools.test(["tmp", "--clean"]) - yield - finally: - tools.test(["tmp", "--clean"]) - os.chdir(RUN_DIR) + yield from _setup_problem("alternativeencryption") @pytest.mark.usefixtures("setup_alternativeencryption_problem") @@ -67,14 +65,7 @@ def test_bad_check_testing_tool(self): @pytest.fixture(scope="class") def setup_constants_problem(request): - problem_dir = RUN_DIR / "test/problems/constants" - os.chdir(problem_dir) - try: - tools.test(["tmp", "--clean"]) - yield - 
finally: - tools.test(["tmp", "--clean"]) - os.chdir(RUN_DIR) + yield from _setup_problem("constants") @pytest.mark.usefixtures("setup_constants_problem") @@ -101,14 +92,7 @@ def test_zip(self): @pytest.fixture(scope="class") def setup_identity_problem(request): - problem_dir = RUN_DIR / "test/problems/identity" - os.chdir(problem_dir) - try: - tools.test(["tmp", "--clean"]) - yield - finally: - tools.test(["tmp", "--clean"]) - os.chdir(RUN_DIR) + yield from _setup_problem("identity") @pytest.mark.usefixtures("setup_identity_problem")