From 12d7f6c32200ca929cd8e1bfa825c1621b97d726 Mon Sep 17 00:00:00 2001 From: Daylin Morgan Date: Mon, 16 Oct 2023 14:53:01 -0500 Subject: [PATCH] feat: add preliminary support for pep723 --- examples/pep723.py | 16 + scripts/vendor-tomli.py | 87 ++++ src/viv/viv.py | 858 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 938 insertions(+), 23 deletions(-) create mode 100755 examples/pep723.py create mode 100644 scripts/vendor-tomli.py diff --git a/examples/pep723.py b/examples/pep723.py new file mode 100755 index 0000000..4eb67f4 --- /dev/null +++ b/examples/pep723.py @@ -0,0 +1,16 @@ +#!/usr/bin/env -S viv run -s +# /// pyproject +# [run] +# requires-python = ">=3.11" +# dependencies = [ +# "requests<3", +# "rich", +# ] +# /// + +import requests +from rich import print + +resp = requests.get("https://peps.python.org/api/peps.json") +data = resp.json() +print([(k, v["title"]) for k, v in data.items()][:10]) diff --git a/scripts/vendor-tomli.py b/scripts/vendor-tomli.py new file mode 100644 index 0000000..8318fb6 --- /dev/null +++ b/scripts/vendor-tomli.py @@ -0,0 +1,87 @@ +import re +from pathlib import Path + +FILES = ( + ("types", [[7, 11]]), + ("re", [[14, 107]]), + ("parser", [[20, 691]]), +) + +TOMLI_DELIM = ("##### START VENDORED TOMLI #####", "##### END VENDORED TOMLI #####") + +TOMLI_PREFACE = """ +# MODIFIED FROM https://github.com/hukkin/tomli +# see below for original license +# SPDX-License-Identifier: MIT +# SPDX-FileCopyrightText: 2021 Taneli Hukkinen +# Licensed to PSF under a Contributor Agreement. +""" + +VENDORED_IMPORTS = """ +import string # noqa +from collections.abc import Iterable # noqa +from functools import lru_cache # noqa +from datetime import date, datetime, time, timedelta, timezone, tzinfo # noqa +from types import MappingProxyType # noqa +from typing import IO, Any, Callable, NamedTuple # noqa +""" + +# REMOVE FOR ACTUAL VENDORED VERSION +tomli_text = VENDORED_IMPORTS +for f, slices in FILES: + text = Path(f"./tomli/src/tomli/_{f}.py").read_text() + for indices in slices: + tomli_text = "\n".join( + ( + tomli_text, + # black can add back spaces if it wants + *[ + line + for line in text.splitlines()[slice(*indices)] + if line.strip("\r\n") + ], + ) + ) + +IDENT_PATTERN = r"^(?P[A-Z_]*) =" +FUNC_PATTERN = r"^def (?P[a-zA-Z_]+)\(" + +idents = re.findall(IDENT_PATTERN, tomli_text, re.MULTILINE) +funcs = re.findall(FUNC_PATTERN, tomli_text, re.MULTILINE) + + +# TODO: USE ONE LOOP? +for pat in idents + funcs: + tomli_text = re.sub(f"(? + (?:\.[0-9](?:_?[0-9])*)? # optional fractional part + (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part +) +""", + flags=re.VERBOSE, +) +__tomli__RE_LOCALTIME = re.compile(__tomli___TIME_RE_STR) +__tomli__RE_DATETIME = re.compile( + rf""" +([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 +(?: + [Tt ] + {__tomli___TIME_RE_STR} + (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset +)? +""", + flags=re.VERBOSE, +) + + +def __tomli__match_to_datetime(match: re.Match) -> datetime | date: + """Convert a `__tomli__RE_DATETIME` match to `datetime.datetime` or `datetime.date`. + Raises ValueError if the match does not correspond to a valid date + or datetime. 
+ """ + ( + year_str, + month_str, + day_str, + hour_str, + minute_str, + sec_str, + micros_str, + zulu_time, + offset_sign_str, + offset_hour_str, + offset_minute_str, + ) = match.groups() + year, month, day = int(year_str), int(month_str), int(day_str) + if hour_str is None: + return date(year, month, day) + hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + if offset_sign_str: + tz: tzinfo | None = __tomli__cached_tz( + offset_hour_str, offset_minute_str, offset_sign_str + ) + elif zulu_time: + tz = timezone.utc + else: # local date-time + tz = None + return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) + + +@lru_cache(maxsize=None) +def __tomli__cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: + sign = 1 if sign_str == "+" else -1 + return timezone( + timedelta( + hours=sign * int(hour_str), + minutes=sign * int(minute_str), + ) + ) + + +def __tomli__match_to_localtime(match: re.Match) -> time: + hour_str, minute_str, sec_str, micros_str = match.groups() + micros = int(micros_str.ljust(6, "0")) if micros_str else 0 + return time(int(hour_str), int(minute_str), int(sec_str), micros) + + +def __tomli__match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: + if match.group("floatpart"): + return parse_float(match.group()) + return int(match.group(), 0) + + +__tomli__ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) +# Neither of these sets include quotation mark or backslash. They are +# currently handled as separate cases in the parser functions. +__tomli__ILLEGAL_BASIC_STR_CHARS = __tomli__ASCII_CTRL - frozenset("\t") +__tomli__ILLEGAL_MULTILINE_BASIC_STR_CHARS = __tomli__ASCII_CTRL - frozenset("\t\n") +__tomli__ILLEGAL_LITERAL_STR_CHARS = __tomli__ILLEGAL_BASIC_STR_CHARS +__tomli__ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ( + __tomli__ILLEGAL_MULTILINE_BASIC_STR_CHARS +) +__tomli__ILLEGAL_COMMENT_CHARS = __tomli__ILLEGAL_BASIC_STR_CHARS +__tomli__TOML_WS = frozenset(" \t") +__tomli__TOML_WS_AND_NEWLINE = __tomli__TOML_WS | frozenset("\n") +__tomli__BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") +__tomli__KEY_INITIAL_CHARS = __tomli__BARE_KEY_CHARS | frozenset("\"'") +__tomli__HEXDIGIT_CHARS = frozenset(string.hexdigits) +__tomli__BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( + { + "\\b": "\u0008", # backspace + "\\t": "\u0009", # tab + "\\n": "\u000A", # linefeed + "\\f": "\u000C", # form feed + "\\r": "\u000D", # carriage return + '\\"': "\u0022", # quote + "\\\\": "\u005C", # backslash + } +) + + +class TOMLDecodeError(ValueError): + """An error raised if a document is not valid TOML.""" + + +def __tomli__load( + __fp: IO[bytes], *, parse_float: ParseFloat = float +) -> dict[str, Any]: + """Parse TOML from a binary file object.""" + b = __fp.read() + try: + s = b.decode() + except AttributeError: + raise TypeError( + "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" + ) from None + return __tomli__loads(s, parse_float=parse_float) + + +def __tomli__loads( + __s: str, *, parse_float: ParseFloat = float +) -> dict[str, Any]: # noqa: C901 + """Parse TOML from a string.""" + # The spec allows converting "\r\n" to "\n", even in string + # literals. Let's do so to simplify parsing. 
+ src = __s.replace("\r\n", "\n") + pos = 0 + out = Output(NestedDict(), Flags()) + header: Key = () + parse_float = __tomli__make_safe_parse_float(parse_float) + # Parse one statement at a time + # (typically means one line in TOML source) + while True: + # 1. Skip line leading whitespace + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + # 2. Parse rules. Expect one of the following: + # - end of file + # - end of line + # - comment + # - key/value pair + # - append dict to list (and move to its namespace) + # - create dict (and move to its namespace) + # Skip trailing whitespace when applicable. + try: + char = src[pos] + except IndexError: + break + if char == "\n": + pos += 1 + continue + if char in __tomli__KEY_INITIAL_CHARS: + pos = __tomli__key_value_rule(src, pos, out, header, parse_float) + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + elif char == "[": + try: + second_char: str | None = src[pos + 1] + except IndexError: + second_char = None + out.flags.finalize_pending() + if second_char == "[": + pos, header = __tomli__create_list_rule(src, pos, out) + else: + pos, header = __tomli__create_dict_rule(src, pos, out) + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + elif char != "#": + raise __tomli__suffixed_err(src, pos, "Invalid statement") + # 3. Skip comment + pos = __tomli__skip_comment(src, pos) + # 4. Expect end of line or end of file + try: + char = src[pos] + except IndexError: + break + if char != "\n": + raise __tomli__suffixed_err( + src, pos, "Expected newline or end of document after a statement" + ) + pos += 1 + return out.data.dict + + +class Flags: + """Flags that map to parsed keys/namespaces.""" + + # Marks an immutable namespace (inline array or inline table). + FROZEN = 0 + # Marks a nest that has been explicitly created and can no longer + # be opened using the "[table]" syntax. 
+ EXPLICIT_NEST = 1 + + def __init__(self) -> None: + self._flags: dict[str, dict] = {} + self._pending_flags: set[tuple[Key, int]] = set() + + def add_pending(self, key: Key, flag: int) -> None: + self._pending_flags.add((key, flag)) + + def finalize_pending(self) -> None: + for key, flag in self._pending_flags: + self.set(key, flag, recursive=False) + self._pending_flags.clear() + + def unset_all(self, key: Key) -> None: + cont = self._flags + for k in key[:-1]: + if k not in cont: + return + cont = cont[k]["nested"] + cont.pop(key[-1], None) + + def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 + cont = self._flags + key_parent, key_stem = key[:-1], key[-1] + for k in key_parent: + if k not in cont: + cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont = cont[k]["nested"] + if key_stem not in cont: + cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} + cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) + + def is_(self, key: Key, flag: int) -> bool: + if not key: + return False # document root has no flags + cont = self._flags + for k in key[:-1]: + if k not in cont: + return False + inner_cont = cont[k] + if flag in inner_cont["recursive_flags"]: + return True + cont = inner_cont["nested"] + key_stem = key[-1] + if key_stem in cont: + cont = cont[key_stem] + return flag in cont["flags"] or flag in cont["recursive_flags"] + return False + + +class NestedDict: + def __init__(self) -> None: + # The parsed content of the TOML document + self.dict: dict[str, Any] = {} + + def get_or_create_nest( + self, + key: Key, + *, + access_lists: bool = True, + ) -> dict: + cont: Any = self.dict + for k in key: + if k not in cont: + cont[k] = {} + cont = cont[k] + if access_lists and isinstance(cont, list): + cont = cont[-1] + if not isinstance(cont, dict): + raise KeyError("There is no nest behind this key") + return cont + + def append_nest_to_list(self, key: Key) -> None: + cont = self.get_or_create_nest(key[:-1]) + last_key = key[-1] + if last_key in cont: + list_ = cont[last_key] + if not isinstance(list_, list): + raise KeyError("An object other than list found behind this key") + list_.append({}) + else: + cont[last_key] = [{}] + + +class Output(NamedTuple): + data: NestedDict + flags: Flags + + +def __tomli__skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: + try: + while src[pos] in chars: + pos += 1 + except IndexError: + pass + return pos + + +def __tomli__skip_until( + src: str, + pos: Pos, + expect: str, + *, + error_on: frozenset[str], + error_on_eof: bool, +) -> Pos: + try: + new_pos = src.index(expect, pos) + except ValueError: + new_pos = len(src) + if error_on_eof: + raise __tomli__suffixed_err(src, new_pos, f"Expected {expect!r}") from None + if not error_on.isdisjoint(src[pos:new_pos]): + while src[pos] not in error_on: + pos += 1 + raise __tomli__suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") + return new_pos + + +def __tomli__skip_comment(src: str, pos: Pos) -> Pos: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char == "#": + return __tomli__skip_until( + src, + pos + 1, + "\n", + error_on=__tomli__ILLEGAL_COMMENT_CHARS, + error_on_eof=False, + ) + return pos + + +def __tomli__skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: + while True: + pos_before_skip = pos + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS_AND_NEWLINE) + pos = __tomli__skip_comment(src, pos) + if pos == pos_before_skip: + return pos + + +def 
__tomli__create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 1 # Skip "[" + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + pos, key = __tomli__parse_key(src, pos) + if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): + raise __tomli__suffixed_err(src, pos, f"Cannot declare {key} twice") + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.get_or_create_nest(key) + except KeyError: + raise __tomli__suffixed_err(src, pos, "Cannot overwrite a value") from None + if not src.startswith("]", pos): + raise __tomli__suffixed_err( + src, pos, "Expected ']' at the end of a table declaration" + ) + return pos + 1, key + + +def __tomli__create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: + pos += 2 # Skip "[[" + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + pos, key = __tomli__parse_key(src, pos) + if out.flags.is_(key, Flags.FROZEN): + raise __tomli__suffixed_err( + src, pos, f"Cannot mutate immutable namespace {key}" + ) + # Free the namespace now that it points to another empty list item... + out.flags.unset_all(key) + # ...but this key precisely is still prohibited from table declaration + out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) + try: + out.data.append_nest_to_list(key) + except KeyError: + raise __tomli__suffixed_err(src, pos, "Cannot overwrite a value") from None + if not src.startswith("]]", pos): + raise __tomli__suffixed_err( + src, pos, "Expected ']]' at the end of an array declaration" + ) + return pos + 2, key + + +def __tomli__key_value_rule( + src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat +) -> Pos: + pos, key, value = __tomli__parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + abs_key_parent = header + key_parent + relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) + for cont_key in relative_path_cont_keys: + # Check that dotted key syntax does not redefine an existing table + if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): + raise __tomli__suffixed_err( + src, pos, f"Cannot redefine namespace {cont_key}" + ) + # Containers in the relative path can't be opened with the table syntax or + # dotted key/value syntax in following table sections. 
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) + if out.flags.is_(abs_key_parent, Flags.FROZEN): + raise __tomli__suffixed_err( + src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" + ) + try: + nest = out.data.get_or_create_nest(abs_key_parent) + except KeyError: + raise __tomli__suffixed_err(src, pos, "Cannot overwrite a value") from None + if key_stem in nest: + raise __tomli__suffixed_err(src, pos, "Cannot overwrite a value") + # Mark inline table and array namespaces recursively immutable + if isinstance(value, (dict, list)): + out.flags.set(header + key, Flags.FROZEN, recursive=True) + nest[key_stem] = value + return pos + + +def __tomli__parse_key_value_pair( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Key, Any]: + pos, key = __tomli__parse_key(src, pos) + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != "=": + raise __tomli__suffixed_err( + src, pos, "Expected '=' after a key in a key/value pair" + ) + pos += 1 + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + pos, value = __tomli__parse_value(src, pos, parse_float) + return pos, key, value + + +def __tomli__parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: + pos, key_part = __tomli__parse_key_part(src, pos) + key: Key = (key_part,) + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + while True: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char != ".": + return pos, key + pos += 1 + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + pos, key_part = __tomli__parse_key_part(src, pos) + key += (key_part,) + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + + +def __tomli__parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: + try: + char: str | None = src[pos] + except IndexError: + char = None + if char in __tomli__BARE_KEY_CHARS: + start_pos = pos + pos = __tomli__skip_chars(src, pos, __tomli__BARE_KEY_CHARS) + return pos, src[start_pos:pos] + if char == "'": + return __tomli__parse_literal_str(src, pos) + if char == '"': + return __tomli__parse_one_line_basic_str(src, pos) + raise __tomli__suffixed_err(src, pos, "Invalid initial character for a key part") + + +def __tomli__parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 + return __tomli__parse_basic_str(src, pos, multiline=False) + + +def __tomli__parse_array( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, list]: + pos += 1 + array: list = [] + pos = __tomli__skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + while True: + pos, val = __tomli__parse_value(src, pos, parse_float) + array.append(val) + pos = __tomli__skip_comments_and_array_ws(src, pos) + c = src[pos : pos + 1] + if c == "]": + return pos + 1, array + if c != ",": + raise __tomli__suffixed_err(src, pos, "Unclosed array") + pos += 1 + pos = __tomli__skip_comments_and_array_ws(src, pos) + if src.startswith("]", pos): + return pos + 1, array + + +def __tomli__parse_inline_table( + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, dict]: + pos += 1 + nested_dict = NestedDict() + flags = Flags() + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict + while True: + pos, key, value = __tomli__parse_key_value_pair(src, pos, parse_float) + key_parent, key_stem = key[:-1], key[-1] + if flags.is_(key, Flags.FROZEN): + raise __tomli__suffixed_err( + src, pos, f"Cannot mutate immutable namespace {key}" + ) + try: + nest = 
nested_dict.get_or_create_nest(key_parent, access_lists=False) + except KeyError: + raise __tomli__suffixed_err(src, pos, "Cannot overwrite a value") from None + if key_stem in nest: + raise __tomli__suffixed_err( + src, pos, f"Duplicate inline table key {key_stem!r}" + ) + nest[key_stem] = value + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + c = src[pos : pos + 1] + if c == "}": + return pos + 1, nested_dict.dict + if c != ",": + raise __tomli__suffixed_err(src, pos, "Unclosed inline table") + if isinstance(value, (dict, list)): + flags.set(key, Flags.FROZEN, recursive=True) + pos += 1 + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + + +def __tomli__parse_basic_str_escape( + src: str, pos: Pos, *, multiline: bool = False +) -> tuple[Pos, str]: + escape_id = src[pos : pos + 2] + pos += 2 + if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: + # Skip whitespace until next non-whitespace character or end of + # the doc. Error if non-whitespace is found before newline. + if escape_id != "\\\n": + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS) + try: + char = src[pos] + except IndexError: + return pos, "" + if char != "\n": + raise __tomli__suffixed_err(src, pos, "Unescaped '\\' in a string") + pos += 1 + pos = __tomli__skip_chars(src, pos, __tomli__TOML_WS_AND_NEWLINE) + return pos, "" + if escape_id == "\\u": + return __tomli__parse_hex_char(src, pos, 4) + if escape_id == "\\U": + return __tomli__parse_hex_char(src, pos, 8) + try: + return pos, __tomli__BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] + except KeyError: + raise __tomli__suffixed_err(src, pos, "Unescaped '\\' in a string") from None + + +def __tomli__parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: + return __tomli__parse_basic_str_escape(src, pos, multiline=True) + + +def __tomli__parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: + hex_str = src[pos : pos + hex_len] + if len(hex_str) != hex_len or not __tomli__HEXDIGIT_CHARS.issuperset(hex_str): + raise __tomli__suffixed_err(src, pos, "Invalid hex value") + pos += hex_len + hex_int = int(hex_str, 16) + if not __tomli__is_unicode_scalar_value(hex_int): + raise __tomli__suffixed_err( + src, pos, "Escaped character is not a Unicode scalar value" + ) + return pos, chr(hex_int) + + +def __tomli__parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: + pos += 1 # Skip starting apostrophe + start_pos = pos + pos = __tomli__skip_until( + src, pos, "'", error_on=__tomli__ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True + ) + return pos + 1, src[start_pos:pos] # Skip ending apostrophe + + +def __tomli__parse_multiline_str( + src: str, pos: Pos, *, literal: bool +) -> tuple[Pos, str]: + pos += 3 + if src.startswith("\n", pos): + pos += 1 + if literal: + delim = "'" + end_pos = __tomli__skip_until( + src, + pos, + "'''", + error_on=__tomli__ILLEGAL_MULTILINE_LITERAL_STR_CHARS, + error_on_eof=True, + ) + result = src[pos:end_pos] + pos = end_pos + 3 + else: + delim = '"' + pos, result = __tomli__parse_basic_str(src, pos, multiline=True) + # Add at maximum two extra apostrophes/quotes if the end sequence + # is 4 or 5 chars long instead of just 3. 
+ if not src.startswith(delim, pos): + return pos, result + pos += 1 + if not src.startswith(delim, pos): + return pos, result + delim + pos += 1 + return pos, result + (delim * 2) + + +def __tomli__parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: + if multiline: + error_on = __tomli__ILLEGAL_MULTILINE_BASIC_STR_CHARS + parse_escapes = __tomli__parse_basic_str_escape_multiline + else: + error_on = __tomli__ILLEGAL_BASIC_STR_CHARS + parse_escapes = __tomli__parse_basic_str_escape + result = "" + start_pos = pos + while True: + try: + char = src[pos] + except IndexError: + raise __tomli__suffixed_err(src, pos, "Unterminated string") from None + if char == '"': + if not multiline: + return pos + 1, result + src[start_pos:pos] + if src.startswith('"""', pos): + return pos + 3, result + src[start_pos:pos] + pos += 1 + continue + if char == "\\": + result += src[start_pos:pos] + pos, parsed_escape = parse_escapes(src, pos) + result += parsed_escape + start_pos = pos + continue + if char in error_on: + raise __tomli__suffixed_err(src, pos, f"Illegal character {char!r}") + pos += 1 + + +def __tomli__parse_value( # noqa: C901 + src: str, pos: Pos, parse_float: ParseFloat +) -> tuple[Pos, Any]: + try: + char: str | None = src[pos] + except IndexError: + char = None + # IMPORTANT: order conditions based on speed of checking and likelihood + # Basic strings + if char == '"': + if src.startswith('"""', pos): + return __tomli__parse_multiline_str(src, pos, literal=False) + return __tomli__parse_one_line_basic_str(src, pos) + # Literal strings + if char == "'": + if src.startswith("'''", pos): + return __tomli__parse_multiline_str(src, pos, literal=True) + return __tomli__parse_literal_str(src, pos) + # Booleans + if char == "t": + if src.startswith("true", pos): + return pos + 4, True + if char == "f": + if src.startswith("false", pos): + return pos + 5, False + # Arrays + if char == "[": + return __tomli__parse_array(src, pos, parse_float) + # Inline tables + if char == "{": + return __tomli__parse_inline_table(src, pos, parse_float) + # Dates and times + datetime_match = __tomli__RE_DATETIME.match(src, pos) + if datetime_match: + try: + datetime_obj = __tomli__match_to_datetime(datetime_match) + except ValueError as e: + raise __tomli__suffixed_err(src, pos, "Invalid date or datetime") from e + return datetime_match.end(), datetime_obj + localtime_match = __tomli__RE_LOCALTIME.match(src, pos) + if localtime_match: + return localtime_match.end(), __tomli__match_to_localtime(localtime_match) + # Integers and "normal" floats. + # The regex will greedily match any type starting with a decimal + # char, so needs to be located after handling of dates and times. 
+    number_match = __tomli__RE_NUMBER.match(src, pos)
+    if number_match:
+        return number_match.end(), __tomli__match_to_number(number_match, parse_float)
+    # Special floats
+    first_three = src[pos : pos + 3]
+    if first_three in {"inf", "nan"}:
+        return pos + 3, parse_float(first_three)
+    first_four = src[pos : pos + 4]
+    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+        return pos + 4, parse_float(first_four)
+    raise __tomli__suffixed_err(src, pos, "Invalid value")
+
+
+def __tomli__suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+    """Return a `TOMLDecodeError` where error message is suffixed with
+    coordinates in source."""
+
+    def coord_repr(src: str, pos: Pos) -> str:
+        if pos >= len(src):
+            return "end of document"
+        line = src.count("\n", 0, pos) + 1
+        if line == 1:
+            column = pos + 1
+        else:
+            column = pos - src.rindex("\n", 0, pos)
+        return f"line {line}, column {column}"
+
+    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
+
+
+def __tomli__is_unicode_scalar_value(codepoint: int) -> bool:
+    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
+
+
+def __tomli__make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
+    """A decorator to make `parse_float` safe.
+    `parse_float` must not return dicts or lists, because these types
+    would be mixed with parsed TOML tables and arrays, thus confusing
+    the parser. The returned decorated callable raises `ValueError`
+    instead of returning illegal types.
+    """
+    # The default `float` callable never returns illegal types. Optimize it.
+    if parse_float is float:
+        return float
+
+    def safe_parse_float(float_str: str) -> Any:
+        float_value = parse_float(float_str)
+        if isinstance(float_value, (dict, list)):
+            raise ValueError("parse_float must not return dicts or lists")
+        return float_value
+
+    return safe_parse_float
+
+
+##### END VENDORED TOMLI #####
+
+
+# fmt: on
+
+
+# fmt: on
 
 
 class Spinner:
 
@@ -89,7 +879,7 @@ class Spinner:
     def spinner_task(self) -> None:
         while self.busy:
             self.write_next()
-            time.sleep(self.delay)
+            sleep(self.delay)
             self.remove_spinner()
 
     def __enter__(self) -> None:
 
@@ -1230,27 +2020,47 @@ def uses_viv(txt: str) -> bool:
     )
 
 
-DEPENDENCY_BLOCK_MARKER = r"(?i)^#\s+script\s+dependencies:\s*$"
+METADATA_BLOCK = (
+    r"(?m)^# /// (?P<type>[a-zA-Z0-9-]+)$\s(?P<content>(^#(| .*)$\s)+)^# ///$"
+)
 
 
-def read_dependency_block(txt: str) -> Generator[str, None, None]:
-    lines = iter(txt.splitlines())
-    for line in lines:
-        if re.match(DEPENDENCY_BLOCK_MARKER, line):
-            for line in lines:
-                if not line.startswith("#"):
-                    break
-                # Remove comments. An inline comment is introduced by
-                # a hash, which must be preceded and followed by a
-                # space. The initial hash will be skipped as it has
-                # no space before it. 
- line = line.split(" # ", maxsplit=1)[0] - line = line[1:].strip() - if not line: - continue - # let pip handle the requirement errors - yield line - break +def read_metadata_block(script: str) -> dict | None: + name = "pyproject" + matches = list( + filter(lambda m: m.group("type") == name, re.finditer(METADATA_BLOCK, script)) + ) + if len(matches) > 1: + raise ValueError(f"Multiple {name} blocks found") + elif len(matches) == 1: + return __tomli__loads( + "\n".join((line[2:] for line in matches[0].group(0).splitlines()[1:-1])) + ) + else: + return None + + +# DEPENDENCY_BLOCK_MARKER = r"(?i)^#\s+script\s+dependencies:\s*$" +# +# def read_dependency_block(txt: str) -> Generator[str, None, None]: +# lines = iter(txt.splitlines()) +# for line in lines: +# if re.match(DEPENDENCY_BLOCK_MARKER, line): +# for line in lines: +# if not line.startswith("#"): +# break +# # Remove comments. An inline comment is introduced by +# # a hash, which must be preceded and followed by a +# # space. The initial hash will be skipped as it has +# # no space before it. +# line = line.split(" # ", maxsplit=1)[0] +# line = line[1:].strip() +# if not line: +# continue +# # let pip handle the requirement errors +# yield line +# break +# def _parse_date(txt: str) -> datetime: @@ -1758,7 +2568,9 @@ class Viv: script_text = fetch_script(script) viv_used = uses_viv(script_text) - deps = list(read_dependency_block(script_text)) + deps = ( + read_metadata_block(script_text).get("run", {}).get("dependencies", []) + ) if viv_used and deps: error(