instance_id | patch | repo | base_commit | hints_text | test_patch | problem_statement | version | environment_setup_commit | FAIL_TO_PASS | PASS_TO_PASS | meta | created_at | license |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
OCA__oca-github-bot-215 | diff --git a/src/oca_github_bot/tasks/migration_issue_bot.py b/src/oca_github_bot/tasks/migration_issue_bot.py
index 510c435..b4911a7 100644
--- a/src/oca_github_bot/tasks/migration_issue_bot.py
+++ b/src/oca_github_bot/tasks/migration_issue_bot.py
@@ -39,7 +39,7 @@ def _set_lines_issue(gh_pr_user_login, gh_pr_number, issue_body, module):
if added: # Bypass the checks for faster completion
lines.append(line)
continue
- groups = re.match(rf"^- \[( |x)\] {module}( |\r)", line)
+ groups = re.match(rf"^- \[( |x)\] \b{module}\b", line)
if groups: # Line found
# Get the Old PR value
regex = r"\#(\d*)"
| OCA/oca-github-bot | 0d64b24e33fd339fcc5415c38421948ac511b6aa | diff --git a/tests/test_migration_issue_bot.py b/tests/test_migration_issue_bot.py
index 1cd703d..5ace8f7 100644
--- a/tests/test_migration_issue_bot.py
+++ b/tests/test_migration_issue_bot.py
@@ -53,6 +53,14 @@ def test_set_lines_issue(gh):
f"- [x] {module} - By @{gh_pr_user_login} - #{gh_pr_number}\n"
f"- [ ] z_module_1 - By @pedrobaeza - #2",
),
+ (
+ f"Issue with list containing the module with no PR\n"
+ f"- [x] {module}\n"
+ f"- [ ] z_module_1 - By @pedrobaeza - #2",
+ f"Issue with list containing the module with no PR\n"
+ f"- [x] {module} - By @{gh_pr_user_login} - #{gh_pr_number}\n"
+ f"- [ ] z_module_1 - By @pedrobaeza - #2",
+ ),
(
"Issue with no list",
f"Issue with no list\n"
| ocabot migration duplicating lines in migration issue?
See for example https://github.com/OCA/server-env/issues/120 where the issue was created correctly but now lines with references to a PR are duplicated. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_migration_issue_bot.py::test_set_lines_issue"
] | [
"tests/test_migration_issue_bot.py::test_create_or_find_branch_milestone",
"tests/test_migration_issue_bot.py::test_find_issue"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-10-10T10:17:07Z" | mit |
|
OCA__oca-github-bot-59 | diff --git a/HISTORY.rst b/HISTORY.rst
index 0f9ae33..08d2ad2 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,6 +1,10 @@
next
~~~~
+**Features**
+
+- Improved command parser (#53)
+
**Bug fixes**
- Do not attempt to build wheels for uninstallable addons.
diff --git a/src/oca_github_bot/commands.py b/src/oca_github_bot/commands.py
index e377d69..7aba1e7 100644
--- a/src/oca_github_bot/commands.py
+++ b/src/oca_github_bot/commands.py
@@ -6,7 +6,7 @@ import re
from .tasks import merge_bot
BOT_COMMAND_RE = re.compile(
- r"/ocabot +(?P<command>\w+)( +(?P<options>.*?))? *$", re.MULTILINE
+ r"/ocabot[ \t]+(?P<command>\w+)(?P<options>[ \t\w]*)(\W|\r?$)", re.MULTILINE
)
@@ -23,6 +23,7 @@ class InvalidOptionsError(Exception):
class BotCommand:
def __init__(self, name, options):
self.name = name
+ self.options = options
self.parse_options(options)
@classmethod
@@ -46,8 +47,8 @@ class BotCommandMerge(BotCommand):
def parse_options(self, options):
if not options:
return
- if options in ("major", "minor", "patch"):
- self.bumpversion = options
+ if len(options) == 1 and options[0] in ("major", "minor", "patch"):
+ self.bumpversion = options[0]
else:
raise InvalidOptionsError(self.name, options)
@@ -60,4 +61,6 @@ class BotCommandMerge(BotCommand):
def parse_commands(text):
""" Parse a text and return an iterator of BotCommand objects. """
for mo in BOT_COMMAND_RE.finditer(text):
- yield BotCommand.create(mo.group("command"), mo.group("options"))
+ yield BotCommand.create(
+ mo.group("command"), mo.group("options").strip().split()
+ )
| OCA/oca-github-bot | 0ab1c02e2ef25e2b3231a326e04c31b40710ce6f | diff --git a/tests/test_commands.py b/tests/test_commands.py
index 1195b53..2091988 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -22,11 +22,38 @@ def test_parse_command_multi():
/ocabot merge major
/ocabot merge patch
/ocabot merge patch
+ /ocabot merge, please
+ /ocabot merge minor, please
+ /ocabot merge minor, please
+ /ocabot merge.
+ /ocabot merge patch. blah
+ /ocabot merge minor # ignored
...
"""
)
)
- assert len(cmds) == 3
+ assert [(cmd.name, cmd.options) for cmd in cmds] == [
+ ("merge", ["major"]),
+ ("merge", ["patch"]),
+ ("merge", ["patch"]),
+ ("merge", []),
+ ("merge", ["minor"]),
+ ("merge", ["minor"]),
+ ("merge", []),
+ ("merge", ["patch"]),
+ ("merge", ["minor"]),
+ ]
+
+
+def test_parse_command_2():
+ cmds = list(
+ parse_commands(
+ "Great contribution, thanks!\r\n\r\n"
+ "/ocabot merge\r\n\r\n"
+ "Please forward port it to 12.0."
+ )
+ )
+ assert [(cmd.name, cmd.options) for cmd in cmds] == [("merge", [])]
def test_parse_command_merge():
| merge bot: parse command inside a greater message
For now, the merge bot only reacts to the merge command putting that comment in an isolated message (example of greater message: https://github.com/OCA/account-invoicing/pull/552#issuecomment-507203903). | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_commands.py::test_parse_command_multi",
"tests/test_commands.py::test_parse_command_2"
] | [
"tests/test_commands.py::test_parse_command_not_a_command",
"tests/test_commands.py::test_parse_command_merge"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-07-31T11:18:29Z" | mit |
|
Oefenweb__python-untraceables-5 | diff --git a/bin/randomize-ids b/bin/randomize-ids
index a6f70bb..8dbd993 100755
--- a/bin/randomize-ids
+++ b/bin/randomize-ids
@@ -155,6 +155,7 @@ def run_sql(args):
database = args.database
foreign_key_checks = args.foreign_key_checks
+ unique_checks = args.unique_checks
if sys.stdin.isatty():
cli_utility.print_e('Could not read any data from stdin')
@@ -165,6 +166,12 @@ def run_sql(args):
foreign_key_checks_off = [query_utility.get_foreign_key_checks(foreign_key_checks)]
foreign_key_checks_on = [query_utility.get_foreign_key_checks(not foreign_key_checks)]
+ unique_checks_off = []
+ unique_checks_on = []
+ if not foreign_key_checks:
+ unique_checks_off = [query_utility.get_unique_checks(unique_checks)]
+ unique_checks_on = [query_utility.get_unique_checks(not unique_checks)]
+
statements_from_stdin = mysql_utility.split_file(sys.stdin)
connection = cursor = None
@@ -173,7 +180,9 @@ def run_sql(args):
connection.autocommit(True)
cursor = mysql_utility.get_cursor(connection)
- statements = chain(iter(foreign_key_checks_off), statements_from_stdin, iter(foreign_key_checks_on))
+ statements = chain(iter(foreign_key_checks_off), iter(unique_checks_off),
+ statements_from_stdin,
+ iter(unique_checks_on), iter(foreign_key_checks_on))
for statement in statements:
stripped_statement = statement.strip()
if stripped_statement != '':
@@ -272,6 +281,15 @@ def main():
dest='foreign_key_checks',
help='Whether or not to enable FOREIGN_KEY_CHECKS')
run_sql_parser.set_defaults(foreign_key_checks=True)
+ run_sql_parser.add_argument('--unique-checks',
+ action='store_true',
+ dest='unique_checks',
+ help='Whether or not to enable UNIQUE_CHECKS')
+ run_sql_parser.add_argument('--no-unique-checks',
+ action='store_false',
+ dest='unique_checks',
+ help='Whether or not to enable UNIQUE_CHECKS')
+ run_sql_parser.set_defaults(foreign_key_checks=True)
parser.add_argument('-v', '--verbose', action='store_true', help='Be more verbose')
diff --git a/untraceables/utilities/query.py b/untraceables/utilities/query.py
index 4fe0f7a..0ec4e24 100644
--- a/untraceables/utilities/query.py
+++ b/untraceables/utilities/query.py
@@ -72,6 +72,19 @@ def get_foreign_key_checks(enabled):
return 'SET FOREIGN_KEY_CHECKS={0:d}'.format(enabled)
+def get_unique_checks(enabled):
+ """
+ Gets the query the enable / disable UNIQUE_CHECKS.
+
+ :type bool
+ :param enabled: Whether or not to enable
+ :rtype str
+ :return A query
+ """
+
+ return 'SET UNIQUE_CHECKS={0:d}'.format(enabled)
+
+
def get_randomize(database, table, columns, column, mapping_database, mapping_table):
"""
Gets the queries to randomize a table / column in a given database.
| Oefenweb/python-untraceables | 78fbaf9c974f80a8a614211fe4598ffac0e21f69 | diff --git a/untraceables/test/test_query.py b/untraceables/test/test_query.py
index e09065e..8b2fa23 100644
--- a/untraceables/test/test_query.py
+++ b/untraceables/test/test_query.py
@@ -63,6 +63,23 @@ class TestQuery(unittest.TestCase):
actual = query_utility.get_foreign_key_checks(0)
self.assertEqual(expected, actual)
+ def test_get_unique_checks(self):
+ """
+ Tests `get_unique_checks`.
+ """
+
+ actual = query_utility.get_unique_checks(True)
+ expected = 'SET UNIQUE_CHECKS=1'
+ self.assertEqual(expected, actual)
+ actual = query_utility.get_unique_checks(1)
+ self.assertEqual(expected, actual)
+
+ actual = query_utility.get_unique_checks(False)
+ expected = 'SET UNIQUE_CHECKS=0'
+ self.assertEqual(expected, actual)
+ actual = query_utility.get_unique_checks(0)
+ self.assertEqual(expected, actual)
+
def test_get_randomize(self):
"""
Tests `get_randomize`.
| Make it possible to disable UNIQUE_CHECKS when using run-sql
Using a similar option as `--no-foreign-key-checks`, for instance `--no-unique-checks` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"untraceables/test/test_query.py::TestQuery::test_get_unique_checks"
] | [
"untraceables/test/test_query.py::TestQuery::test_get_foreign_key_checks",
"untraceables/test/test_query.py::TestQuery::test_get_max_id",
"untraceables/test/test_query.py::TestQuery::test_get_randomize",
"untraceables/test/test_query.py::TestQuery::test_get_show_columns",
"untraceables/test/test_query.py::TestQuery::test_get_show_table_columns"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2017-04-07T13:48:15Z" | mit |
|
OnroerendErfgoed__skosprovider_rdf-119 | diff --git a/HISTORY.rst b/HISTORY.rst
index b0e2a7b..3c5a727 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,3 +1,9 @@
+1.3.0 (??-12-2022)
+------------------
+
+- Don't export local id as dcterms.identifier when it's equal to the URI (#117)
+- Add support for Python 3.10 and Python 3.11 (#120)
+
1.2.0 (11-10-2022)
------------------
diff --git a/setup.py b/setup.py
index b97c9d9..b917507 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ requires = [
setup(
name='skosprovider_rdf',
- version='1.2.0',
+ version='1.3.0',
description='skosprovider_rdf',
long_description=README + '\n\n' + HISTORY,
long_description_content_type='text/x-rst',
diff --git a/skosprovider_rdf/utils.py b/skosprovider_rdf/utils.py
index ea19f7b..1914a80 100644
--- a/skosprovider_rdf/utils.py
+++ b/skosprovider_rdf/utils.py
@@ -142,7 +142,8 @@ def _add_c(graph, provider, id):
c = provider.get_by_id(id)
subject = URIRef(c.uri)
_add_in_dataset(graph, subject, provider)
- graph.add((subject, DCTERMS.identifier, Literal(c.id)))
+ if c.id != c.uri:
+ graph.add((subject, DCTERMS.identifier, Literal(c.id)))
conceptscheme = URIRef(provider.concept_scheme.uri)
graph.add((subject, SKOS.inScheme, conceptscheme))
_add_labels(graph, c, subject)
| OnroerendErfgoed/skosprovider_rdf | 9a68fb35e971caac8d7df45e6371f3132a85e9f2 | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 86b9f11..446b318 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -2,6 +2,7 @@ import pytest
from rdflib import Graph
from rdflib import Namespace
from rdflib.namespace import DCTERMS
+from rdflib.namespace import DC
from rdflib.namespace import RDF
from rdflib.namespace import SKOS
from rdflib.term import Literal
@@ -255,6 +256,26 @@ class TestRDFDumperProducts:
graph_dump = utils.rdf_dumper(products_provider)
assert isinstance(graph_dump, Graph)
+ def test_dump_rdf_no_uri_as_local_identifier(self, products_provider, caplog):
+ caplog.set_level(logging.DEBUG)
+ graph_dump = utils.rdf_dumper(products_provider)
+
+ prod_uri = 'http://www.prodcuts.com/Product'
+ jewel_uri = 'http://www.products.com/Jewellery'
+ perfume_uri = 'http://www.products.com/Perfume'
+
+ prod = URIRef(prod_uri)
+ jewel = URIRef(jewel_uri)
+ perfume = URIRef(perfume_uri)
+
+ log.debug(graph_dump.serialize(format='turtle'))
+
+ assert (prod, DCTERMS.identifier, Literal(prod_uri)) not in graph_dump
+ assert (prod, DC.identifier, Literal(prod_uri)) not in graph_dump
+ assert (jewel, DCTERMS.identifier, Literal(jewel_uri)) not in graph_dump
+ assert (jewel, DC.identifier, Literal(jewel_uri)) not in graph_dump
+ assert (perfume, DCTERMS.identifier, Literal(perfume_uri)) not in graph_dump
+ assert (perfume, DC.identifier, Literal(perfume_uri)) not in graph_dump
class TestRDFDumperTrees:
| Don't export local id as dcterms.identifier when it's the URI
When reading a SKOS file, we assign the URI to the id attribute because our skosprovider expects an internal identifier. But, when exporting the provider to SKOS again, we export this id to dcterms:identifier. This creates extra information in the SKOS file that isn't completely necessary. Solution: when exporting to SKOS, if id is equal to uri, don't generate dcterms:identifier statements. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_no_uri_as_local_identifier"
] | [
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_dictionary_to_rdf",
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_collections_roundtrip",
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_concept_with_superordinates",
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_to_rdf",
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_compare_type",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_tree_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_larch_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_chestnut_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_oak_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_one_id_to_rdf_and_reload",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_conceptscheme_tree_to_rdf"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-12-15T10:18:13Z" | mit |
|
Open-EO__openeo-pg-parser-python-7 | diff --git a/src/openeo_pg_parser_python/translate_process_graph.py b/src/openeo_pg_parser_python/translate_process_graph.py
index bbbffde..3982dcd 100644
--- a/src/openeo_pg_parser_python/translate_process_graph.py
+++ b/src/openeo_pg_parser_python/translate_process_graph.py
@@ -216,10 +216,12 @@ def link_nodes(graph):
def translate_graph(pg_filepath):
if isinstance(pg_filepath, str):
- if os.path.isfile(pg_filepath):
- pg_dict = load(open(pg_filepath))
+ pg_dict = load(open(pg_filepath))
elif isinstance(pg_filepath, dict):
pg_dict = pg_filepath
+ else:
+ raise ValueError("'pg_filepath must either be file path to a JSON file or a dictionary.'")
+
nodes = OrderedDict()
nodes, _, _, _ = walk_pg_graph(nodes, pg_dict)
# create graph object
| Open-EO/openeo-pg-parser-python | fbc4daccbc518e19099a72b9891530c57de9fc17 | diff --git a/tests/test_translate_pg.py b/tests/test_translate_pg.py
new file mode 100644
index 0000000..20140b8
--- /dev/null
+++ b/tests/test_translate_pg.py
@@ -0,0 +1,21 @@
+import unittest
+from openeo_pg_parser_python.translate_process_graph import translate_graph
+
+
+class PGTranslateTester(unittest.TestCase):
+ """ Responsible for testing the translation of an openEO process graph. """
+
+ def setUp(self):
+ """ Specifies paths to the test data. """
+
+ self.pg_test_1_filepath = r"process_graphs/test_1.json"
+
+ def test_pg_not_found(self):
+ pg_filepath = r"process_graphs/does_not_exist.json"
+ try:
+ translate_graph(pg_filepath)
+ except FileNotFoundError:
+ assert True
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Unclear error when JSON file not found
Currently, when the input JSON file (or related python dict) is not found, the following error is returned:
`UnboundLocalError: local variable 'pg_dict' referenced before assignment`
This is not clear and very unfriendly to debug; add a check that the input file exists or that the input dict is not empty. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_translate_pg.py::PGTranslateTester::test_pg_not_found"
] | [] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2019-12-05T10:35:30Z" | mit |
|
Open-EO__openeo-python-client-230 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 53f9ab9..831591f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#225](https://github.com/Open-EO/openeo-python-client/issues/225), [openeo-api#410](https://github.com/Open-EO/openeo-api/issues/410))
- Add `DataCube.dimension_labels()` (EP-4008)
- Add `Connection.load_result()` (EP-4008)
+- Add proper support for child callbacks in `fit_curve` and `predict_curve` ([#229](https://github.com/Open-EO/openeo-python-client/issues/229))
### Changed
diff --git a/openeo/rest/datacube.py b/openeo/rest/datacube.py
index 213923d..1468349 100644
--- a/openeo/rest/datacube.py
+++ b/openeo/rest/datacube.py
@@ -785,7 +785,7 @@ class DataCube(ImageCollection, _FromNodeMixin):
as `apply`, `apply_dimension`, `reduce`, ....)
:param process: process id string, PGNode or callable that uses the ProcessBuilder mechanism to build a process
- :parameter parameter_mapping: mapping of child (callback) parameters names to parent process parameter names
+ :param parent_parameters: list of parameter names defined for child process
:return:
"""
@@ -895,7 +895,7 @@ class DataCube(ImageCollection, _FromNodeMixin):
Add a reduce process with given reducer callback along given dimension
:param dimension: the label of the dimension to reduce
- :param reducer: a callback function that creates a process graph, see :ref:`callbackfunctions`
+ :param reducer: "child callback" function, see :ref:`callbackfunctions`
"""
# TODO: check if dimension is valid according to metadata? #116
# TODO: #125 use/test case for `reduce_dimension_binary`?
@@ -1680,8 +1680,7 @@ class DataCube(ImageCollection, _FromNodeMixin):
arguments["options"] = options
return self.process(process_id="sar_backscatter", arguments=arguments)
-
- def fit_curve(self, parameters, function, dimension ):
+ def fit_curve(self, parameters: list, function: Union[str, PGNode, typing.Callable], dimension: str):
"""
EXPERIMENTAL: https://github.com/Open-EO/openeo-processes/pull/240
Use non-linear least squares to fit a model function `y = f(x, parameters)` to data.
@@ -1689,32 +1688,34 @@ class DataCube(ImageCollection, _FromNodeMixin):
The process throws an `InvalidValues` exception if invalid values are encountered.
Invalid values are finite numbers (see also ``is_valid()``).
- @param parameters:
- @param function:
- @param dimension:
- @return:
+ :param parameters:
+ :param function: "child callback" function, see :ref:`callbackfunctions`
+ :dimension:
"""
return self.process(process_id="fit_curve", arguments={
"data": THIS,
"parameters": parameters,
- "function": function,
+ "function": self._get_callback(function, parent_parameters=["x", "parameters"]),
"dimension": dimension
})
- def predict_curve(self, parameters, function, dimension, labels = None ):
+ def predict_curve(
+ self, parameters: list, function: Union[str, PGNode, typing.Callable], dimension: str,
+ labels=None
+ ):
"""
EXPERIMENTAL: https://github.com/Open-EO/openeo-processes/pull/240
Predict values using a model function and pre-computed parameters.
- @param parameters:
- @param function:
- @param dimension:
+ :param parameters:
+ :param function: "child callback" function, see :ref:`callbackfunctions`
+ :dimension:
@return:
"""
return self.process(process_id="predict_curve", arguments={
"data": THIS,
"parameters": parameters,
- "function": function,
+ "function": self._get_callback(function, parent_parameters=["x", "parameters"]),
"dimension": dimension,
"labels": labels
})
| Open-EO/openeo-python-client | 7f15d6ecb839cd2916fa5bae59953cd647d49337 | diff --git a/tests/rest/datacube/test_datacube100.py b/tests/rest/datacube/test_datacube100.py
index 77cc452..68da969 100644
--- a/tests/rest/datacube/test_datacube100.py
+++ b/tests/rest/datacube/test_datacube100.py
@@ -1174,3 +1174,100 @@ def test_dimension_labels_invalid(con100):
# Don't validate when no metadata
cube = con100.load_collection("S2", fetch_metadata=False).dimension_labels("unv6lidd")
assert cube.flat_graph()["dimensionlabels1"]["arguments"]["dimension"] == "unv6lidd"
+
+
+def test_fit_curve_callback(con100: Connection):
+ from openeo.processes import array_element
+ def model(x, parameters):
+ return array_element(parameters, 0) + array_element(parameters, 1) * x
+
+ img = con100.load_collection("S2")
+ res = img.fit_curve(parameters=[0, 0], function=model, dimension="t")
+ expected = {
+ 'loadcollection1': {
+ 'process_id': 'load_collection',
+ 'arguments': {'id': 'S2', 'spatial_extent': None, 'temporal_extent': None},
+ },
+ 'fitcurve1': {
+ 'process_id': 'fit_curve',
+ 'arguments': {
+ 'data': {'from_node': 'loadcollection1'},
+ 'parameters': [0, 0],
+ 'function': {
+ 'process_graph': {
+ 'arrayelement1': {
+ 'process_id': 'array_element',
+ 'arguments': {'data': {'from_parameter': 'parameters'}, 'index': 0},
+ },
+ 'arrayelement2': {
+ 'process_id': 'array_element',
+ 'arguments': {'data': {'from_parameter': 'parameters'}, 'index': 1},
+ },
+ 'multiply1': {
+ 'process_id': 'multiply',
+ 'arguments': {'x': {'from_node': 'arrayelement2'}, 'y': {'from_parameter': 'x'}},
+ },
+ 'add1': {
+ 'process_id': 'add',
+ 'arguments': {'x': {'from_node': 'arrayelement1'}, 'y': {'from_node': 'multiply1'}},
+ 'result': True
+ },
+ }
+ },
+ 'dimension': 't',
+ },
+ 'result': True
+ },
+ }
+ assert res.graph == expected
+
+
+def test_predict_curve_callback(con100: Connection):
+ from openeo.processes import array_element, cos
+ def model(x, parameters):
+ return array_element(parameters, 0) * cos(array_element(parameters, 1) * x)
+
+ img = con100.load_collection("S2")
+ res = img.predict_curve(parameters=[0, 0], function=model, dimension="t")
+ expected = {
+ 'loadcollection1': {
+ 'process_id': 'load_collection',
+ 'arguments': {'id': 'S2', 'spatial_extent': None, 'temporal_extent': None},
+ },
+ 'predictcurve1': {
+ 'process_id': 'predict_curve',
+ 'arguments': {
+ 'data': {'from_node': 'loadcollection1'},
+ 'parameters': [0, 0],
+ 'function': {
+ 'process_graph': {
+ 'arrayelement1': {
+ 'process_id': 'array_element',
+ 'arguments': {'data': {'from_parameter': 'parameters'}, 'index': 0},
+ },
+ 'arrayelement2': {
+ 'process_id': 'array_element',
+ 'arguments': {'data': {'from_parameter': 'parameters'}, 'index': 1},
+ },
+ 'multiply1': {
+ 'process_id': 'multiply',
+ 'arguments': {'x': {'from_node': 'arrayelement2'}, 'y': {'from_parameter': 'x'}},
+ },
+ 'cos1': {
+ 'process_id': 'cos',
+ 'arguments': {'x': {'from_node': "multiply1"}},
+ },
+ 'multiply2': {
+ 'process_id': 'multiply',
+ 'arguments': {'x': {'from_node': 'arrayelement1'}, 'y': {'from_node': 'cos1'}},
+ 'result': True
+ },
+ }
+ },
+ 'dimension': 't',
+ 'labels': None,
+ },
+ 'result': True
+ },
+ }
+ assert res.graph == expected
| Proper callable support in fit_curve and related
from https://github.com/openEOPlatform/documentation/issues/10
```python
curve_fitting = l2a_bands.fit_curve(
parameters=[1,1,1], # Initial guess of the parameters
dimension="t", # Fit the function along the temporal dimension
function=l2a_bands._get_callback(fit_function, parent_parameters=["data","parameters"])
)
```
it's unfortunate that you have to call a private method `l2a_bands._get_callback` like that, we should make sure that you can just do `function=fit_function` directly | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/rest/datacube/test_datacube100.py::test_fit_curve_callback",
"tests/rest/datacube/test_datacube100.py::test_predict_curve_callback"
] | [
"tests/rest/datacube/test_datacube100.py::test_datacube_graph",
"tests/rest/datacube/test_datacube100.py::test_datacube_flat_graph",
"tests/rest/datacube/test_datacube100.py::test_datacube_legacy_flatten",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs0-expected0]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs1-expected1]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs2-expected2]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs3-expected3]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs4-expected4]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs5-expected5]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_kwargs[kwargs6-expected6]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_parameter",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args0-expected0]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args1-expected1]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args2-expected2]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args3-expected3]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args4-expected4]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args5-expected5]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_positional_args[args6-expected6]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_legacy_positional_args",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs[args0-kwargs0-expected0]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs[args1-kwargs1-expected1]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs[args2-kwargs2-expected2]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs[args3-kwargs3-expected3]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs[args4-kwargs4-expected4]",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs_conflict[args0-kwargs0-Don't",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs_conflict[args1-kwargs1-Don't",
"tests/rest/datacube/test_datacube100.py::test_filter_bbox_args_and_kwargs_conflict[args2-kwargs2-Don't",
"tests/rest/datacube/test_datacube100.py::test_filter_spatial",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_basic",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_types[polygon0-expected_geometries0]",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_types[polygon1-expected_geometries1]",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_types[polygon2-expected_geometries2]",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_types[polygon3-expected_geometries3]",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_types[polygon4-expected_geometries4]",
"tests/rest/datacube/test_datacube100.py::test_aggregate_spatial_with_crs",
"tests/rest/datacube/test_datacube100.py::test_aggregate_temporal",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_basic",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_types[polygon0-expected_mask0]",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_types[polygon1-expected_mask1]",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_types[polygon2-expected_mask2]",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_types[polygon3-expected_mask3]",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_types[polygon4-expected_mask4]",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_with_crs",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_parameter",
"tests/rest/datacube/test_datacube100.py::test_mask_polygon_path",
"tests/rest/datacube/test_datacube100.py::test_mask_raster",
"tests/rest/datacube/test_datacube100.py::test_merge_cubes",
"tests/rest/datacube/test_datacube100.py::test_resample_spatial",
"tests/rest/datacube/test_datacube100.py::test_ndvi_simple",
"tests/rest/datacube/test_datacube100.py::test_ndvi_args",
"tests/rest/datacube/test_datacube100.py::test_rename_dimension",
"tests/rest/datacube/test_datacube100.py::test_reduce_dimension",
"tests/rest/datacube/test_datacube100.py::test_reduce_dimension_binary",
"tests/rest/datacube/test_datacube100.py::test_reduce_dimension_name",
"tests/rest/datacube/test_datacube100.py::test_metadata_load_collection_100",
"tests/rest/datacube/test_datacube100.py::test_apply_absolute_pgnode",
"tests/rest/datacube/test_datacube100.py::test_load_collection_properties",
"tests/rest/datacube/test_datacube100.py::test_load_collection_properties_process_builder_function",
"tests/rest/datacube/test_datacube100.py::test_load_collection_temporalextent_process_builder_function",
"tests/rest/datacube/test_datacube100.py::test_apply_dimension_temporal_cumsum_with_target",
"tests/rest/datacube/test_datacube100.py::test_apply_dimension_modify_bands",
"tests/rest/datacube/test_datacube100.py::test_apply_neighborhood_udf",
"tests/rest/datacube/test_datacube100.py::test_filter_spatial_callback",
"tests/rest/datacube/test_datacube100.py::test_custom_process_kwargs_datacube",
"tests/rest/datacube/test_datacube100.py::test_custom_process_kwargs_datacube_pg",
"tests/rest/datacube/test_datacube100.py::test_custom_process_kwargs_this",
"tests/rest/datacube/test_datacube100.py::test_custom_process_kwargs_namespaced",
"tests/rest/datacube/test_datacube100.py::test_custom_process_arguments_datacube",
"tests/rest/datacube/test_datacube100.py::test_custom_process_arguments_datacube_pg",
"tests/rest/datacube/test_datacube100.py::test_custom_process_arguments_this",
"tests/rest/datacube/test_datacube100.py::test_custom_process_arguments_namespacd",
"tests/rest/datacube/test_datacube100.py::test_save_user_defined_process",
"tests/rest/datacube/test_datacube100.py::test_save_user_defined_process_public",
"tests/rest/datacube/test_datacube100.py::test_save_result_format",
"tests/rest/datacube/test_datacube100.py::test_to_json",
"tests/rest/datacube/test_datacube100.py::test_to_json_compact",
"tests/rest/datacube/test_datacube100.py::test_sar_backscatter_defaults",
"tests/rest/datacube/test_datacube100.py::test_sar_backscatter_custom",
"tests/rest/datacube/test_datacube100.py::test_sar_backscatter_coefficient_none",
"tests/rest/datacube/test_datacube100.py::test_sar_backscatter_coefficient_invalid",
"tests/rest/datacube/test_datacube100.py::test_datacube_from_process",
"tests/rest/datacube/test_datacube100.py::test_datacube_from_process_namespace",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_datacube_from_flat_graph_minimal",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_datacube_from_json_minimal_string",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_datacube_from_json_minimal_file[str]",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_datacube_from_json_minimal_file[Path]",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_datacube_from_json_minimal_http",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_process_dict_wrapper",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_minimal",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_cube",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_udp",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_parameter_again",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_no_params",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_missing_params",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_default[kwargs0-100]",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_default[kwargs1-100]",
"tests/rest/datacube/test_datacube100.py::TestDataCubeFromFlatGraph::test_parameter_substitution_default[kwargs2-86]",
"tests/rest/datacube/test_datacube100.py::test_send_nan_json",
"tests/rest/datacube/test_datacube100.py::test_dimension_labels",
"tests/rest/datacube/test_datacube100.py::test_dimension_labels_invalid"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-09-06T14:49:37Z" | apache-2.0 |
|
Open-EO__openeo-python-client-231 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 831591f..d475e07 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Add `DataCube.dimension_labels()` (EP-4008)
- Add `Connection.load_result()` (EP-4008)
- Add proper support for child callbacks in `fit_curve` and `predict_curve` ([#229](https://github.com/Open-EO/openeo-python-client/issues/229))
+- `ProcessBuilder`: Add support for `array_element(data, n)` through `data[n]` syntax ([#228](https://github.com/Open-EO/openeo-python-client/issues/228))
### Changed
diff --git a/openeo/internal/processes/generator.py b/openeo/internal/processes/generator.py
index 834cd90..a53dc32 100644
--- a/openeo/internal/processes/generator.py
+++ b/openeo/internal/processes/generator.py
@@ -107,6 +107,7 @@ def collect_processes(sources: List[Union[Path, str]]) -> List[Process]:
def generate_process_py(processes: List[Process], output=sys.stdout, argv=None):
oo_src = textwrap.dedent("""
+ import builtins
from openeo.internal.processes.builder import ProcessBuilderBase, UNSET
@@ -142,6 +143,12 @@ def generate_process_py(processes: List[Process], output=sys.stdout, argv=None):
def __pow__(self, other) -> 'ProcessBuilder':
return self.power(other)
+ def __getitem__(self, key) -> 'ProcessBuilder':
+ if isinstance(key, builtins.int):
+ return self.array_element(index=key)
+ else:
+ return self.array_element(label=key)
+
""")
fun_src = textwrap.dedent("""
# Public shortcut
diff --git a/openeo/processes.py b/openeo/processes.py
index 417984e..3bbb21e 100644
--- a/openeo/processes.py
+++ b/openeo/processes.py
@@ -4,6 +4,7 @@
# Used command line arguments:
# openeo/internal/processes/generator.py ../openeo-processes/ ../openeo-processes/proposals/ --output openeo/processes.py
+import builtins
from openeo.internal.processes.builder import ProcessBuilderBase, UNSET
@@ -39,6 +40,12 @@ class ProcessBuilder(ProcessBuilderBase):
def __pow__(self, other) -> 'ProcessBuilder':
return self.power(other)
+ def __getitem__(self, key) -> 'ProcessBuilder':
+ if isinstance(key, builtins.int):
+ return self.array_element(index=key)
+ else:
+ return self.array_element(label=key)
+
def absolute(self) -> 'ProcessBuilder':
"""
Absolute value
| Open-EO/openeo-python-client | e0a58e85978ed06cdc0647f68a98c35bcc92fdc9 | diff --git a/tests/rest/datacube/test_processbuilder.py b/tests/rest/datacube/test_processbuilder.py
index 9528a20..405aa97 100644
--- a/tests/rest/datacube/test_processbuilder.py
+++ b/tests/rest/datacube/test_processbuilder.py
@@ -275,10 +275,10 @@ def test_apply_dimension_bandmath_lambda(con100):
def test_apply_dimension_time_to_bands(con100):
- from openeo.processes import array_concat,quantiles,sd,mean
+ from openeo.processes import array_concat, quantiles, sd, mean
im = con100.load_collection("S2")
res = im.apply_dimension(
- process=lambda d: array_concat(quantiles(d,[0.25,0.5,0.75]), [sd(d),mean(d)]),
+ process=lambda d: array_concat(quantiles(d, [0.25, 0.5, 0.75]), [sd(d), mean(d)]),
dimension="t",
target_dimension="bands"
)
@@ -382,3 +382,81 @@ def test_merge_cubes_max_lambda(con100):
im2 = con100.load_collection("MASK")
res = im1.merge_cubes(other=im2, overlap_resolver=lambda data: data.max())
assert res.graph == load_json_resource('data/1.0.0/merge_cubes_max.json')
+
+
+def test_getitem_array_element_index(con100):
+ im = con100.load_collection("S2")
+
+ def callback(data: ProcessBuilder):
+ return data[1] + data[2]
+
+ res = im.reduce_dimension(reducer=callback, dimension="bands")
+
+ assert res.flat_graph() == {
+ "loadcollection1": {
+ "process_id": "load_collection",
+ "arguments": {"id": "S2", "spatial_extent": None, "temporal_extent": None},
+ },
+ "reducedimension1": {
+ "process_id": "reduce_dimension",
+ "arguments": {
+ "data": {"from_node": "loadcollection1"},
+ "dimension": "bands",
+ "reducer": {"process_graph": {
+ "arrayelement1": {
+ "process_id": "array_element",
+ "arguments": {"data": {"from_parameter": "data"}, "index": 1},
+ },
+ "arrayelement2": {
+ "process_id": "array_element",
+ "arguments": {"data": {"from_parameter": "data"}, "index": 2},
+ },
+ "add1": {
+ "process_id": "add",
+ "arguments": {"x": {"from_node": "arrayelement1"}, "y": {"from_node": "arrayelement2"}},
+ "result": True
+ },
+ }}
+ },
+ "result": True
+ }
+ }
+
+
+def test_getitem_array_element_label(con100):
+ im = con100.load_collection("S2")
+
+ def callback(data: ProcessBuilder):
+ return data["red"] + data["green"]
+
+ res = im.reduce_dimension(reducer=callback, dimension="bands")
+
+ assert res.flat_graph() == {
+ "loadcollection1": {
+ "process_id": "load_collection",
+ "arguments": {"id": "S2", "spatial_extent": None, "temporal_extent": None},
+ },
+ "reducedimension1": {
+ "process_id": "reduce_dimension",
+ "arguments": {
+ "data": {"from_node": "loadcollection1"},
+ "dimension": "bands",
+ "reducer": {"process_graph": {
+ "arrayelement1": {
+ "process_id": "array_element",
+ "arguments": {"data": {"from_parameter": "data"}, "label": "red"},
+ },
+ "arrayelement2": {
+ "process_id": "array_element",
+ "arguments": {"data": {"from_parameter": "data"}, "label": "green"},
+ },
+ "add1": {
+ "process_id": "add",
+ "arguments": {"x": {"from_node": "arrayelement1"}, "y": {"from_node": "arrayelement2"}},
+ "result": True
+ },
+ }}
+ },
+ "result": True
+ }
+ }
| ProcessBuilder __getitem__ support
from https://github.com/openEOPlatform/documentation/issues/10
```python
def fit_function(x:ProcessBuilder, parameters):
pi = math.pi
a0 = array_element(parameters, 0)
a1 = array_element(parameters, 1)
a2 = array_element(parameters, 2)
return a0 + a1*cos(2*pi/31557600*x) + a2*sin(2*pi/31557600*x) # 31557600 are the seconds in one year
```
It would be nice to allow writing `parameters[0]` instead of `array_element(parameters, 0)` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/rest/datacube/test_processbuilder.py::test_getitem_array_element_index",
"tests/rest/datacube/test_processbuilder.py::test_getitem_array_element_label"
] | [
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_absolute_str",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_absolute_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_absolute_lambda_method",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_absolute_function",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_absolute_custom_function",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_lambda_method",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_lambda_functions",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_lambda_mixed_and_operator",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_custom_function_methods",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_custom_function_functions",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_custom_function_mixed_and_operator",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_chain_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_math_lambda",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_math_lambda_reflected",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_math_custom_function",
"tests/rest/datacube/test_processbuilder.py::test_apply_callback_math_custom_function_reflected",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_trim_str",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_trim_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_trim_callable",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_trim_lambda",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_udf_callback",
"tests/rest/datacube/test_processbuilder.py::test_apply_neighborhood_complex_callback",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_max_str",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_max_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_max_callable",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_max_lambda",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_interpolate_lambda",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_bandmath_lambda",
"tests/rest/datacube/test_processbuilder.py::test_apply_dimension_time_to_bands",
"tests/rest/datacube/test_processbuilder.py::test_reduce_dimension_max_str",
"tests/rest/datacube/test_processbuilder.py::test_reduce_dimension_max_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_reduce_dimension_max_callable",
"tests/rest/datacube/test_processbuilder.py::test_reduce_dimension_max_lambda",
"tests/rest/datacube/test_processbuilder.py::test_reduce_dimension_bandmath_lambda",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_add_str",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_add_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_add_callable",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_add_lambda",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_max_str",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_max_pgnode",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_max_callable",
"tests/rest/datacube/test_processbuilder.py::test_merge_cubes_max_lambda"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-09-06T16:04:05Z" | apache-2.0 |
|
OpenFreeEnergy__openfe-26 | diff --git a/.gitignore b/.gitignore
index 525811b7..e13f9d46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# custom ignores
+.xxrun
+
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
diff --git a/openfe/setup/scorer.py b/openfe/setup/scorer.py
index de12be0d..f4e3d082 100644
--- a/openfe/setup/scorer.py
+++ b/openfe/setup/scorer.py
@@ -1,4 +1,74 @@
+# This code is part of OpenFE and is licensed under the MIT license.
+# For details, see https://github.com/OpenFreeEnergy/openfe
+
+"""
+Scorers
+=======
+
+A scorer is used to determine a score (and optionally, annotations) for a
+given AtomMapping.
+"""
+
+from typing import NamedTuple, Dict, Any, Union
+
+
+class ScoreAnnotation(NamedTuple):
+ """Container for a score from a mapping and any associated annotations.
+
+ Parameters
+ ----------
+ score : float
+ The score, or ``None`` if there is no score associated
+ annotation : Dict[str, Any]
+ Mapping of annotation label to annotation value for any annotations
+ """
+ score: Union[float, None]
+ annotation: Dict[str, Any]
+
+
class Scorer:
- def __call__(self, atommapping):
- return 0.0
+ """Abstract base class for Scorers.
+
+ To implement a subclass, you must implement the ``score`` method. If
+ your ``Scorer`` only returns annotations, then return None from the
+ ``score`` method. You may optionally implement the ``annotation``
+ method. The default ``annotation`` returns an empty dictionary.
+
+ Use a ``Scorer`` by calling it as a function.
+ """
+ def score(self, atommapping) -> Union[float, None]:
+ """Calculate the score for an AtomMapping.
+
+ Parameters
+ ----------
+ atommapping : AtomMapping
+ AtomMapping to score
+
+ Returns
+ -------
+ Union[float, None] :
+ The score, or ``None`` if no score is calculated
+ """
+ raise NotImplementedError(
+ "'Scorer' is an abstract class and should not be used directly. "
+ "Please use a specific subclass of 'Scorer'."
+ )
+
+ def annotation(self, atommapping) -> Dict[str, Any]:
+ """Create annotation dict for an AtomMapping.
+
+ Parameters
+ ----------
+ atommapping : AtomMapping
+ Atommapping to annotate
+
+ Returns
+ -------
+ Dict[str, Any] :
+ Mapping of annotation labels to annotation values
+ """
+ return {}
+ def __call__(self, atommapping) -> ScoreAnnotation:
+ return ScoreAnnotation(score=self.score(atommapping),
+ annotation=self.annotation(atommapping))
| OpenFreeEnergy/openfe | 3ab18e7b98d09980b4c8c19320762bee9abcb244 | diff --git a/openfe/tests/setup/__init__.py b/openfe/tests/setup/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/openfe/tests/setup/conftest.py b/openfe/tests/setup/conftest.py
new file mode 100644
index 00000000..d74d1b89
--- /dev/null
+++ b/openfe/tests/setup/conftest.py
@@ -0,0 +1,8 @@
+import pytest
+
+
[email protected]
+def mock_atommapping():
+ """Mock to functions that take an AtomMapping as input"""
+ # TODO: add internal structure of this once we have an AtomMapping class
+ return "foo" # current tests using this just need a placeholder
diff --git a/openfe/tests/setup/test_scorer.py b/openfe/tests/setup/test_scorer.py
new file mode 100644
index 00000000..faf00255
--- /dev/null
+++ b/openfe/tests/setup/test_scorer.py
@@ -0,0 +1,40 @@
+import pytest
+from openfe.setup.scorer import Scorer
+
+
+class ConcreteScorer(Scorer):
+ """Test implementation of Scorer with a score"""
+ def score(self, atommapping):
+ return 3.14
+
+
+class ConcreteAnnotator(Scorer):
+ """Test implementation of Scorer with a custom annotation"""
+ def score(self, atommapping):
+ return None
+
+ def annotation(self, atommapping):
+ return {'annotation': 'data'}
+
+
+class TestScorer:
+ def test_abstract_error(self, mock_atommapping):
+ scorer = Scorer()
+ with pytest.raises(NotImplementedError, match="'Scorer'.*abstract"):
+ scorer(mock_atommapping)
+
+ def test_concrete_scorer(self, mock_atommapping):
+ # The ConcreteScorer class should give the implemented value for the
+ # score and the default empty dict for the annotation.
+ scorer = ConcreteScorer()
+ result = scorer(mock_atommapping)
+ assert result.score == 3.14
+ assert result.annotation == {}
+
+ def test_concrete_annotator(self, mock_atommapping):
+ # The ConcreteAnnotator class should give the implemented (None)
+ # value for the score and the implemented value of the annotation.
+ scorer = ConcreteAnnotator()
+ result = scorer(mock_atommapping)
+ assert result.score is None
+ assert result.annotation == {'annotation': 'data'}
| Implement Scorer Base Implementation
```python
s = Scorer(stuff)
m = Mapper.suggest_mapping(m1, m2)
f : float = s.score(m)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"openfe/tests/setup/test_scorer.py::TestScorer::test_abstract_error",
"openfe/tests/setup/test_scorer.py::TestScorer::test_concrete_scorer",
"openfe/tests/setup/test_scorer.py::TestScorer::test_concrete_annotator"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-01-26T16:13:56Z" | mit |
|
OpenGeoVis__omfvista-13 | diff --git a/omfvista/lineset.py b/omfvista/lineset.py
index 5461ea6..2b978dc 100644
--- a/omfvista/lineset.py
+++ b/omfvista/lineset.py
@@ -7,15 +7,13 @@ __all__ = [
__displayname__ = 'Line Set'
-import vtk
-from vtk.util import numpy_support as nps
-import pyvista
-
import numpy as np
+import pyvista
from omfvista.utilities import add_data
-def line_set_to_vtk(lse):
+
+def line_set_to_vtk(lse, origin=(0.0, 0.0, 0.0)):
"""Convert the line set to a :class:`pyvista.PolyData` data object.
Args:
@@ -24,46 +22,23 @@ def line_set_to_vtk(lse):
Return:
:class:`pyvista.PolyData`
"""
+ ids = np.array(lse.geometry.segments).reshape(-1, 2).astype(np.int_)
+ lines = np.c_[np.full(len(ids), 2, dtype=np.int_), ids]
+
+ output = pyvista.PolyData()
+ output.points = np.array(lse.geometry.vertices)
+ output.lines = lines
- output = vtk.vtkPolyData()
- cells = vtk.vtkCellArray()
- pts = vtk.vtkPoints()
-
- # Make a data array for grouping the line segments
- indexArr = vtk.vtkIntArray()
- indexArr.SetNumberOfValues(lse.geometry.num_cells)
- indexArr.SetName('Line Index')
-
- # Generate VTK Points from the vertices
- pts.SetNumberOfPoints(lse.geometry.num_nodes)
- pts.SetData(nps.numpy_to_vtk(lse.geometry.vertices))
-
- last = lse.geometry.segments[0][0]
- segi = 0
- for i in range(len(lse.geometry.segments)):
- # Create a VTK Line cell for each segment
- seg = lse.geometry.segments[i]
- aLine = vtk.vtkLine()
- aLine.GetPointIds().SetId(0, seg[0])
- aLine.GetPointIds().SetId(1, seg[1])
- cells.InsertNextCell(aLine)
- # Group segments by connectivity:
- if seg[0] != last:
- segi += 1
- last = seg[1]
- indexArr.SetValue(i, segi)
-
- # Generate the output
- output.SetPoints(pts)
- output.SetLines(cells)
- output.GetCellData().AddArray(indexArr)
+ indices = output.connectivity().cell_arrays['RegionId']
+ output['Line Index'] = indices
# Now add data to lines:
add_data(output, lse.data)
# TODO: if subtype is borehole make a tube
- return pyvista.wrap(output)
+ output.points += np.array(origin)
+ return output
line_set_to_vtk.__displayname__ = 'Line Set to VTK'
diff --git a/omfvista/pointset.py b/omfvista/pointset.py
index 487ec77..60ea4c8 100644
--- a/omfvista/pointset.py
+++ b/omfvista/pointset.py
@@ -7,16 +7,13 @@ __all__ = [
__displayname__ = 'Point Set'
-import vtk
-from vtk.util import numpy_support as nps
-import pyvista
-
import numpy as np
+import pyvista
from omfvista.utilities import add_data, add_textures
-def point_set_to_vtk(pse):
+def point_set_to_vtk(pse, origin=(0.0, 0.0, 0.0)):
"""Convert the point set to a :class:`pyvista.PolyData` data object.
Args:
@@ -25,34 +22,16 @@ def point_set_to_vtk(pse):
Return:
:class:`pyvista.PolyData`
"""
-
- points = pse.geometry.vertices
- npoints = pse.geometry.num_nodes
-
- # Make VTK cells array
- cells = np.hstack((np.ones((npoints, 1)),
- np.arange(npoints).reshape(-1, 1)))
- cells = np.ascontiguousarray(cells, dtype=np.int64)
- cells = np.reshape(cells, (2*npoints))
- vtkcells = vtk.vtkCellArray()
- vtkcells.SetCells(npoints, nps.numpy_to_vtk(cells, deep=True, array_type=vtk.VTK_ID_TYPE))
-
- # Convert points to vtk object
- pts = vtk.vtkPoints()
- pts.SetNumberOfPoints(pse.geometry.num_nodes)
- pts.SetData(nps.numpy_to_vtk(points))
-
- # Create polydata
- output = vtk.vtkPolyData()
- output.SetPoints(pts)
- output.SetVerts(vtkcells)
+ points = np.array(pse.geometry.vertices)
+ output = pyvista.PolyData(points)
# Now add point data:
add_data(output, pse.data)
add_textures(output, pse.textures, pse.name)
- return pyvista.wrap(output)
+ output.points += np.array(origin)
+ return output
point_set_to_vtk.__displayname__ = 'Point Set to VTK'
diff --git a/omfvista/surface.py b/omfvista/surface.py
index 8d9ab01..a860a08 100644
--- a/omfvista/surface.py
+++ b/omfvista/surface.py
@@ -9,51 +9,31 @@ __all__ = [
__displayname__ = 'Surface'
-import vtk
-from vtk.util import numpy_support as nps
-import pyvista
-import omf
-
-
import numpy as np
+import omf
+import pyvista
from omfvista.utilities import check_orientation, check_orthogonal
from omfvista.utilities import add_data, add_textures
-def surface_geom_to_vtk(surfgeom):
- """Convert the triangulated surface to a :class:`pyvista.UnstructuredGrid`
+
+def surface_geom_to_vtk(surfgeom, origin=(0.0, 0.0, 0.0)):
+ """Convert the triangulated surface to a :class:`pyvista.PolyData`
object
Args:
surfgeom (:class:`omf.surface.SurfaceGeometry`): the surface geomotry to
convert
"""
-
- output = vtk.vtkUnstructuredGrid()
- pts = vtk.vtkPoints()
- cells = vtk.vtkCellArray()
-
- # Generate the points
- pts.SetNumberOfPoints(surfgeom.num_nodes)
- pts.SetData(nps.numpy_to_vtk(surfgeom.vertices))
-
- # Generate the triangle cells
- cellConn = surfgeom.triangles.array
- cellsMat = np.concatenate(
- (np.ones((cellConn.shape[0], 1), dtype=np.int64)*cellConn.shape[1], cellConn),
- axis=1).ravel()
- cells = vtk.vtkCellArray()
- cells.SetNumberOfCells(cellConn.shape[0])
- cells.SetCells(cellConn.shape[0],
- nps.numpy_to_vtk(cellsMat, deep=True, array_type=vtk.VTK_ID_TYPE))
-
- # Add to output
- output.SetPoints(pts)
- output.SetCells(vtk.VTK_TRIANGLE, cells)
- return pyvista.wrap(output)
+ pts = np.array(surfgeom.vertices)
+ tris = np.array(surfgeom.triangles.array)
+ faces = np.c_[np.full(len(tris), 3), tris]
+ output = pyvista.PolyData(pts, faces)
+ output.points += np.array(origin)
+ return output
-def surface_grid_geom_to_vtk(surfgridgeom):
+def surface_grid_geom_to_vtk(surfgridgeom, origin=(0.0, 0.0, 0.0)):
"""Convert the 2D grid to a :class:`pyvista.StructuredGrid` object.
Args:
@@ -63,8 +43,6 @@ def surface_grid_geom_to_vtk(surfgridgeom):
"""
surfgridgeom._validate_mesh()
- output = vtk.vtkStructuredGrid()
-
axis_u = np.array(surfgridgeom.axis_u)
axis_v = np.array(surfgridgeom.axis_v)
axis_w = np.cross(axis_u, axis_v)
@@ -81,8 +59,6 @@ def surface_grid_geom_to_vtk(surfgridgeom):
z = np.array([oz])
- output.SetDimensions(len(x), len(y), len(z))
-
# Build out all nodes in the mesh
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
xx, yy, zz, = xx.ravel('F'), yy.ravel('F'), zz.ravel('F')
@@ -92,16 +68,16 @@ def surface_grid_geom_to_vtk(surfgridgeom):
# Rotate the points based on the axis orientations
points = points.dot(rotation_mtx)
- # Convert points to vtk object
- pts = vtk.vtkPoints()
- pts.SetNumberOfPoints(len(points))
- pts.SetData(nps.numpy_to_vtk(points))
# Now build the output
- output.SetPoints(pts)
+ output = pyvista.StructuredGrid()
+ output.points = points
+ output.dimensions = len(x), len(y), len(z)
+
+ output.points += np.array(origin)
+ return output
- return pyvista.wrap(output)
-def surface_to_vtk(surfel):
+def surface_to_vtk(surfel, origin=(0.0, 0.0, 0.0)):
"""Convert the surface to a its appropriate VTK data object type.
Args:
@@ -116,7 +92,7 @@ def surface_to_vtk(surfel):
elif isinstance(geom, omf.surface.SurfaceGridGeometry):
builder = surface_grid_geom_to_vtk
- output = builder(geom)
+ output = builder(geom, origin=origin)
# Now add point data:
add_data(output, surfel.data)
diff --git a/omfvista/utilities.py b/omfvista/utilities.py
index 0f7490d..30148b0 100644
--- a/omfvista/utilities.py
+++ b/omfvista/utilities.py
@@ -7,8 +7,6 @@ __all__ = [
import numpy as np
-import vtk
-from vtk.util import numpy_support as nps
import pyvista
from PIL import Image
@@ -42,14 +40,7 @@ def check_orthogonal(axis_u, axis_v, axis_w):
def add_data(output, data):
"""Adds data arrays to an output VTK data object"""
for d in data:
- arr = d.array.array
- c = nps.numpy_to_vtk(num_array=arr)
- c.SetName(d.name)
- loc = d.location
- if loc == 'vertices':
- output.GetPointData().AddArray(c)
- else:
- output.GetCellData().AddArray(c)
+ output[d.name] = np.array(d.array.array)
return output
@@ -60,14 +51,8 @@ def add_textures(output, textures, elname):
for i, tex in enumerate(textures):
# Now map the coordinates for the texture
- m = vtk.vtkTextureMapToPlane()
- m.SetInputDataObject(output)
- m.SetOrigin(tex.origin)
- m.SetPoint1(tex.origin + tex.axis_u)
- m.SetPoint2(tex.origin + tex.axis_v)
- m.Update()
+ tmp = output.texture_map_to_plane(origin=tex.origin, point_u=tex.origin + tex.axis_u, point_v=tex.origin + tex.axis_v)
# Grab the texture coordinates
- tmp = m.GetOutputDataObject(0)
tcoord = tmp.GetPointData().GetTCoords()
name = tex.name
if name is None or name == '':
diff --git a/omfvista/volume.py b/omfvista/volume.py
index e1176de..7334a1b 100644
--- a/omfvista/volume.py
+++ b/omfvista/volume.py
@@ -8,21 +8,18 @@ __all__ = [
__displayname__ = 'Volume'
-import vtk
-from vtk.util import numpy_support as nps
+import numpy as np
import pyvista
from omfvista.utilities import check_orientation, check_orthogonal
-import numpy as np
-
def get_volume_shape(vol):
"""Returns the shape of a gridded volume"""
return ( len(vol.tensor_u), len(vol.tensor_v), len(vol.tensor_w))
-def volume_grid_geom_to_vtk(volgridgeom):
+def volume_grid_geom_to_vtk(volgridgeom, origin=(0.0, 0.0, 0.0)):
"""Convert the 3D gridded volume to a :class:`pyvista.StructuredGrid`
(or a :class:`pyvista.RectilinearGrid` when apprropriate) object contatining
the 2D surface.
@@ -45,17 +42,9 @@ def volume_grid_geom_to_vtk(volgridgeom):
# If axis orientations are standard then use a vtkRectilinearGrid
if check_orientation(volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w):
- output = vtk.vtkRectilinearGrid()
- output.SetDimensions(len(x), len(y), len(z)) # note this subtracts 1
- output.SetXCoordinates(nps.numpy_to_vtk(num_array=x))
- output.SetYCoordinates(nps.numpy_to_vtk(num_array=y))
- output.SetZCoordinates(nps.numpy_to_vtk(num_array=z))
- return pyvista.wrap(output)
+ return pyvista.RectilinearGrid(x + origin[0], y + origin[1], z + origin[2])
# Otherwise use a vtkStructuredGrid
- output = vtk.vtkStructuredGrid()
- output.SetDimensions(len(x), len(y), len(z)) # note this subtracts 1
-
# Build out all nodes in the mesh
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
points = np.c_[xx.ravel('F'), yy.ravel('F'), zz.ravel('F')]
@@ -64,17 +53,14 @@ def volume_grid_geom_to_vtk(volgridgeom):
rotation_mtx = np.array([volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w])
points = points.dot(rotation_mtx)
- # Convert points to vtk object
- pts = vtk.vtkPoints()
- pts.SetNumberOfPoints(len(points))
- pts.SetData(nps.numpy_to_vtk(points))
- # Now build the output
- output.SetPoints(pts)
-
- return pyvista.wrap(output)
+ output = pyvista.StructuredGrid()
+ output.points = points
+ output.dimensions = len(x), len(y), len(z)
+ output.points += np.array(origin)
+ return output
-def volume_to_vtk(volelement):
+def volume_to_vtk(volelement, origin=(0.0, 0.0, 0.0)):
"""Convert the volume element to a VTK data object.
Args:
@@ -82,20 +68,14 @@ def volume_to_vtk(volelement):
convert
"""
- output = volume_grid_geom_to_vtk(volelement.geometry)
+ output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
shp = get_volume_shape(volelement.geometry)
# Add data to output
for data in volelement.data:
arr = data.array.array
arr = np.reshape(arr, shp).flatten(order='F')
- c = nps.numpy_to_vtk(num_array=arr, deep=True)
- c.SetName(data.name)
- loc = data.location
- if loc == 'vertices':
- output.GetPointData().AddArray(c)
- else:
- output.GetCellData().AddArray(c)
- return pyvista.wrap(output)
+ output[data.name] = arr
+ return output
# Now set up the display names for the docs
diff --git a/omfvista/wrapper.py b/omfvista/wrapper.py
index 5566a16..e5c2147 100644
--- a/omfvista/wrapper.py
+++ b/omfvista/wrapper.py
@@ -50,12 +50,14 @@ __all__ = [
__displayname__ = 'Wrapper'
+import numpy as np
import omf
-import omfvista
import pyvista
+import omfvista
+
-def wrap(data):
+def wrap(data, origin=(0.0, 0.0, 0.0)):
"""Wraps the OMF data object/project as a VTK data object. This is the
primary function that an end user will harness.
@@ -100,7 +102,10 @@ def wrap(data):
# get the class name
key = data.__class__.__name__
try:
- return wrappers[key](data)
+ if key != 'Project':
+ return wrappers[key](data, origin=origin)
+ else:
+ return wrappers[key](data)
except KeyError:
raise RuntimeError('Data of type ({}) is not supported currently.'.format(key))
@@ -111,8 +116,9 @@ def project_to_vtk(project):
"""
# Iterate over the elements and add converted VTK objects a MultiBlock
data = pyvista.MultiBlock()
+ origin = np.array(project.origin)
for i, e in enumerate(project.elements):
- d = omfvista.wrap(e)
+ d = omfvista.wrap(e, origin=origin)
data[i, e.name] = d
return data
| OpenGeoVis/omfvista | 1fdbaa786e7dd632675b34c348925add9b39c521 | diff --git a/tests/element_test.py b/tests/element_test.py
index 99fcc74..4cf5fdc 100644
--- a/tests/element_test.py
+++ b/tests/element_test.py
@@ -222,7 +222,7 @@ class TestElements(unittest.TestCase):
def test_wrap_surface(self):
surf = omfvista.wrap(SURFACE)
- self.assertTrue(isinstance(surf, pyvista.UnstructuredGrid))
+ self.assertTrue(isinstance(surf, pyvista.PolyData))
self.assertEqual(surf.n_arrays, len(SURFACE.data))
self.assertEqual(surf.n_cells, SURFACE.geometry.num_cells)
self.assertEqual(surf.n_points, SURFACE.geometry.num_nodes)
| omf Project origin value is ignored.
Great library.
According to the [omf docs](https://omf.readthedocs.io/en/latest/content/projects.html), all geometry should be offset by the Project origin, but omfvista does not appear to do this.
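Below is a minimal sketch — not part of the original issue — of how the expected behaviour could be checked. `omfvista.wrap` and `project.origin` are the entry points referenced in the patch above, while the `omf.OMFReader` call and the `test.omf` file name are assumptions for illustration.
```python
import numpy as np
import omf
import omfvista

# Load a project (assumed local file) and wrap it into a pyvista.MultiBlock.
project = omf.OMFReader('test.omf').get_project()
blocks = omfvista.wrap(project)

# Per the omf docs, every wrapped element should be shifted by project.origin;
# before this patch the wrapped geometry ignored that offset.
print(np.array(project.origin))
print(blocks[0].bounds)
```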
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/element_test.py::TestElements::test_wrap_pointset",
"tests/element_test.py::TestElements::test_wrap_surface",
"tests/element_test.py::TestElements::test_wrap_volume"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-03T05:12:40Z" | bsd-3-clause |
|
OpenMDAO__dymos-1018 | diff --git a/dymos/utils/introspection.py b/dymos/utils/introspection.py
index 73497d24..775fa29b 100644
--- a/dymos/utils/introspection.py
+++ b/dymos/utils/introspection.py
@@ -227,7 +227,10 @@ def _configure_constraint_introspection(phase):
con['shape'] = control_shape
con['units'] = control_units if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type in ['indep_polynomial_control', 'input_polynomial_control']:
prefix = 'polynomial_controls:' if phase.timeseries_options['use_prefix'] else ''
@@ -235,7 +238,10 @@ def _configure_constraint_introspection(phase):
control_units = phase.polynomial_control_options[var]['units']
con['shape'] = control_shape
con['units'] = control_units if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type == 'control_rate':
prefix = 'control_rates:' if phase.timeseries_options['use_prefix'] else ''
@@ -245,7 +251,10 @@ def _configure_constraint_introspection(phase):
con['shape'] = control_shape
con['units'] = get_rate_units(control_units, time_units, deriv=1) \
if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type == 'control_rate2':
prefix = 'control_rates:' if phase.timeseries_options['use_prefix'] else ''
@@ -255,7 +264,10 @@ def _configure_constraint_introspection(phase):
con['shape'] = control_shape
con['units'] = get_rate_units(control_units, time_units, deriv=2) \
if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type == 'polynomial_control_rate':
prefix = 'polynomial_control_rates:' if phase.timeseries_options['use_prefix'] else ''
@@ -265,7 +277,10 @@ def _configure_constraint_introspection(phase):
con['shape'] = control_shape
con['units'] = get_rate_units(control_units, time_units, deriv=1) \
if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type == 'polynomial_control_rate2':
prefix = 'polynomial_control_rates:' if phase.timeseries_options['use_prefix'] else ''
@@ -275,7 +290,10 @@ def _configure_constraint_introspection(phase):
con['shape'] = control_shape
con['units'] = get_rate_units(control_units, time_units, deriv=2) \
if con['units'] is None else con['units']
- con['constraint_path'] = f'timeseries.{prefix}{var}'
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{prefix}{var}'
elif var_type == 'timeseries_exec_comp_output':
con['shape'] = (1,)
@@ -290,7 +308,11 @@ def _configure_constraint_introspection(phase):
con['shape'] = meta['shape']
con['units'] = meta['units']
- con['constraint_path'] = f'timeseries.{con["constraint_name"]}'
+
+ if birkhoff and constraint_type in ('initial', 'final'):
+ con['constraint_path'] = f'boundary_vals.{var}'
+ else:
+ con['constraint_path'] = f'timeseries.{con["constraint_name"]}'
def configure_controls_introspection(control_options, ode, time_units='s'):
| OpenMDAO/dymos | 37bb5dd5e11467af36d0364d771ed7be4dbc18cc | diff --git a/dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py b/dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py
new file mode 100644
index 00000000..f47c5cfe
--- /dev/null
+++ b/dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py
@@ -0,0 +1,251 @@
+import unittest
+
+import openmdao.api as om
+import dymos as dm
+
+from openmdao.utils.assert_utils import assert_near_equal
+from openmdao.utils.testing_utils import use_tempdirs
+from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+
+@use_tempdirs
+class TestBrachistochroneBirkhoffConstraints(unittest.TestCase):
+
+ def test_brachistochrone_control_prefix(self):
+
+ p = om.Problem(model=om.Group())
+
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.options['optimizer'] = 'SLSQP'
+ p.driver.declare_coloring(tol=1.0E-12)
+
+ grid = dm.BirkhoffGrid(num_segments=1, nodes_per_seg=25, grid_type='lgl')
+ tx = dm.Birkhoff(grid=grid)
+
+ traj = dm.Trajectory()
+ phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
+ phase.timeseries_options['use_prefix'] = True
+ p.model.add_subsystem('traj', traj)
+ traj.add_phase('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=False)
+ phase.add_state('y', fix_initial=True, fix_final=False)
+
+ # Note that by omitting the targets here Dymos will automatically attempt to connect
+ # to a top-level input named 'v' in the ODE, and connect to nothing if it's not found.
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_control('theta',
+ continuity=True, rate_continuity=True,
+ units='deg')
+
+ phase.add_parameter('g', targets=['g'], units='m/s**2')
+
+ phase.add_path_constraint('theta', lower=0.01, upper=179.9)
+ phase.add_boundary_constraint('theta', loc='final', lower=0.01, upper=179.9)
+
+ phase.add_boundary_constraint('x', loc='final', equals=10)
+ phase.add_boundary_constraint('y', loc='final', equals=5)
+ # Minimize time at the end of the phase
+ phase.add_objective('time_phase', loc='final', scaler=10)
+
+ p.set_solver_print(0)
+
+ p.setup()
+
+ p['traj.phase0.t_initial'] = 0.0
+ p['traj.phase0.t_duration'] = 1.5
+
+ p['traj.phase0.initial_states:x'] = 0.0
+ p['traj.phase0.initial_states:y'] = 10.0
+ p['traj.phase0.initial_states:v'] = 0.0
+
+ p['traj.phase0.states:x'] = phase.interp('x', [0, 10])
+ p['traj.phase0.states:y'] = phase.interp('y', [10, 5])
+ p['traj.phase0.states:v'] = phase.interp('v', [0, 9.9])
+ p['traj.phase0.controls:theta'] = phase.interp('theta', [5, 100])
+ p['traj.phase0.parameters:g'] = 9.80665
+
+ p.run_driver()
+ assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-4)
+
+ def test_brachistochrone_control_no_prefix(self):
+
+ p = om.Problem(model=om.Group())
+
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.options['optimizer'] = 'SLSQP'
+ p.driver.declare_coloring(tol=1.0E-12)
+
+ grid = dm.BirkhoffGrid(num_segments=1, nodes_per_seg=25, grid_type='lgl')
+ tx = dm.Birkhoff(grid=grid)
+
+ traj = dm.Trajectory()
+ phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
+ phase.timeseries_options['use_prefix'] = False
+ p.model.add_subsystem('traj', traj)
+ traj.add_phase('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=False)
+ phase.add_state('y', fix_initial=True, fix_final=False)
+
+ # Note that by omitting the targets here Dymos will automatically attempt to connect
+ # to a top-level input named 'v' in the ODE, and connect to nothing if it's not found.
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_control('theta',
+ continuity=True, rate_continuity=True,
+ units='deg')
+
+ phase.add_parameter('g', targets=['g'], units='m/s**2')
+
+ phase.add_path_constraint('theta', lower=0.01, upper=179.9)
+ phase.add_boundary_constraint('theta', loc='final', lower=0.01, upper=179.9)
+
+ phase.add_boundary_constraint('x', loc='final', equals=10)
+ phase.add_boundary_constraint('y', loc='final', equals=5)
+ # Minimize time at the end of the phase
+ phase.add_objective('time_phase', loc='final', scaler=10)
+
+ p.set_solver_print(0)
+
+ p.setup()
+
+ p['traj.phase0.t_initial'] = 0.0
+ p['traj.phase0.t_duration'] = 1.5
+
+ p['traj.phase0.initial_states:x'] = 0.0
+ p['traj.phase0.initial_states:y'] = 10.0
+ p['traj.phase0.initial_states:v'] = 0.0
+
+ p['traj.phase0.states:x'] = phase.interp('x', [0, 10])
+ p['traj.phase0.states:y'] = phase.interp('y', [10, 5])
+ p['traj.phase0.states:v'] = phase.interp('v', [0, 9.9])
+ p['traj.phase0.controls:theta'] = phase.interp('theta', [5, 100])
+ p['traj.phase0.parameters:g'] = 9.80665
+
+ p.run_driver()
+ assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-4)
+
+ def test_brachistochrone_ode_prefix(self):
+
+ p = om.Problem(model=om.Group())
+
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.options['optimizer'] = 'SLSQP'
+ p.driver.declare_coloring(tol=1.0E-12)
+
+ grid = dm.BirkhoffGrid(num_segments=1, nodes_per_seg=25, grid_type='lgl')
+ tx = dm.Birkhoff(grid=grid)
+
+ traj = dm.Trajectory()
+ phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
+ phase.timeseries_options['use_prefix'] = True
+ p.model.add_subsystem('traj', traj)
+ traj.add_phase('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=False)
+ phase.add_state('y', fix_initial=True, fix_final=False)
+
+ # Note that by omitting the targets here Dymos will automatically attempt to connect
+ # to a top-level input named 'v' in the ODE, and connect to nothing if it's not found.
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_control('theta',
+ continuity=True, rate_continuity=True,
+ units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', targets=['g'], units='m/s**2')
+
+ phase.add_boundary_constraint('x', loc='final', equals=10)
+ phase.add_boundary_constraint('y', loc='final', equals=5)
+ phase.add_boundary_constraint('check', loc='final', lower=-50, upper=50)
+ phase.add_path_constraint('check', upper=100, lower=-100)
+ # Minimize time at the end of the phase
+ phase.add_objective('time_phase', loc='final', scaler=10)
+
+ p.set_solver_print(0)
+
+ p.setup()
+
+ p['traj.phase0.t_initial'] = 0.0
+ p['traj.phase0.t_duration'] = 1.5
+
+ p['traj.phase0.initial_states:x'] = 0.0
+ p['traj.phase0.initial_states:y'] = 10.0
+ p['traj.phase0.initial_states:v'] = 0.0
+
+ p['traj.phase0.states:x'] = phase.interp('x', [0, 10])
+ p['traj.phase0.states:y'] = phase.interp('y', [10, 5])
+ p['traj.phase0.states:v'] = phase.interp('v', [0, 9.9])
+ p['traj.phase0.controls:theta'] = phase.interp('theta', [5, 100])
+ p['traj.phase0.parameters:g'] = 9.80665
+
+ p.run_driver()
+ assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-4)
+
+ def test_brachistochrone_ode_no_prefix(self):
+
+ p = om.Problem(model=om.Group())
+
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.options['optimizer'] = 'SLSQP'
+ p.driver.declare_coloring(tol=1.0E-12)
+
+ grid = dm.BirkhoffGrid(num_segments=1, nodes_per_seg=25, grid_type='lgl')
+ tx = dm.Birkhoff(grid=grid)
+
+ traj = dm.Trajectory()
+ phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
+ phase.timeseries_options['use_prefix'] = False
+ p.model.add_subsystem('traj', traj)
+ traj.add_phase('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=False)
+ phase.add_state('y', fix_initial=True, fix_final=False)
+
+ # Note that by omitting the targets here Dymos will automatically attempt to connect
+ # to a top-level input named 'v' in the ODE, and connect to nothing if it's not found.
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_control('theta',
+ continuity=True, rate_continuity=True,
+ units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', targets=['g'], units='m/s**2')
+
+ phase.add_boundary_constraint('x', loc='final', equals=10)
+ phase.add_boundary_constraint('y', loc='final', equals=5)
+ phase.add_boundary_constraint('check', loc='final', lower=-50, upper=50)
+ phase.add_path_constraint('check', upper=100, lower=-100)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time_phase', loc='final', scaler=10)
+
+ p.set_solver_print(0)
+
+ p.setup()
+
+ p['traj.phase0.t_initial'] = 0.0
+ p['traj.phase0.t_duration'] = 1.5
+
+ p['traj.phase0.initial_states:x'] = 0.0
+ p['traj.phase0.initial_states:y'] = 10.0
+ p['traj.phase0.initial_states:v'] = 0.0
+
+ p['traj.phase0.states:x'] = phase.interp('x', [0, 10])
+ p['traj.phase0.states:y'] = phase.interp('y', [10, 5])
+ p['traj.phase0.states:v'] = phase.interp('v', [0, 9.9])
+ p['traj.phase0.controls:theta'] = phase.interp('theta', [5, 100])
+ p['traj.phase0.parameters:g'] = 9.80665
+
+ p.run_driver()
+ assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-4)
| Overlapping constraints in Birkhoff transcription
### Description
Adding both a boundary constraint and a path constraint on a variable of any type other than a state causes an error.
When the lines of code below are added to the brachistochrone problem, they throw the error
`RuntimeError: <model> <class Group>: Indices for aliases ['traj.phases.phase0->final_boundary_constraint->theta', 'traj.phases.phase0->path_constraint->theta'] are overlapping constraint/objective 'traj.phases.phase0.timeseries.timeseries_comp.theta'.`
### Example
phase.add_path_constraint('theta', lower=0.01, upper=179.9)
phase.add_boundary_constraint('theta', loc='final', lower=0.01, upper=179.9)
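For context, a hedged minimal setup (condensed from the new test in this patch; the surrounding phase definition is not part of the original report) — the overlap only appears when the phase uses the Birkhoff transcription:
```python
import openmdao.api as om
import dymos as dm
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE

p = om.Problem(model=om.Group())
p.driver = om.ScipyOptimizeDriver()

tx = dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=25, grid_type='lgl'))
phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
p.model.add_subsystem('phase0', phase)

phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
phase.add_state('x', fix_initial=True, fix_final=False)
phase.add_state('y', fix_initial=True, fix_final=False)
phase.add_state('v', fix_initial=True, fix_final=False)
phase.add_control('theta', continuity=True, rate_continuity=True, units='deg')

# Both constraints resolve to the same timeseries output, which is what overlaps.
phase.add_path_constraint('theta', lower=0.01, upper=179.9)
phase.add_boundary_constraint('theta', loc='final', lower=0.01, upper=179.9)
phase.add_objective('time_phase', loc='final', scaler=10)

p.setup()
p.run_driver()  # raises the RuntimeError quoted above
```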
### Dymos Version
1.9.2.dev0
### Relevant environment information
_No response_ | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py::TestBrachistochroneBirkhoffConstraints::test_brachistochrone_control_no_prefix",
"dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py::TestBrachistochroneBirkhoffConstraints::test_brachistochrone_control_prefix",
"dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py::TestBrachistochroneBirkhoffConstraints::test_brachistochrone_ode_no_prefix",
"dymos/examples/brachistochrone/test/test_brachistochrone_birkhoff_constraints.py::TestBrachistochroneBirkhoffConstraints::test_brachistochrone_ode_prefix"
] | [] | {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-11-15T17:47:13Z" | apache-2.0 |
|
OpenMDAO__dymos-1021 | diff --git a/dymos/transcriptions/pseudospectral/birkhoff.py b/dymos/transcriptions/pseudospectral/birkhoff.py
index 08d3aabb..df914ead 100644
--- a/dymos/transcriptions/pseudospectral/birkhoff.py
+++ b/dymos/transcriptions/pseudospectral/birkhoff.py
@@ -211,34 +211,24 @@ class Birkhoff(TranscriptionBase):
"""
super(Birkhoff, self).configure_polynomial_controls(phase)
- ode_inputs = get_promoted_vars(self._get_ode(phase), 'input')
-
for name, options in phase.polynomial_control_options.items():
- targets = get_targets(ode=ode_inputs, name=name, user_targets=options['targets'])
- if targets:
- phase.connect(f'polynomial_control_values:{name}',
- [f'ode_all.{t}' for t in targets])
- phase.connect(f'polynomial_control_values:{name}',
- [f'boundary_vals.{t}' for t in targets],
+ if options['targets']:
+ phase.connect(f'polynomial_control_values:{name}', [f'ode_all.{t}' for t in options['targets']])
+ phase.connect(f'polynomial_control_values:{name}', [f'boundary_vals.{t}' for t in options['targets']],
src_indices=om.slicer[[0, -1], ...])
- targets = get_targets(ode=phase.ode_all, name=f'{name}_rate',
- user_targets=options['rate_targets'])
- if targets:
+ if options['rate_targets']:
phase.connect(f'polynomial_control_rates:{name}_rate',
- [f'ode_all.{t}' for t in targets])
+ [f'ode_all.{t}' for t in options['rate_targets']])
phase.connect(f'polynomial_control_rates:{name}_rate',
- [f'boundary_vals.{t}' for t in targets],
+ [f'boundary_vals.{t}' for t in options['rate_targets']],
src_indices=om.slicer[[0, -1], ...])
- targets = get_targets(ode=phase.ode_all, name=f'{name}_rate2',
- user_targets=options['rate2_targets'],
- src_indices=om.slicer[[0, -1], ...])
- if targets:
+ if options['rate2_targets']:
phase.connect(f'polynomial_control_rates:{name}_rate2',
- [f'ode_all.{t}' for t in targets])
+ [f'ode_all.{t}' for t in options['rate2_targets']])
phase.connect(f'polynomial_control_rates:{name}_rate2',
- [f'boundary_vals.{t}' for t in targets],
+ [f'boundary_vals.{t}' for t in options['rate2_targets']],
src_indices=om.slicer[[0, -1], ...])
def setup_ode(self, phase):
| OpenMDAO/dymos | bc67ea54d56a77a9ba0259ed85aa7f4c14d5c320 | diff --git a/dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py b/dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py
index d5a87e48..e3047f54 100644
--- a/dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py
+++ b/dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py
@@ -177,6 +177,99 @@ class TestBrachistochronePolynomialControl(unittest.TestCase):
plt.show()
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [5, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ fig, ax = plt.subplots()
+ fig.suptitle('Brachistochrone Solution')
+
+ x_imp = p.get_val('phase0.timeseries.x')
+ y_imp = p.get_val('phase0.timeseries.y')
+
+ x_exp = exp_out.get_val('phase0.timeseries.x')
+ y_exp = exp_out.get_val('phase0.timeseries.y')
+
+ ax.plot(x_imp, y_imp, 'ro', label='solution')
+ ax.plot(x_exp, y_exp, 'b-', label='simulated')
+
+ ax.set_xlabel('x (m)')
+ ax.set_ylabel('y (m)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ fig, ax = plt.subplots()
+
+ t_imp = p.get_val('phase0.timeseries.time')
+ theta_imp = p.get_val('phase0.timeseries.theta')
+
+ ax.plot(t_imp, theta_imp, 'ro', label='solution')
+
+ ax.set_xlabel('time (s)')
+ ax.set_ylabel(r'$\theta$ (deg)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ plt.show()
+
@use_tempdirs
class TestBrachistochronePolynomialControlBoundaryConstrained(unittest.TestCase):
@@ -357,6 +450,102 @@ class TestBrachistochronePolynomialControlBoundaryConstrained(unittest.TestCase)
plt.show()
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=3, nodes_per_seg=5)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ phase.add_boundary_constraint('theta', loc='initial', lower=0, upper=1.0)
+ phase.add_boundary_constraint('theta', loc='final', upper=105.0)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [1, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ fig, ax = plt.subplots()
+ fig.suptitle('Brachistochrone Solution')
+
+ x_imp = p.get_val('phase0.timeseries.x')
+ y_imp = p.get_val('phase0.timeseries.y')
+
+ x_exp = exp_out.get_val('phase0.timeseries.x')
+ y_exp = exp_out.get_val('phase0.timeseries.y')
+
+ ax.plot(x_imp, y_imp, 'ro', label='solution')
+ ax.plot(x_exp, y_exp, 'b-', label='simulated')
+
+ ax.set_xlabel('x (m)')
+ ax.set_ylabel('y (m)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ fig, ax = plt.subplots()
+
+ t_imp = p.get_val('phase0.timeseries.time')
+ theta_imp = p.get_val('phase0.timeseries.theta')
+
+ ax.plot(t_imp, theta_imp, 'ro', label='solution')
+
+ ax.set_xlabel('time (s)')
+ ax.set_ylabel(r'$\theta$ (deg)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ plt.show()
+
@use_tempdirs
class TestBrachistochronePolynomialControlPathConstrained(unittest.TestCase):
@@ -533,6 +722,101 @@ class TestBrachistochronePolynomialControlPathConstrained(unittest.TestCase):
plt.show()
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ phase.add_path_constraint('theta', lower=1, upper=120)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [5, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ fig, ax = plt.subplots()
+ fig.suptitle('Brachistochrone Solution')
+
+ x_imp = p.get_val('phase0.timeseries.x')
+ y_imp = p.get_val('phase0.timeseries.y')
+
+ x_exp = exp_out.get_val('phase0.timeseries.x')
+ y_exp = exp_out.get_val('phase0.timeseries.y')
+
+ ax.plot(x_imp, y_imp, 'ro', label='solution')
+ ax.plot(x_exp, y_exp, 'b-', label='simulated')
+
+ ax.set_xlabel('x (m)')
+ ax.set_ylabel('y (m)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ fig, ax = plt.subplots()
+
+ t_imp = p.get_val('phase0.timeseries.time')
+ theta_imp = p.get_val('phase0.timeseries.theta')
+
+ ax.plot(t_imp, theta_imp, 'ro', label='solution')
+
+ ax.set_xlabel('time (s)')
+ ax.set_ylabel(r'$\theta$ (deg)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ plt.show()
+
@use_tempdirs
class TestBrachistochronePolynomialControlRatePathConstrained(unittest.TestCase):
@@ -713,6 +997,101 @@ class TestBrachistochronePolynomialControlRatePathConstrained(unittest.TestCase)
plt.show()
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ phase.add_path_constraint('theta_rate', lower=0, upper=120)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [5, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ fig, ax = plt.subplots()
+ fig.suptitle('Brachistochrone Solution')
+
+ x_imp = p.get_val('phase0.timeseries.x')
+ y_imp = p.get_val('phase0.timeseries.y')
+
+ x_exp = exp_out.get_val('phase0.timeseries.x')
+ y_exp = exp_out.get_val('phase0.timeseries.y')
+
+ ax.plot(x_imp, y_imp, 'ro', label='solution')
+ ax.plot(x_exp, y_exp, 'b-', label='simulated')
+
+ ax.set_xlabel('x (m)')
+ ax.set_ylabel('y (m)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ fig, ax = plt.subplots()
+
+ t_imp = p.get_val('phase0.timeseries.time')
+ theta_imp = p.get_val('phase0.timeseries.theta')
+
+ ax.plot(t_imp, theta_imp, 'ro', label='solution')
+
+ ax.set_xlabel('time (s)')
+ ax.set_ylabel(r'$\theta$ (deg)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ plt.show()
+
@use_tempdirs
class TestBrachistochronePolynomialControlRate2PathConstrained(unittest.TestCase):
@@ -893,6 +1272,101 @@ class TestBrachistochronePolynomialControlRate2PathConstrained(unittest.TestCase
plt.show()
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ phase.add_path_constraint('theta_rate2', lower=-0.01, upper=0.01)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [5, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ fig, ax = plt.subplots()
+ fig.suptitle('Brachistochrone Solution')
+
+ x_imp = p.get_val('phase0.timeseries.x')
+ y_imp = p.get_val('phase0.timeseries.y')
+
+ x_exp = exp_out.get_val('phase0.timeseries.x')
+ y_exp = exp_out.get_val('phase0.timeseries.y')
+
+ ax.plot(x_imp, y_imp, 'ro', label='solution')
+ ax.plot(x_exp, y_exp, 'b-', label='simulated')
+
+ ax.set_xlabel('x (m)')
+ ax.set_ylabel('y (m)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ fig, ax = plt.subplots()
+
+ t_imp = p.get_val('phase0.timeseries.time')
+ theta_imp = p.get_val('phase0.timeseries.theta')
+
+ ax.plot(t_imp, theta_imp, 'ro', label='solution')
+
+ ax.set_xlabel('time (s)')
+ ax.set_ylabel(r'$\theta$ (deg)')
+ ax.grid(True)
+ ax.legend(loc='upper right')
+
+ plt.show()
+
@use_tempdirs
class TestBrachistochronePolynomialControlSimulation(unittest.TestCase):
@@ -1011,6 +1485,74 @@ class TestBrachistochronePolynomialControlSimulation(unittest.TestCase):
assert_near_equal(theta_exp[0], theta_imp[0])
assert_near_equal(theta_exp[-1], theta_imp[-1])
+ def test_brachistochrone_polynomial_control_birkhoff(self):
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import openmdao.api as om
+ from openmdao.utils.assert_utils import assert_near_equal
+ import dymos as dm
+ from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
+
+ p = om.Problem(model=om.Group())
+ p.driver = om.ScipyOptimizeDriver()
+ p.driver.declare_coloring()
+
+ phase = dm.Phase(ode_class=BrachistochroneODE,
+ transcription=dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15)))
+
+ p.model.add_subsystem('phase0', phase)
+
+ phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
+
+ phase.add_state('x', fix_initial=True, fix_final=True)
+
+ phase.add_state('y', fix_initial=True, fix_final=True)
+
+ phase.add_state('v', fix_initial=True, fix_final=False)
+
+ phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
+
+ phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
+
+ # Minimize time at the end of the phase
+ phase.add_objective('time', loc='final', scaler=10)
+
+ p.model.linear_solver = om.DirectSolver()
+
+ p.setup()
+
+ p['phase0.t_initial'] = 0.0
+ p['phase0.t_duration'] = 2.0
+
+ p.set_val('phase0.initial_states:x', 0.0)
+ p.set_val('phase0.initial_states:y', 10.0)
+ p.set_val('phase0.initial_states:v', 0.0)
+ p.set_val('phase0.final_states:x', 10.0)
+ p.set_val('phase0.final_states:y', 5.0)
+ p.set_val('phase0.final_states:v', 9.9)
+
+ p.set_val('phase0.states:x', phase.interp('x', [0, 10]))
+ p.set_val('phase0.states:y', phase.interp('y', [10, 5]))
+ p.set_val('phase0.states:v', phase.interp('v', [0, 9.9]))
+ p.set_val('phase0.polynomial_controls:theta', phase.interp('theta', [5, 100]))
+
+ # Solve for the optimal trajectory
+ p.run_driver()
+
+ # Test the results
+ assert_near_equal(p.get_val('phase0.timeseries.time')[-1], 1.8016, tolerance=1.0E-3)
+
+ # Generate the explicitly simulated trajectory
+ exp_out = phase.simulate()
+
+ theta_imp = p.get_val('phase0.timeseries.theta')
+ theta_exp = exp_out.get_val('phase0.timeseries.theta')
+
+ assert_near_equal(theta_exp[0], theta_imp[0])
+ assert_near_equal(theta_exp[-1], theta_imp[-1])
+
if __name__ == '__main__': # pragma: no cover
unittest.main()
| Polynomial controls don't work with Birkhoff transcription
### Description
Attempting to use a polynomial control causes an error during configure. This occurs only in the Birkhoff transcription, and only with polynomial controls:
`AttributeError: 'Phase' object has no attribute 'ode_all'`
### Example
https://github.com/OpenMDAO/dymos/blob/master/dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py
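A hedged, condensed version of the linked example (mirroring the calls used in the new Birkhoff tests above); only the polynomial control is essential to the failure:
```python
import openmdao.api as om
import dymos as dm
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE

p = om.Problem(model=om.Group())
tx = dm.Birkhoff(grid=dm.BirkhoffGrid(num_segments=1, nodes_per_seg=15))
phase = dm.Phase(ode_class=BrachistochroneODE, transcription=tx)
p.model.add_subsystem('phase0', phase)

phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
phase.add_state('x', fix_initial=True, fix_final=True)
phase.add_state('y', fix_initial=True, fix_final=True)
phase.add_state('v', fix_initial=True, fix_final=False)

# Using a polynomial control (rather than a regular control) is what triggers
# AttributeError: 'Phase' object has no attribute 'ode_all'
phase.add_polynomial_control('theta', order=1, units='deg', lower=0.01, upper=179.9)
phase.add_objective('time', loc='final', scaler=10)

p.setup()  # the error is raised while the phase is configured
```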
### Dymos Version
1.9.2.-dev
### Relevant environment information
_No response_ | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControl::test_brachistochrone_polynomial_control_birkhoff",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlBoundaryConstrained::test_brachistochrone_polynomial_control_birkhoff",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlPathConstrained::test_brachistochrone_polynomial_control_birkhoff",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRatePathConstrained::test_brachistochrone_polynomial_control_birkhoff",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRate2PathConstrained::test_brachistochrone_polynomial_control_birkhoff",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlSimulation::test_brachistochrone_polynomial_control_birkhoff"
] | [
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControl::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControl::test_brachistochrone_polynomial_control_radau",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlBoundaryConstrained::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlBoundaryConstrained::test_brachistochrone_polynomial_control_radau",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlPathConstrained::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlPathConstrained::test_brachistochrone_polynomial_control_radau",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRatePathConstrained::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRatePathConstrained::test_brachistochrone_polynomial_control_radau",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRate2PathConstrained::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlRate2PathConstrained::test_brachistochrone_polynomial_control_radau",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlSimulation::test_brachistochrone_polynomial_control_gauss_lobatto",
"dymos/examples/brachistochrone/test/test_doc_brachistochrone_polynomial_controls.py::TestBrachistochronePolynomialControlSimulation::test_brachistochrone_polynomial_control_radau"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2023-11-16T16:52:49Z" | apache-2.0 |
|
Ouranosinc__xclim-336 | diff --git a/HISTORY.rst b/HISTORY.rst
index 66f7cba4..556b131c 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -5,6 +5,7 @@ History
0.13.x (2020-01-10)
-------------------
* Reverted #311, removing the `_rolling` util function. Added optimal keywords to `rolling()` calls.
+* Fixed `ensembles.create_ensemble` errors for builds against xarray master branch.
0.12.x-beta (2019-11-18)
------------------------
diff --git a/xclim/ensembles.py b/xclim/ensembles.py
index ab602bc4..8080f212 100644
--- a/xclim/ensembles.py
+++ b/xclim/ensembles.py
@@ -7,7 +7,6 @@ from typing import Union
import numpy as np
import pandas as pd
-import scipy
import scipy.stats
import xarray as xr
from sklearn.cluster import KMeans
@@ -314,15 +313,16 @@ def _ens_align_datasets(
if time_flag:
- ds["time"] = xr.decode_cf(ds).time
-
- ds["time"].values = pd.to_datetime(
+ cal1 = xr.decode_cf(ds).time
+ ds.drop_vars("time")
+ ds["time"] = pd.to_datetime(
{
- "year": ds.time.dt.year,
- "month": ds.time.dt.month,
- "day": ds.time.dt.day,
+ "year": cal1.time.dt.year,
+ "month": cal1.time.dt.month,
+ "day": cal1.time.dt.day,
}
- )
+ ).values
+
# if dataset does not have the same time steps pad with nans
if ds.time.min() > time_all.min() or ds.time.max() < time_all.max():
coords = {}
| Ouranosinc/xclim | 26d99831e33e989da59b0a7e2471e66231111784 | diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py
index 55d11020..3bd9d27f 100644
--- a/tests/test_ensembles.py
+++ b/tests/test_ensembles.py
@@ -58,6 +58,7 @@ class TestEnsembleStats:
def test_create_ensemble(self):
ens = ensembles.create_ensemble(self.nc_files_simple)
assert len(ens.realization) == len(self.nc_files_simple)
+ assert len(ens.time) == 151
# create again using xr.Dataset objects
ds_all = []
@@ -94,6 +95,7 @@ class TestEnsembleStats:
assert len(ens.realization) == len(self.nc_files)
assert ens.time.dt.year.min() == 1950
assert ens.time.dt.year.max() == 2100
+ assert len(ens.time) == 151
ii = [i for i, s in enumerate(self.nc_files) if "1970-2050" in s]
# assert padded with nans
| build failing against xarray master
The ensemble creation tests seem to be failing in the Travis build against xarray@master.
We might want to take a minute to see why and get ahead of this...
"tests/test_ensembles.py::TestEnsembleStats::test_calc_mean_std_min_max",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc[True]",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_nans",
"tests/test_ensembles.py::TestEnsembleStats::test_create_ensemble",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_blocks",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc[False]"
] | [
"tests/test_ensembles.py::TestEnsembleStats::test_no_time",
"tests/test_ensembles.py::TestEnsembleStats::test_checktimes"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-01-07T17:41:34Z" | apache-2.0 |
|
Ouranosinc__xclim-339 | diff --git a/HISTORY.rst b/HISTORY.rst
index 2da52984..50dd6f92 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -10,6 +10,7 @@ History
* Fixed `ensembles.create_ensemble` errors for builds against xarray master branch.
* Reformatted code to make better use of Python3.6 conventions (f-strings and object signatures).
* Fixed randomly failing tests of `checks.missing_any`.
+* Improvement of `ensemble.ensemble_percentile` and `ensemble.create_ensemble`.
0.12.x-beta (2019-11-18)
------------------------
diff --git a/xclim/ensembles.py b/xclim/ensembles.py
index ddbccd9a..f333face 100644
--- a/xclim/ensembles.py
+++ b/xclim/ensembles.py
@@ -36,6 +36,7 @@ def create_ensemble(
a new dimension (name:'realization'). In the case where input files have unequal time dimensions, the output
ensemble Dataset is created for maximum time-step interval of all input files. Before concatenation, datasets not
covering the entire time span have their data padded with NaN values.
+ Dataset and variable attributes of the first dataset are copied to the resulting dataset.
Parameters
----------
@@ -48,7 +49,7 @@ def create_ensemble(
Only applicable when "datasets" is a sequence of file paths.
xr_kwargs :
- Any keyword arguments to be given to xarray when opening the files.
+ Any keyword arguments to be given to `xr.open_dataset` when opening the files (or to `xr.open_mfdataset` if mf_flag is True)
Returns
-------
@@ -74,19 +75,16 @@ def create_ensemble(
>>> datasets.append(glob.glob('/dir2/*.nc'))
>>> ens = ensembles.create_ensemble(datasets, mf_flag=True)
"""
-
- dim = "realization"
-
time_flag, time_all = _ens_checktimes(datasets, mf_flag, **xr_kwargs)
- ds1 = _ens_align_datasets(datasets, mf_flag, time_flag, time_all)
+ ds = _ens_align_datasets(datasets, mf_flag, time_flag, time_all, **xr_kwargs)
- for v in list(ds1[0].data_vars):
- list1 = [ds[v] for ds in ds1]
- data = xr.concat(list1, dim=dim)
- if v == list(ds1[0].data_vars)[0]:
- ens = xr.Dataset(data_vars=None, coords=data.coords, attrs=ds1[0].attrs)
- ens[v] = data
+ dim = xr.IndexVariable("realization", np.arange(len(ds)), attrs={"axis": "E"})
+
+ ens = xr.concat(ds, dim)
+ for vname, var in ds[0].variables.items():
+ ens[vname].attrs.update(**var.attrs)
+ ens.attrs.update(**ds[0].attrs)
return ens
@@ -142,12 +140,11 @@ def ensemble_mean_std_max_min(ens: xr.Dataset) -> xr.Dataset:
def ensemble_percentiles(
ens: xr.Dataset,
values: Tuple[int, int, int] = (10, 50, 90),
- time_block: Optional[int] = None,
+ keep_chunk_size: Optional[bool] = None,
) -> xr.Dataset:
"""Calculate ensemble statistics between a results from an ensemble of climate simulations.
- Returns a Dataset containing ensemble statistics for input climate simulations.
- Alternatively calculate ensemble percentiles (default) or ensemble mean and standard deviation.
+ Returns a Dataset containing ensemble percentiles for input climate simulations.
Parameters
----------
@@ -155,9 +152,11 @@ def ensemble_percentiles(
Ensemble dataset (see xclim.ensembles.create_ensemble).
values : Tuple[int, int, int]
Percentile values to calculate. Default: (10, 50, 90).
- time_block : Optional[int]
- For large ensembles, iteratively calculate percentiles in time-step blocks (n==time_block).
- If not defined, the function tries to estimate an appropriate value.
+ keep_chunk_size : Optional[bool]
+ For ensembles using dask arrays, all chunks along the 'realization' axis are merged.
+ If True, the dataset is rechunked along the dimension with the largest chunks, so that the chunks keep the same size (approx)
+ If False, no shrinking is performed, resulting in much larger chunks
+ If not defined, the function decides which is best
Returns
-------
@@ -177,34 +176,58 @@ def ensemble_percentiles(
Calculate non-default percentiles (25th and 75th)
>>> ens_percs = ensembles.ensemble_percentiles(ens, values=(25, 50, 75))
>>> print(ens_percs['tas_p25'])
- Calculate by time blocks (n=10) if ensemble size is too large to load in memory
- >>> ens_percs = ensembles.ensemble_percentiles(ens, time_block=10)
+ If the original array has many small chunks, it might be more efficient to do:
+ >>> ens_percs = ensembles.ensemble_percentiles(ens, keep_chunk_size=False)
>>> print(ens_percs['tas_p25'])
"""
ds_out = ens.drop_vars(names=set(ens.data_vars))
- dims = list(ens.dims)
for v in ens.data_vars:
- # Percentile calculation requires load to memory : automate size for large ensemble objects
- if not time_block:
- time_block = round(
- 2e8 / (ens[v].size / ens[v].shape[dims.index("time")]), -1
- ) # 2E8
-
- if time_block > len(ens[v].time):
- out = _calc_percentiles_simple(ens, v, values)
-
+ # Percentile calculation forbids any chunks along realization
+ if len(ens.chunks.get("realization", [])) > 1:
+ if keep_chunk_size is None:
+ # Enable smart rechunking is chunksize exceed 2E8 elements after merging along realization
+ keep_chunk_size = (
+ np.prod(ens[v].isel(realization=0).data.chunksize)
+ * ens.realization.size
+ > 2e8
+ )
+ if keep_chunk_size:
+ # Smart rechunk on dimension where chunks are the largest
+ chkDim, chks = max(
+ ens.chunks.items(),
+ key=lambda kv: 0 if kv[0] == "realization" else max(kv[1]),
+ )
+ var = ens[v].chunk(
+ {"realization": -1, chkDim: len(chks) * ens.realization.size,}
+ )
+ else:
+ var = ens[v].chunk({"realization": -1})
else:
- # loop through blocks
- warnings.warn(
- f"Large ensemble size detected: statistics will be"
- f" calculated in blocks of {int(time_block)} time-steps.",
- UserWarning,
- stacklevel=2,
+ var = ens[v]
+
+ for p in values:
+ perc = xr.apply_ufunc(
+ _calc_perc,
+ var,
+ input_core_dims=[["realization"]],
+ output_core_dims=[[]],
+ keep_attrs=True,
+ kwargs=dict(p=p),
+ dask="parallelized",
+ output_dtypes=[ens[v].dtype],
)
- out = _calc_percentiles_blocks(ens, v, values, time_block)
- for vv in out.data_vars:
- ds_out[vv] = out[vv]
+
+ perc.name = f"{v}_p{p:02d}"
+ ds_out[perc.name] = perc
+
+ if "description" in ds_out[perc.name].attrs:
+ ds_out[perc.name].attrs[
+ "description"
+ ] = f"{ds_out[perc.name].attrs['description']} : {p}th percentile of ensemble"
+ else:
+ ds_out[perc.name].attrs["description"] = f"{p}th percentile of ensemble"
+
return ds_out
@@ -341,98 +364,33 @@ def _ens_align_datasets(
return ds_all
-def _calc_percentiles_simple(ens, v, values):
- ds_out = ens.drop_vars(names=set(ens.data_vars))
- dims = list(ens[v].dims)
- outdims = [x for x in dims if "realization" not in x]
-
- # print('loading ensemble data to memory')
- arr = ens[v].load() # percentile calc requires loading the array
- coords = {}
- for c in outdims:
- coords[c] = arr[c]
- for p in values:
- outvar = v + "_p" + str(p)
-
- out1 = _calc_perc(arr, p)
-
- ds_out[outvar] = xr.DataArray(out1, dims=outdims, coords=coords)
- ds_out[outvar].attrs = ens[v].attrs
- if "description" in ds_out[outvar].attrs.keys():
- ds_out[outvar].attrs[
- "description"
- ] = f"{ds_out[outvar].attrs['description']} : {p}th percentile of ensemble"
- else:
- ds_out[outvar].attrs["description"] = f"{p}th percentile of ensemble"
- return ds_out
-
-
-def _calc_percentiles_blocks(ens, v, values, time_block):
- ds_out = ens.drop_vars(names=set(ens.data_vars))
- dims = list(ens[v].dims)
- outdims = [x for x in dims if "realization" not in x]
-
- blocks = list(range(0, len(ens.time) + 1, int(time_block)))
- if blocks[-1] != len(ens[v].time):
- blocks.append(len(ens[v].time))
- arr_p_all = {}
- for t in range(0, len(blocks) - 1):
- # print('Calculating block ', t + 1, ' of ', len(blocks) - 1)
- time_sel = slice(blocks[t], blocks[t + 1])
- arr = (
- ens[v].isel(time=time_sel).load()
- ) # percentile calc requires loading the array
- coords = {}
- for c in outdims:
- coords[c] = arr[c]
- for p in values:
-
- out1 = _calc_perc(arr, p)
-
- if t == 0:
- arr_p_all[str(p)] = xr.DataArray(out1, dims=outdims, coords=coords)
- else:
- arr_p_all[str(p)] = xr.concat(
- [
- arr_p_all[str(p)],
- xr.DataArray(out1, dims=outdims, coords=coords),
- ],
- dim="time",
- )
- for p in values:
- outvar = v + "_p" + str(p)
- ds_out[outvar] = arr_p_all[str(p)]
- ds_out[outvar].attrs = ens[v].attrs
- if "description" in ds_out[outvar].attrs.keys():
- ds_out[outvar].attrs[
- "description"
- ] = f"{ds_out[outvar].attrs['description']} : {p}th percentile of ensemble"
+def _calc_perc(arr, p=50):
+ """Ufunc-like computing a percentile over the last axis of the array.
- else:
- ds_out[outvar].attrs["description"] = f"{p}th percentile of ensemble"
+ Processes cases with invalid values separately, which makes it more efficent than np.nanpercentile for array with only a few invalid points.
- return ds_out
-
-
-def _calc_perc(arr, p):
- dims = arr.dims
- # make sure realization is the first dimension
- if dims.index("realization") != 0:
- arr = arr.transpose(
- "realization", *[dim for dim in dims if dim != "realization"]
- )
+ Parameters
+ ----------
+ arr : np.array
+ Percentile is computed over the last axis.
+ p : scalar
+ Percentile to compute, between 0 and 100. (the default is 50)
- nan_count = np.isnan(arr).sum(axis=0)
- out = np.percentile(arr, p, axis=0)
- if np.any((nan_count > 0) & (nan_count < arr.shape[0])):
- arr1 = arr.values.reshape(arr.shape[0], int(arr.size / arr.shape[0]))
- # only use nanpercentile where we need it (slow performace compared to standard) :
- nan_index = np.where((nan_count > 0) & (nan_count < arr.shape[0]))
+ Returns
+ -------
+ np.array
+ """
+ nan_count = np.isnan(arr).sum(axis=-1)
+ out = np.percentile(arr, p, axis=-1)
+ nans = (nan_count > 0) & (nan_count < arr.shape[-1])
+ if np.any(nans):
+ arr1 = arr.reshape(int(arr.size / arr.shape[-1]), arr.shape[-1])
+ # only use nanpercentile where we need it (slow performance compared to standard) :
+ nan_index = np.where(nans)
t = np.ravel_multi_index(nan_index, nan_count.shape)
out[np.unravel_index(t, nan_count.shape)] = np.nanpercentile(
- arr1[:, t], p, axis=0
+ arr1[t, :], p, axis=-1
)
-
return out
| Ouranosinc/xclim | 0790e8c7cc4b5d933a034f1a6da1cdc1072c2e01 | diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py
index 60f94f58..34aa4aa7 100644
--- a/tests/test_ensembles.py
+++ b/tests/test_ensembles.py
@@ -137,10 +137,13 @@ class TestEnsembleStats:
out1 = ensembles.ensemble_percentiles(ens, values=(25, 75))
assert np.all(out1["tg_mean_p75"] > out1["tg_mean_p25"])
- def test_calc_perc_blocks(self):
+ @pytest.mark.parametrize("keep_chunk_size", [False, True, None])
+ def test_calc_perc_dask(self, keep_chunk_size):
ens = ensembles.create_ensemble(self.nc_files_simple)
- out1 = ensembles.ensemble_percentiles(ens)
- out2 = ensembles.ensemble_percentiles(ens, values=(10, 50, 90), time_block=10)
+ out2 = ensembles.ensemble_percentiles(
+ ens.chunk({"time": 2}), values=(10, 50, 90), keep_chunk_size=keep_chunk_size
+ )
+ out1 = ensembles.ensemble_percentiles(ens.load())
np.testing.assert_array_equal(out1["tg_mean_p10"], out2["tg_mean_p10"])
np.testing.assert_array_equal(out1["tg_mean_p50"], out2["tg_mean_p50"])
np.testing.assert_array_equal(out1["tg_mean_p90"], out2["tg_mean_p90"])
| Optimize/improve xclim ensemble stats utilities
There are currently functional versions of the following utilities but they could use improvement / polishing:
```python
xclim.utils.create_ensemble()
xclim.utils.ensemble_percentiles()
xclim.utils.calc_percentiles_simple()
xclim.utils.calc_percentiles_blocks()
xclim.utils.calc_perc()
```
**Ideas to explore:**
For ensemble creation:
- create dimension using IndexVariable (instead of just a string) and make sure that it has attribute axis='E'.
**Try to achieve dask enabled percentile calculation (remove 'time_block' method):**
- try DataArray.reduce(np.nanpercentile, dim='realization') - Check performance (nanpercentile seems slow)
- use DataArray.apply(xclim.utils.calc_perc, dim='realization') - 'calc_perc' only applies nanpercentile where needed ... should technically be faster (see the sketch below)
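A minimal sketch of those two ideas on a toy ensemble with a `realization` dimension (array shape, member count and variable name are illustrative only, not xclim's actual implementation):

```python
import numpy as np
import xarray as xr

# toy ensemble: 5 members, 4 time steps
ens = xr.DataArray(np.random.rand(5, 4),
                   dims=("realization", "time"), name="tg_mean")

# idea 1: plain reduction with numpy's nan-aware percentile
p10 = ens.reduce(np.nanpercentile, dim="realization", q=10)

# idea 2: ufunc-style reduction; the "realization" core dimension is moved
# to the last axis before np.nanpercentile is applied over it
p90 = xr.apply_ufunc(
    np.nanpercentile,
    ens,
    input_core_dims=[["realization"]],
    kwargs={"q": 90, "axis": -1},
)
```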
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_dask[False]",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_dask[True]",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_dask[None]"
] | [
"tests/test_ensembles.py::TestEnsembleStats::test_checktimes",
"tests/test_ensembles.py::TestEnsembleStats::test_create_ensemble",
"tests/test_ensembles.py::TestEnsembleStats::test_no_time",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc[False]",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc[True]",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_perc_nans",
"tests/test_ensembles.py::TestEnsembleStats::test_calc_mean_std_min_max"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-01-08T23:14:03Z" | apache-2.0 |
|
OxfordIonTrapGroup__oitg-38 | diff --git a/oitg/threshold.py b/oitg/threshold.py
index b74c080..7d4bf82 100644
--- a/oitg/threshold.py
+++ b/oitg/threshold.py
@@ -1,114 +1,156 @@
import numpy as np
-from scipy.optimize import least_squares
+from scipy.optimize import minimize
from scipy.stats import poisson
+from scipy.special import gammainc
-def calc_target_bin_time(bright_rate, dark_rate, p_error_target, p_bright=0.5):
- """Calculate optimal threshold bin time for target error chance
+def optimise_readout(bright_rate, dark_rate, dark_to_bright_rate=(1 / 1.168),
+ p_bright=0.5):
+ """Calculate optimal threshold bin time & threshold count
The calculation assumes both bright and dark counts are
Poisson distributed and gives a threshold minimising the error probability
- The calculation neglects de-shelving and accidental shelving during the
- readout bin time. It is therefore not suitable for P(error) < 2e-4.
+ The calculation accounts for de-shelving during the readout bin time.
See thesis: Alice Burrell, 2010
:param bright_rate: expected bright count rate in $s^-1$
:param dark_rate: expected dark count ratein $s^-1$
- :param p_error_target: target error probability
+ :param dark_to_bright_rate: dark state decay to become bright in $s^-1$.
+ As this function seeks the global minimum error, the rate must not be
+ zero. Default of 1/1.168 is the Calcium D5/2 shelf decay rate.
:param p_bright: probability of encountering a bright state (default=0.5)
- :returns: (target_t_bin [s], threshold_rate [$s^-1$])"""
- def residuals(t_bin):
- return (calc_p_error(bright_rate, dark_rate, t_bin[0], p_bright=p_bright) -
- p_error_target)
+ :returns: (target_t_bin [s], threshold_count, p_error)"""
+ thresh = 1
+ error_last = 1.
+ t_bin, error = optimise_t_bin(
+ bright_rate, dark_rate, thresh, dark_to_bright_rate, p_bright)
+ while error_last > error:
+ error_last = error
+ t_bin_last = t_bin
+ thresh += 1
+ t_bin, error = optimise_t_bin(
+ bright_rate, dark_rate, thresh, dark_to_bright_rate, p_bright)
- t_bin_init = 1e-3
- result = least_squares(residuals,
- t_bin_init,
- bounds=(0, np.inf),
- x_scale=(1e-3, ),
- f_scale=(0.1 * p_error_target)**2)
+ return (t_bin_last, thresh - 1, error_last)
- thresh_rate = calc_thresh_rate(bright_rate,
- dark_rate,
- p_bright=p_bright,
- t_bin=result['x'][0])
- return result['x'][0], thresh_rate
+def optimise_treshold(bright_rate, dark_rate, t_bin, dark_to_bright_rate=0.,
+ p_bright=0.5):
+ """Calculate optimal threshold threshold count for a given bin time
+ The calculation accounts for de-shelving during the readout bin time.
+ See thesis: Alice Burrell, 2010
-def calc_p_error(bright_rate, dark_rate, t_bin, p_bright=0.5):
- """Assumes exact threshold count is evaluated as dark
+ :param bright_rate: expected bright count rate in $s^-1$
+ :param dark_rate: expected dark count ratein $s^-1$
+ :param dark_to_bright_rate: dark state decay to become bright in $s^-1$
+ :param p_bright: probability of encountering a bright state (default=0.5)
- The calculation neglects de-shelving and accidental shelving during the
- readout bin time. It is therefore not suitable for P(error) < 2e-4.
+ :returns: (target_t_bin [s], threshold_count, p_error)"""
+ # de-shelving increases the count rate of dark states (non-poissonian)
+ # The no-de-shelving threshold is therefore a lower bound
+ thresh_min = poisson_optimal_thresh_count(
+ bright_rate * t_bin, dark_rate * t_bin, p_bright)
+ thresh_max = bright_rate * t_bin
+
+ # thresholds are discrete - could implement discreet optimisation, but the
+ # range of values is small in practice. Therefore we can try all options
+ thresh_vec = np.arange(thresh_min, thresh_max + 1, dtype=np.int_)
+ err_vec = np.array([calc_p_error(bright_rate, dark_rate, t_bin,
+ i, dark_to_bright_rate,
+ p_bright=p_bright)
+ for i in thresh_vec])
+
+ min_idx = np.argmin(err_vec)
+ return thresh_vec[min_idx], err_vec[min_idx]
+
+
+def optimise_t_bin(bright_rate, dark_rate, thresh_count, dark_to_bright_rate=0.,
+ p_bright=0.5):
+ """Calculate optimal threshold bin time for a given threshold count
+
+ The calculation accounts for de-shelving during the readout bin time.
See thesis: Alice Burrell, 2010
:param bright_rate: expected bright count rate in $s^-1$
:param dark_rate: expected dark count ratein $s^-1$
+ :param dark_to_bright_rate: dark state decay to become bright in $s^-1$
+ :param p_bright: probability of encountering a bright state (default=0.5)
+
+ :returns: (target_t_bin [s], threshold_count, p_error)"""
+ t_bin_init = thresh_count * (0.5 / dark_rate + 0.5 / bright_rate)
+ t_scale = 1e-4
+ err_scale = 1e-6
+
+ def p_error(x):
+ return calc_p_error(bright_rate, dark_rate, x[0] * t_scale,
+ thresh_count, dark_to_bright_rate,
+ p_bright=p_bright) / err_scale
+
+ result = minimize(p_error,
+ np.array([t_bin_init / t_scale]),
+ bounds=((1 / bright_rate / t_scale, np.inf),))
+
+ return result.x[0] * t_scale, result.fun * err_scale
+
+
+def calc_p_error(bright_rate, dark_rate, t_bin, thresh_count,
+ dark_to_bright_rate=0., p_bright=0.5):
+ """Calculate error probability for Poisson statistics with de-shelving
+
+ See thesis: Alice Burrell, 2010
+
+ :param bright_rate: expected bright count rate in $s^-1$
+ :param dark_rate: expected dark count rate in $s^-1$
:param t_bin: integration time in s.
+ :param thresh_count: threshold count for discriminating bright/dark state
+ (Assumes the exact threshold count is evaluated as dark)
+ :param dark_to_bright_rate: dark state decay to become bright in $s^-1$
:param p_bright: probability of encountering a bright state (default=0.5)
"""
- thresh_rate = calc_thresh_rate(bright_rate,
- dark_rate,
- p_bright=p_bright,
- t_bin=t_bin)
- thresh_count = np.ceil(thresh_rate * t_bin).astype(np.int_)
+ def p_n_given_dark(n):
+ "Burrell Eqns 3.2 & 3.6"
+ if dark_to_bright_rate == 0.:
+ return poisson.pmf(n, mu=dark_rate * t_bin)
+
+ rb_tau = (bright_rate - dark_rate) / dark_to_bright_rate
+ eps = bright_rate * t_bin / rb_tau
+
+ x_n = np.exp(-eps) * np.power(rb_tau / (rb_tau - 1), n) / (rb_tau - 1)
+ x_n *= (gammainc(n + 1, eps * (rb_tau - 1))
+ - gammainc(n + 1, dark_rate * t_bin / rb_tau * (rb_tau - 1)))
+
+ d_n = poisson.pmf(n, mu=dark_rate * t_bin) * np.exp(
+ -t_bin * dark_to_bright_rate) + x_n
+ return d_n
- n_vec = np.arange(thresh_count + 1, dtype=np.int_)
+ # up-to & including thresh_count
+ n_vec = np.arange(np.round(thresh_count) + 1, dtype=np.int_)
- p_error = (1 - p_bright) * (1 -
- np.sum(poisson.pmf(n_vec[:-1], mu=dark_rate * t_bin)))
+ p_error = (1 - p_bright) * (1 - np.sum(p_n_given_dark(n_vec[:-1])))
p_error += p_bright * np.sum(poisson.pmf(n_vec, mu=bright_rate * t_bin))
return p_error
-def calc_thresh_rate(bright_rate, dark_rate, t_bin=1e-3, p_bright=0.5):
- """Optimal threshold rate for distinguishing bright and dark states
+def poisson_optimal_thresh_count(mean_bright, mean_dark, p_bright=0.5):
+ """Optimal threshold rate in the absence of de-shelving
- The calculation assumes both bright and dark counts are
- Poisson distributed and gives a threshold minimising the error probability.
+ The calculation assumes both bright and dark counts are Poisson distributed
+ and gives a threshold minimising the error probability.
The calculation neglects de-shelving and accidental shelving during the
readout bin time. It is therefore not suitable for P(error) < 2e-4.
See thesis: Alice Burrell, 2010
- :param bright_rate: expected bright count rate in $s^-1$
- :param dark_rate: expected dark count ratein $s^-1$
- :param t_bin: integration time in s. Only relevant for p_bright!=0.5
- (default=1e-3)
+ :param mean_bright: expected counts if the ion started in a bright state
+ :param mean_dark: expected counts if the ion started in a dark state
:param p_bright: probability of encountering a bright state
(default=0.5)
- :returns: threshold_rate /$s^-1$
+ :returns: threshold_count
"""
- thresh = np.log(p_bright / (1 - p_bright)) / t_bin + bright_rate - dark_rate
- thresh /= np.log(bright_rate / dark_rate)
- return thresh
-
-
-if __name__ == "__main__":
- test_threshhold = True
- test_calc_p_error = True
- test_bin_time = True
-
- bright = 4e4
- dark = 2e4
- p_bright = 0.5
- error_target = 1e-3
- t_bin = 2e-3
-
- if test_threshhold:
- print(calc_thresh_rate(bright, dark, t_bin=t_bin, p_bright=p_bright))
-
- if test_calc_p_error:
- print("p_error_calc", calc_p_error(bright, dark, t_bin, p_bright))
-
- if test_bin_time:
- t_bin, thresh_rate = calc_target_bin_time(bright,
- dark,
- error_target,
- p_bright=p_bright)
- print("t_bin, thresh_rate", t_bin, thresh_rate)
- print("p_error for this bin", calc_p_error(bright, dark, t_bin, p_bright))
+ thresh = np.log(p_bright / (1 - p_bright)) + mean_bright - mean_dark
+ thresh /= np.log(mean_bright / mean_dark)
+ return np.round(thresh)
| OxfordIonTrapGroup/oitg | 768b2d5be8cf189b192d025877518a227171177f | diff --git a/test/test_threshold.py b/test/test_threshold.py
index 8d9f1dd..af7eb79 100755
--- a/test/test_threshold.py
+++ b/test/test_threshold.py
@@ -3,36 +3,32 @@ from oitg.threshold import *
class ThresholdTest(unittest.TestCase):
- def test_threshold(self):
- bright = 4e4
- dark = 2e4
- t_bin = 2e-3
+ def test_optimise_readout(self):
+ bright_rate = 9.e4
+ dark_rate = 3e4
p_bright = 0.5
- calc_thresh_rate(bright, dark, t_bin=t_bin, p_bright=p_bright)
-
- def test_calc_p_error(self):
- bright = 4e4
- dark = 2e4
- t_bin = 2e-3
- p_bright = 0.5
- calc_p_error(bright, dark, t_bin, p_bright)
-
- def test_bin_time(self):
- bright = 4e4
- dark = 2e4
+ dark_to_bright_rate = 1 / 1.168
+ t_bin, threshold, p_error = optimise_readout(
+ bright_rate, dark_rate, dark_to_bright_rate=dark_to_bright_rate,
+ p_bright=p_bright,)
+ self.assertAlmostEqual(0.0010438517809917116, t_bin,
+ delta=0.01 * 0.0010438517809917116)
+ self.assertEqual(58, threshold)
+ self.assertAlmostEqual(0.000285363868745659, p_error,
+ delta=0.01 * 0.000285363868745659)
+
+ def test_optimise_t_bin(self):
+ bright_rate = 9.e4
+ dark_rate = 3e4
p_bright = 0.5
- error_target = 1e-3
-
- t_bin, thresh_rate = calc_target_bin_time(bright,
- dark,
- error_target,
- p_bright=p_bright)
-
- self.assertAlmostEqual(error_target,
- calc_p_error(bright, dark, t_bin, p_bright),
- delta=0.01 * error_target)
- self.assertAlmostEqual(
- thresh_rate, calc_thresh_rate(bright, dark, t_bin=t_bin, p_bright=p_bright))
+ dark_to_bright_rate = 1 / 1.168
+ t_bin, p_error = optimise_t_bin(
+ bright_rate, dark_rate, 58,
+ dark_to_bright_rate=dark_to_bright_rate, p_bright=p_bright,)
+ self.assertAlmostEqual(0.0010438517809917116, t_bin,
+ delta=0.01 * 0.0010438517809917116)
+ self.assertAlmostEqual(0.000285363868745659, p_error,
+ delta=0.01 * 0.000285363868745659)
if __name__ == '__main__':
| Account for spontaneous shelving and deshelving in threshold.py
As per title.
Currently [threshold.py](https://github.com/OxfordIonTrapGroup/oitg/blob/master/oitg/threshold.py) assumes Poissonian statistics. This is inaccurate for longer readout durations. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_threshold.py::ThresholdTest::test_optimise_readout",
"test/test_threshold.py::ThresholdTest::test_optimise_t_bin"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-05-15T17:31:38Z" | mit |
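As a quick sanity check on the numbers above: ignoring de-shelving, the threshold reduces to the closed-form expression mirrored by `poisson_optimal_thresh_count` in the patch. With the test's rates (9e4 s^-1 bright, 3e4 s^-1 dark) and the roughly 1.04 ms optimal bin time, it gives about 57 counts, just below the de-shelving-aware optimum of 58 (a standalone sketch for illustration, not the library API):

```python
from math import log

def poisson_thresh(mean_bright, mean_dark, p_bright=0.5):
    # Burrell-2010 style threshold without de-shelving (a lower bound)
    t = log(p_bright / (1 - p_bright)) + mean_bright - mean_dark
    return round(t / log(mean_bright / mean_dark))

t_bin = 1.04e-3  # seconds, roughly the optimal bin time from the new tests
print(poisson_thresh(9e4 * t_bin, 3e4 * t_bin))  # -> 57
```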
|
PEtab-dev__libpetab-python-136 | diff --git a/petab/lint.py b/petab/lint.py
index ba9ad8f..e13f540 100644
--- a/petab/lint.py
+++ b/petab/lint.py
@@ -292,7 +292,12 @@ def check_observable_df(observable_df: pd.DataFrame) -> None:
noise = getattr(row, NOISE_FORMULA)
try:
- sp.sympify(noise)
+ sympified_noise = sp.sympify(noise)
+ if sympified_noise is None \
+ or (sympified_noise.is_Number
+ and not sympified_noise.is_finite):
+ raise AssertionError(f"No or non-finite {NOISE_FORMULA} "
+ f"given for observable {row.Index}.")
except sp.SympifyError as e:
raise AssertionError(f"Cannot parse expression '{noise}' "
f"for noise model for observable "
| PEtab-dev/libpetab-python | 902f34967ecc60230339006dc21e233578075dc6 | diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml
index 1c92a34..2aee552 100644
--- a/.github/workflows/ci_tests.yml
+++ b/.github/workflows/ci_tests.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
matrix:
platform: [windows-latest, macos-latest, ubuntu-latest]
- python-version: [3.7, 3.8, 3.9, "3.10"]
+ python-version: ["3.7", "3.10"]
runs-on: ${{ matrix.platform }}
steps:
diff --git a/tests/test_lint.py b/tests/test_lint.py
index eccbed2..40f4c61 100644
--- a/tests/test_lint.py
+++ b/tests/test_lint.py
@@ -495,6 +495,12 @@ def test_check_observable_df():
with pytest.raises(AssertionError):
lint.check_observable_df(bad_observable_df)
+ # Check that missing noiseFormula is detected
+ bad_observable_df = observable_df.copy()
+ bad_observable_df.loc['obs1', NOISE_FORMULA] = nan
+ with pytest.raises(AssertionError):
+ lint.check_observable_df(bad_observable_df)
+
def test_condition_ids_are_unique():
condition_df = pd.DataFrame(data={
| Reduce tests to min+max Python?
Starting and running the full test matrix takes quite some time ... | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_lint.py::test_check_observable_df"
] | [
"tests/test_lint.py::test_assert_measured_observables_present_in_model",
"tests/test_lint.py::test_condition_table_is_parameter_free",
"tests/test_lint.py::test_measurement_table_has_timepoint_specific_mappings",
"tests/test_lint.py::test_observable_table_has_nontrivial_noise_formula",
"tests/test_lint.py::test_assert_overrides_match_parameter_count",
"tests/test_lint.py::test_assert_no_leading_trailing_whitespace",
"tests/test_lint.py::test_assert_model_parameters_in_condition_or_parameter_table",
"tests/test_lint.py::test_assert_noise_distributions_valid",
"tests/test_lint.py::test_check_measurement_df",
"tests/test_lint.py::test_check_parameter_bounds",
"tests/test_lint.py::test_assert_parameter_prior_type_is_valid",
"tests/test_lint.py::test_assert_parameter_prior_parameters_are_valid",
"tests/test_lint.py::test_petablint_succeeds",
"tests/test_lint.py::test_assert_measurement_conditions_present_in_condition_table",
"tests/test_lint.py::test_check_condition_df",
"tests/test_lint.py::test_check_ids",
"tests/test_lint.py::test_check_parameter_df",
"tests/test_lint.py::test_condition_ids_are_unique",
"tests/test_lint.py::test_parameter_ids_are_unique"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-28T14:13:04Z" | mit |
|
PEtab-dev__libpetab-python-152 | diff --git a/petab/problem.py b/petab/problem.py
index 517b91c..f523803 100644
--- a/petab/problem.py
+++ b/petab/problem.py
@@ -87,8 +87,9 @@ class Problem:
@staticmethod
def from_files(
- sbml_file: Union[str, Path, None] = None,
- condition_file: Union[str, Path, None] = None,
+ sbml_file: Union[str, Path] = None,
+ condition_file:
+ Union[str, Path, Iterable[Union[str, Path]]] = None,
measurement_file: Union[str, Path,
Iterable[Union[str, Path]]] = None,
parameter_file: Union[str, Path,
@@ -115,7 +116,8 @@ class Problem:
observable_df = None
if condition_file:
- condition_df = conditions.get_condition_df(condition_file)
+ condition_df = core.concat_tables(condition_file,
+ conditions.get_condition_df)
if measurement_file:
# If there are multiple tables, we will merge them
@@ -198,7 +200,10 @@ class Problem:
problem0 = yaml_config['problems'][0]
- yaml.assert_single_condition_and_sbml_file(problem0)
+ if len(problem0[SBML_FILES]) > 1:
+ # TODO https://github.com/PEtab-dev/libpetab-python/issues/6
+ raise NotImplementedError(
+ 'Support for multiple models is not yet implemented.')
if isinstance(yaml_config[PARAMETER_FILE], list):
parameter_file = [
@@ -213,7 +218,8 @@ class Problem:
if problem0[SBML_FILES] else None,
measurement_file=[get_path(f)
for f in problem0[MEASUREMENT_FILES]],
- condition_file=get_path(problem0[CONDITION_FILES][0]),
+ condition_file=[get_path(f)
+ for f in problem0[CONDITION_FILES]],
parameter_file=parameter_file,
visualization_files=[
get_path(f) for f in problem0.get(VISUALIZATION_FILES, [])],
| PEtab-dev/libpetab-python | 7a0b77ef6d968a4600497763df0ca791caa24768 | diff --git a/tests/test_petab.py b/tests/test_petab.py
index 62e74e8..0e0c4e0 100644
--- a/tests/test_petab.py
+++ b/tests/test_petab.py
@@ -1,8 +1,10 @@
import copy
import pickle
import tempfile
+from io import StringIO
from math import nan
from pathlib import Path
+from tempfile import TemporaryDirectory
import libsbml
import numpy as np
@@ -10,6 +12,7 @@ import pandas as pd
import petab
import pytest
from petab.C import *
+from yaml import safe_load
@pytest.fixture
@@ -452,6 +455,33 @@ def test_concat_measurements():
petab.measurements.get_measurement_df))
+def test_concat_condition_df():
+ df1 = pd.DataFrame(data={
+ CONDITION_ID: ['condition1', 'condition2'],
+ 'par1': [1.1, 1.2],
+ 'par2': [2.1, 2.2],
+ 'par3': [3.1, 3.2]
+ }).set_index(CONDITION_ID)
+
+ assert df1.equals(petab.concat_tables(df1, petab.get_condition_df))
+
+ df2 = pd.DataFrame(data={
+ CONDITION_ID: ['condition3'],
+ 'par1': [1.3],
+ 'par2': [2.3],
+ }).set_index(CONDITION_ID)
+
+ df_expected = pd.DataFrame(data={
+ CONDITION_ID: ['condition1', 'condition2', 'condition3'],
+ 'par1': [1.1, 1.2, 1.3],
+ 'par2': [2.1, 2.2, 2.3],
+ 'par3': [3.1, 3.2, np.nan],
+ }).set_index(CONDITION_ID)
+ assert df_expected.equals(
+ petab.concat_tables((df1, df2), petab.get_condition_df)
+ )
+
+
def test_get_observable_ids(petab_problem): # pylint: disable=W0621
"""Test if observable ids functions returns correct value."""
assert set(petab_problem.get_observable_ids()) == {'observable_1'}
@@ -535,3 +565,68 @@ def test_load_remote():
assert petab_problem.sbml_model is not None
assert petab_problem.measurement_df is not None \
and not petab_problem.measurement_df.empty
+
+
+def test_problem_from_yaml_v1_empty():
+ """Test loading PEtab version 1 yaml without any files"""
+ yaml_config = """
+ format_version: 1
+ parameter_file:
+ problems:
+ - condition_files: []
+ measurement_files: []
+ observable_files: []
+ sbml_files: []
+ """
+ yaml_config = safe_load(StringIO(yaml_config))
+ petab.Problem.from_yaml(yaml_config)
+
+
+def test_problem_from_yaml_v1_multiple_files():
+ """Test loading PEtab version 1 yaml with multiple condition / measurement
+ / observable files"""
+ yaml_config = """
+ format_version: 1
+ parameter_file:
+ problems:
+ - condition_files: [conditions1.tsv, conditions2.tsv]
+ measurement_files: [measurements1.tsv, measurements2.tsv]
+ observable_files: [observables1.tsv, observables2.tsv]
+ sbml_files: []
+ """
+
+ with TemporaryDirectory() as tmpdir:
+ yaml_path = Path(tmpdir, "problem.yaml")
+ with open(yaml_path, 'w') as f:
+ f.write(yaml_config)
+
+ for i in (1, 2):
+ condition_df = pd.DataFrame({
+ CONDITION_ID: [f"condition{i}"],
+ })
+ condition_df.set_index([CONDITION_ID], inplace=True)
+ petab.write_condition_df(condition_df,
+ Path(tmpdir, f"conditions{i}.tsv"))
+
+ measurement_df = pd.DataFrame({
+ SIMULATION_CONDITION_ID: [f"condition{i}"],
+ OBSERVABLE_ID: [f"observable{i}"],
+ TIME: [i],
+ MEASUREMENT: [1]
+ })
+ petab.write_measurement_df(measurement_df,
+ Path(tmpdir, f"measurements{i}.tsv"))
+
+ observables_df = pd.DataFrame({
+ OBSERVABLE_ID: [f"observable{i}"],
+ OBSERVABLE_FORMULA: [1],
+ NOISE_FORMULA: [1],
+ })
+ petab.write_observable_df(observables_df,
+ Path(tmpdir, f"observables{i}.tsv"))
+
+ petab_problem = petab.Problem.from_yaml(yaml_path)
+
+ assert petab_problem.measurement_df.shape[0] == 2
+ assert petab_problem.observable_df.shape[0] == 2
+ assert petab_problem.condition_df.shape[0] == 2
| Add support for multiple condition files per petab.Problem
Allowed in the YAML file, but not implemented in the library. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_petab.py::test_problem_from_yaml_v1_empty",
"tests/test_petab.py::test_problem_from_yaml_v1_multiple_files"
] | [
"tests/test_petab.py::test_split_parameter_replacement_list",
"tests/test_petab.py::test_get_measurement_parameter_ids",
"tests/test_petab.py::test_serialization",
"tests/test_petab.py::test_get_priors_from_df",
"tests/test_petab.py::test_startpoint_sampling",
"tests/test_petab.py::test_create_parameter_df",
"tests/test_petab.py::test_flatten_timepoint_specific_output_overrides",
"tests/test_petab.py::test_flatten_timepoint_specific_output_overrides_special_cases",
"tests/test_petab.py::test_concat_measurements",
"tests/test_petab.py::test_concat_condition_df",
"tests/test_petab.py::test_get_observable_ids",
"tests/test_petab.py::test_parameter_properties",
"tests/test_petab.py::test_to_float_if_float",
"tests/test_petab.py::test_to_files"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-05-24T20:18:04Z" | mit |
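The new behaviour can be exercised with a problem YAML that lists several condition tables, mirroring the added test (file names are placeholders and the referenced TSV files are assumed to exist next to the YAML):

```python
import petab

# problem.yaml, alongside the referenced tables:
#
# format_version: 1
# parameter_file:
# problems:
#   - condition_files: [conditions1.tsv, conditions2.tsv]
#     measurement_files: [measurements1.tsv]
#     observable_files: [observables1.tsv]
#     sbml_files: []

problem = petab.Problem.from_yaml("problem.yaml")
# the two condition tables are concatenated into a single condition_df
print(problem.condition_df)
```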
|
PEtab-dev__libpetab-python-156 | diff --git a/petab/parameters.py b/petab/parameters.py
index f794530..8dbf441 100644
--- a/petab/parameters.py
+++ b/petab/parameters.py
@@ -55,18 +55,8 @@ def get_parameter_df(
return None
parameter_df = pd.concat(dfs)
- # Remove identical parameter definitions
- parameter_df.drop_duplicates(inplace=True, ignore_index=False)
# Check for contradicting parameter definitions
- parameter_duplicates = set(parameter_df.index.values[
- parameter_df.index.duplicated()])
- if parameter_duplicates:
- raise ValueError(
- f'The values of {PARAMETER_ID} must be unique or'
- ' identical between all parameter subset files. The'
- ' following duplicates were found:\n'
- f'{parameter_duplicates}'
- )
+ _check_for_contradicting_parameter_definitions(parameter_df)
return parameter_df
@@ -81,10 +71,24 @@ def get_parameter_df(
except KeyError as e:
raise KeyError(
f"Parameter table missing mandatory field {PARAMETER_ID}.") from e
+ _check_for_contradicting_parameter_definitions(parameter_df)
return parameter_df
+def _check_for_contradicting_parameter_definitions(parameter_df: pd.DataFrame):
+ """
+ Raises a ValueError for non-unique parameter IDs
+ """
+ parameter_duplicates = set(parameter_df.index.values[
+ parameter_df.index.duplicated()])
+ if parameter_duplicates:
+ raise ValueError(
+ f'The values of `{PARAMETER_ID}` must be unique. The '
+ f'following duplicates were found:\n{parameter_duplicates}'
+ )
+
+
def write_parameter_df(df: pd.DataFrame, filename: Union[str, Path]) -> None:
"""Write PEtab parameter table
| PEtab-dev/libpetab-python | 5a87f8370aa5be56ed39b986801e7e36b18d94cb | diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 1727a73..5527133 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -81,11 +81,11 @@ def test_get_parameter_df():
PARAMETER_ID: ['id3'],
PARAMETER_NAME: ['name3']
})
- parameter_dfs['subset2_overlap'] = pd.DataFrame(data={
+ parameter_dfs['subset2_redundance'] = pd.DataFrame(data={
PARAMETER_ID: ['id2', 'id3'],
PARAMETER_NAME: ['name2', 'name3']
})
- parameter_dfs['subset2_error'] = pd.DataFrame(data={
+ parameter_dfs['subset2_contradiction'] = pd.DataFrame(data={
PARAMETER_ID: ['id2', 'id3'],
PARAMETER_NAME: ['different_name2', 'name3']
})
@@ -98,15 +98,52 @@ def test_get_parameter_df():
assert(petab.get_parameter_df(parameter_files['complete']).equals(
petab.get_parameter_df([parameter_files['subset1'],
parameter_files['subset2_strict']])))
- # Check that identical parameter definitions are correctly combined
- assert(petab.get_parameter_df(parameter_files['complete']).equals(
- petab.get_parameter_df([parameter_files['subset1'],
- parameter_files['subset2_overlap']])))
# Ensure an error is raised if there exist parameterId duplicates
+ # with identical parameter definitions
+ with pytest.raises(ValueError):
+ petab.get_parameter_df(
+ [parameter_files["subset1"],
+ parameter_files["subset2_redundance"]]
+ )
# with non-identical parameter definitions
with pytest.raises(ValueError):
- petab.get_parameter_df([parameter_files['subset1'],
- parameter_files['subset2_error']])
+ petab.get_parameter_df(
+ [parameter_files["subset1"],
+ parameter_files["subset2_contradiction"],
+ ]
+ )
+
+ # Ensure that parameters that differ only by parameterId
+ # are recognized as distinct
+ with tempfile.TemporaryDirectory() as directory:
+ parameter_dfs, parameter_files = ({}, {})
+ parameter_dfs["complete"] = pd.DataFrame(
+ data={
+ PARAMETER_ID: ["id1", "id2", "id3", "id4"],
+ NOMINAL_VALUE: [1, 1, 1, 1],
+ }
+ )
+ parameter_dfs["subset1"] = pd.DataFrame(
+ data={PARAMETER_ID: ["id1", "id2"], NOMINAL_VALUE: [1, 1]}
+ )
+ parameter_dfs["subset2"] = pd.DataFrame(
+ data={PARAMETER_ID: ["id3", "id4"], NOMINAL_VALUE: [1, 1]}
+ )
+ for name, df in parameter_dfs.items():
+ with tempfile.NamedTemporaryFile(
+ mode="w", delete=False, dir=directory
+ ) as fh:
+ parameter_files[name] = fh.name
+ parameter_dfs[name].to_csv(fh, sep="\t", index=False)
+ # from one parameter file
+ df_template = parameter_dfs["complete"].set_index(PARAMETER_ID)
+ df_test = petab.get_parameter_df(parameter_files["complete"])
+ assert (df_template == df_test).all().all()
+ # several parameter files
+ assert petab.get_parameter_df(parameter_files["complete"]).equals(
+ petab.get_parameter_df([parameter_files["subset1"],
+ parameter_files["subset2"]])
+ )
def test_write_parameter_df():
| Parameters dropped when using subset parameter files
On branch `develop`: When supplying multiple parameter files to `parameters.get_parameter_df`, parameters will be dropped if they only differ from other parameters by their `parameterId`.
___
Example using parameter files from [demo_parameters.zip](https://github.com/PEtab-dev/libpetab-python/files/8893550/demo_parameters.zip):
```python
import petab
import pandas as pd
fp_params_1 = 'demo_parameters_1.tsv'
fp_params_2 = 'demo_parameters_2.tsv'
df = petab.parameters.get_parameter_df([fp_params_1, fp_params_2])
print(df.index.values)
```
Expected output:
```['id11' 'id12' 'id13' 'id21' 'id22']```
Actual output using `develop` at 5a87f8370aa5be56ed39b986801e7e36b18d94cb:
``` ['id11' 'id21' 'id22'] ```
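The patch traces this back to the `drop_duplicates` call on the concatenated table: `pandas.DataFrame.drop_duplicates` only compares columns, not the index, so parameters whose rows are identical apart from their `parameterId` collapse into one. A minimal pandas illustration (the frames are made up):

```python
import pandas as pd

df1 = pd.DataFrame({"parameterId": ["id11", "id12"],
                    "nominalValue": [1, 1]}).set_index("parameterId")
df2 = pd.DataFrame({"parameterId": ["id21", "id22"],
                    "nominalValue": [1, 1]}).set_index("parameterId")

combined = pd.concat([df1, df2])
print(combined.index.tolist())                    # ['id11', 'id12', 'id21', 'id22']
# drop_duplicates ignores the index, so identical rows collapse to the first ID
print(combined.drop_duplicates().index.tolist())  # ['id11']
```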
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_parameters.py::test_get_parameter_df"
] | [
"tests/test_parameters.py::test_get_optimization_parameter_scaling",
"tests/test_parameters.py::test_get_optimization_parameters",
"tests/test_parameters.py::test_write_parameter_df",
"tests/test_parameters.py::test_normalize_parameter_df",
"tests/test_parameters.py::test_scale_unscale"
] | {
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false
} | "2022-06-14T08:27:45Z" | mit |
|
PEtab-dev__libpetab-python-184 | diff --git a/petab/C.py b/petab/C.py
index 09553ce..08c27d9 100644
--- a/petab/C.py
+++ b/petab/C.py
@@ -234,6 +234,12 @@ SCATTER_PLOT = 'ScatterPlot'
#: Supported plot types
PLOT_TYPES_SIMULATION = [LINE_PLOT, BAR_PLOT, SCATTER_PLOT]
+#: Supported xScales
+X_SCALES = [LIN, LOG, LOG10]
+
+#: Supported yScales
+Y_SCALES = [LIN, LOG, LOG10]
+
#:
MEAN_AND_SD = 'MeanAndSD'
diff --git a/petab/lint.py b/petab/lint.py
index 50a4781..8b2a69a 100644
--- a/petab/lint.py
+++ b/petab/lint.py
@@ -842,6 +842,13 @@ def lint_problem(problem: 'petab.Problem') -> bool:
logger.error(e)
errors_occurred = True
+ if problem.visualization_df is not None:
+ logger.info("Checking visualization table...")
+ from petab.visualize.lint import validate_visualization_df
+ errors_occurred |= validate_visualization_df(problem)
+ else:
+ logger.warning("Visualization table not available. Skipping.")
+
if errors_occurred:
logger.error('Not OK')
elif problem.measurement_df is None or problem.condition_df is None \
diff --git a/petab/petablint.py b/petab/petablint.py
index 8be50aa..f31a63f 100755
--- a/petab/petablint.py
+++ b/petab/petablint.py
@@ -50,6 +50,9 @@ def parse_cli_args():
help='Conditions table')
parser.add_argument('-p', '--parameters', dest='parameter_file_name',
help='Parameter table')
+ parser.add_argument('--vis', '--visualizations',
+ dest='visualization_file_name',
+ help='Visualization table')
group = parser.add_mutually_exclusive_group()
group.add_argument('-y', '--yaml', dest='yaml_file_name',
@@ -109,6 +112,9 @@ def main():
logger.debug(f'\tMeasurement table: {args.measurement_file_name}')
if args.parameter_file_name:
logger.debug(f'\tParameter table: {args.parameter_file_name}')
+ if args.visualization_file_name:
+ logger.debug('\tVisualization table: '
+ f'{args.visualization_file_name}')
try:
problem = petab.Problem.from_files(
@@ -116,7 +122,8 @@ def main():
condition_file=args.condition_file_name,
measurement_file=args.measurement_file_name,
parameter_file=args.parameter_file_name,
- observable_files=args.observable_file_name
+ observable_files=args.observable_file_name,
+ visualization_files=args.visualization_file_name,
)
except FileNotFoundError as e:
logger.error(e)
diff --git a/petab/visualize/lint.py b/petab/visualize/lint.py
new file mode 100644
index 0000000..31b23cb
--- /dev/null
+++ b/petab/visualize/lint.py
@@ -0,0 +1,130 @@
+"""Validation of PEtab visualization files"""
+import logging
+
+import pandas as pd
+
+from .. import C, Problem
+from ..C import VISUALIZATION_DF_REQUIRED_COLS
+
+
+logger = logging.getLogger(__name__)
+
+
+def validate_visualization_df(
+ problem: Problem
+) -> bool:
+ """Validate visualization table
+
+ Arguments:
+ problem: The PEtab problem containing a visualization table
+
+ Returns:
+ ``True`` if errors occurred, ``False`` otherwise
+ """
+ vis_df = problem.visualization_df
+ if vis_df is None or vis_df.empty:
+ return False
+
+ errors = False
+
+ if missing_req_cols := (set(VISUALIZATION_DF_REQUIRED_COLS)
+ - set(vis_df.columns)):
+ logger.error(f"Missing required columns {missing_req_cols} "
+ "in visualization table.")
+ errors = True
+
+ # Set all unspecified optional values to their defaults to simplify
+ # validation
+ vis_df = vis_df.copy()
+ _apply_defaults(vis_df)
+
+ if unknown_types := (set(vis_df[C.PLOT_TYPE_SIMULATION].unique())
+ - set(C.PLOT_TYPES_SIMULATION)):
+ logger.error(f"Unknown {C.PLOT_TYPE_SIMULATION}: {unknown_types}. "
+ f"Must be one of {C.PLOT_TYPES_SIMULATION}")
+ errors = True
+
+ if unknown_types := (set(vis_df[C.PLOT_TYPE_DATA].unique())
+ - set(C.PLOT_TYPES_DATA)):
+ logger.error(f"Unknown {C.PLOT_TYPE_DATA}: {unknown_types}. "
+ f"Must be one of {C.PLOT_TYPES_DATA}")
+ errors = True
+
+ if unknown_scale := (set(vis_df[C.X_SCALE].unique())
+ - set(C.X_SCALES)):
+ logger.error(f"Unknown {C.X_SCALE}: {unknown_scale}. "
+ f"Must be one of {C.X_SCALES}")
+ errors = True
+
+ if any(
+ (vis_df[C.X_SCALE] == 'order')
+ & (vis_df[C.PLOT_TYPE_SIMULATION] != C.LINE_PLOT)
+ ):
+ logger.error(f"{C.X_SCALE}=order is only allowed with "
+ f"{C.PLOT_TYPE_SIMULATION}={C.LINE_PLOT}.")
+ errors = True
+
+ if unknown_scale := (set(vis_df[C.Y_SCALE].unique())
+ - set(C.Y_SCALES)):
+ logger.error(f"Unknown {C.Y_SCALE}: {unknown_scale}. "
+ f"Must be one of {C.Y_SCALES}")
+ errors = True
+
+ if problem.condition_df is not None:
+ # check for ambiguous values
+ reserved_names = {C.TIME, "condition"}
+ for reserved_name in reserved_names:
+ if reserved_name in problem.condition_df \
+ and reserved_name in vis_df[C.X_VALUES]:
+ logger.error(f"Ambiguous value for `{C.X_VALUES}`: "
+ f"`{reserved_name}` has a special meaning as "
+ f"`{C.X_VALUES}`, but there exists also a model "
+ "entity with that name.")
+ errors = True
+
+ # check xValues exist in condition table
+ for xvalue in set(vis_df[C.X_VALUES].unique()) - reserved_names:
+ if xvalue not in problem.condition_df:
+ logger.error(f"{C.X_VALUES} was set to `{xvalue}`, but no "
+ "such column exists in the conditions table.")
+ errors = True
+
+ if problem.observable_df is not None:
+ # yValues must be an observable
+ for yvalue in vis_df[C.Y_VALUES].unique():
+ if yvalue not in problem.observable_df.index:
+ logger.error(
+ f"{C.Y_VALUES} was set to `{yvalue}`, but no such "
+ "observable exists in the observables table."
+ )
+ errors = True
+
+ return errors
+
+
+def _apply_defaults(vis_df: pd.DataFrame):
+ """
+ Set default values.
+
+ Adds default values to the given visualization table where no value was
+ specified.
+ """
+ def set_default(column: str, value):
+ if column not in vis_df:
+ vis_df[column] = value
+ elif value is not None:
+ vis_df[column].fillna(value)
+
+ set_default(C.PLOT_NAME, "")
+ set_default(C.PLOT_TYPE_SIMULATION, C.LINE_PLOT)
+ set_default(C.PLOT_TYPE_DATA, C.MEAN_AND_SD)
+ set_default(C.DATASET_ID, None)
+ set_default(C.X_VALUES, C.TIME)
+ set_default(C.X_OFFSET, 0)
+ set_default(C.X_LABEL, vis_df[C.X_VALUES])
+ set_default(C.X_SCALE, C.LIN)
+ set_default(C.Y_VALUES, None)
+ set_default(C.Y_OFFSET, 0)
+ set_default(C.Y_LABEL, vis_df[C.Y_VALUES])
+ set_default(C.Y_SCALE, C.LIN)
+ set_default(C.LEGEND_ENTRY, vis_df[C.DATASET_ID])
| PEtab-dev/libpetab-python | a0817db21c884217aee5771790c2b23eafc89549 | diff --git a/tests/test_visualization.py b/tests/test_visualization.py
index fc33676..2d7d7f7 100644
--- a/tests/test_visualization.py
+++ b/tests/test_visualization.py
@@ -6,9 +6,11 @@ from tempfile import TemporaryDirectory
import matplotlib.pyplot as plt
import pytest
+import petab
from petab.C import *
from petab.visualize import plot_with_vis_spec, plot_without_vis_spec
from petab.visualize.plotting import VisSpecParser
+from petab.visualize.lint import validate_visualization_df
# Avoid errors when plotting without X server
plt.switch_backend('agg')
@@ -135,6 +137,12 @@ def test_visualization_with_vis_and_sim(data_file_Isensee,
condition_file_Isensee,
vis_spec_file_Isensee,
simulation_file_Isensee):
+ validate_visualization_df(
+ petab.Problem(
+ condition_df=petab.get_condition_df(condition_file_Isensee),
+ visualization_df=petab.get_visualization_df(vis_spec_file_Isensee),
+ )
+ )
plot_with_vis_spec(vis_spec_file_Isensee, condition_file_Isensee,
data_file_Isensee, simulation_file_Isensee)
@@ -366,3 +374,25 @@ def test_cli():
"-o", temp_dir
]
subprocess.run(args, check=True)
+
+
[email protected](
+ "vis_file",
+ (
+ "vis_spec_file_Isensee",
+ "vis_spec_file_Isensee_replicates",
+ "vis_spec_file_Isensee_scatterplot",
+ "visu_file_Fujita_wo_dsid_wo_yvalues",
+ "visu_file_Fujita_all_obs_with_diff_settings",
+ "visu_file_Fujita_empty",
+ "visu_file_Fujita_minimal",
+ "visu_file_Fujita_replicates",
+ "visu_file_Fujita_small",
+ )
+)
+def test_validate(vis_file, request):
+ """Check that all test files pass validation."""
+ vis_file = request.getfixturevalue(vis_file)
+ assert False is validate_visualization_df(
+ petab.Problem(visualization_df=petab.get_visualization_df(vis_file))
+ )
| Implement validation of PEtab visualization files
- [x] Implement consistency checks
- [x] plotId
- [x] [plotName]
- [x] plotTypeSimulation
- [x] plotTypeData
- [x] datasetId
- [x] [xValues]
- [x] [xOffset]
- [x] [xLabel]
- [x] [xScale]
- [x] [yValues]
- [x] [yOffset]
- [x] [yLabel]
- [x] [yScale]
- [x] [legendEntry]
- [x] Add as command line option to `petablint`
see also #1 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_visualization.py::test_visualization_with_vis_and_sim",
"tests/test_visualization.py::test_visualization_replicates",
"tests/test_visualization.py::test_visualization_scatterplot",
"tests/test_visualization.py::test_visualization_small_visu_file_w_datasetid",
"tests/test_visualization.py::test_visualization_small_visu_file_wo_datasetid",
"tests/test_visualization.py::test_visualization_all_obs_with_diff_settings",
"tests/test_visualization.py::test_visualization_minimal_visu_file",
"tests/test_visualization.py::test_visualization_empty_visu_file",
"tests/test_visualization.py::test_visualization_minimal_data_file",
"tests/test_visualization.py::test_visualization_with_dataset_list",
"tests/test_visualization.py::test_visualization_without_datasets",
"tests/test_visualization.py::test_visualization_only_simulations",
"tests/test_visualization.py::test_simple_visualization",
"tests/test_visualization.py::test_save_plots_to_file",
"tests/test_visualization.py::test_save_visu_file",
"tests/test_visualization.py::test_cli",
"tests/test_visualization.py::test_validate[vis_spec_file_Isensee]",
"tests/test_visualization.py::test_validate[vis_spec_file_Isensee_replicates]",
"tests/test_visualization.py::test_validate[vis_spec_file_Isensee_scatterplot]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_wo_dsid_wo_yvalues]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_all_obs_with_diff_settings]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_empty]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_minimal]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_replicates]",
"tests/test_visualization.py::test_validate[visu_file_Fujita_small]"
] | [] | {
"failed_lite_validators": [
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-12-06T11:21:53Z" | mit |
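A usage sketch of the validator added above (file names are placeholders); `validate_visualization_df` returns `True` when problems are found, and the same check also runs from the command line via the new `petablint --vis` option:

```python
import petab
from petab.visualize.lint import validate_visualization_df

problem = petab.Problem(
    condition_df=petab.get_condition_df("conditions.tsv"),
    visualization_df=petab.get_visualization_df("visualization.tsv"),
)

# True if errors were found, False otherwise
errors_found = validate_visualization_df(problem)
```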
|
PEtab-dev__libpetab-python-235 | diff --git a/petab/core.py b/petab/core.py
index 05deb16..0e7b7da 100644
--- a/petab/core.py
+++ b/petab/core.py
@@ -71,7 +71,7 @@ def write_simulation_df(df: pd.DataFrame, filename: Union[str, Path]) -> None:
def get_visualization_df(
- visualization_file: Union[str, Path, pd.DataFrame, None]
+ visualization_file: Union[str, Path, pd.DataFrame, None]
) -> Union[pd.DataFrame, None]:
"""Read PEtab visualization table
@@ -254,7 +254,7 @@ def flatten_timepoint_specific_output_overrides(
Arguments:
petab_problem:
- PEtab problem to work on
+ PEtab problem to work on. Modified in place.
"""
new_measurement_dfs = []
new_observable_dfs = []
@@ -277,22 +277,21 @@ def flatten_timepoint_specific_output_overrides(
for field, hyperparameter_type, target in [
(NOISE_PARAMETERS, "noiseParameter", NOISE_FORMULA),
(OBSERVABLE_PARAMETERS, "observableParameter", OBSERVABLE_FORMULA),
+ (OBSERVABLE_PARAMETERS, "observableParameter", NOISE_FORMULA),
]:
- if field in measurements:
- hyperparameter_replacement_id = (
- get_hyperparameter_replacement_id(
- hyperparameter_type=hyperparameter_type,
- observable_replacement_id=observable_replacement_id,
- )
- )
- hyperparameter_id = mappings[field][
- hyperparameter_replacement_id
- ]
- observable[target] = re.sub(
- hyperparameter_id,
- hyperparameter_replacement_id,
- observable[target],
- )
+ if field not in measurements:
+ continue
+
+ hyperparameter_replacement_id = get_hyperparameter_replacement_id(
+ hyperparameter_type=hyperparameter_type,
+ observable_replacement_id=observable_replacement_id,
+ )
+ hyperparameter_id = mappings[field][hyperparameter_replacement_id]
+ observable[target] = re.sub(
+ hyperparameter_id,
+ hyperparameter_replacement_id,
+ observable[target],
+ )
measurements[OBSERVABLE_ID] = observable_replacement_id
new_measurement_dfs.append(measurements)
@@ -306,7 +305,7 @@ def flatten_timepoint_specific_output_overrides(
def unflatten_simulation_df(
simulation_df: pd.DataFrame,
petab_problem: "petab.problem.Problem",
-) -> None:
+) -> pd.DataFrame:
"""Unflatten simulations from a flattened PEtab problem.
A flattened PEtab problem is the output of applying
| PEtab-dev/libpetab-python | 4e6a0189eedf7078de8a674c200819cbdc26c83b | diff --git a/tests/test_petab.py b/tests/test_petab.py
index 89053fb..b836880 100644
--- a/tests/test_petab.py
+++ b/tests/test_petab.py
@@ -353,36 +353,32 @@ def test_flatten_timepoint_specific_output_overrides():
OBSERVABLE_FORMULA: [
"observableParameter1_obs1 + observableParameter2_obs1"
],
- NOISE_FORMULA: ["noiseParameter1_obs1"],
+ NOISE_FORMULA: [
+ "(observableParameter1_obs1 + observableParameter2_obs1) * noiseParameter1_obs1"
+ ],
}
)
observable_df.set_index(OBSERVABLE_ID, inplace=True)
+ # new observable IDs (obs${i_obs}_${i_obsParOverride}_${i_noiseParOverride}_${i_condition})
+ obs1_1_1_1 = "obs1__obsParOverride1_1_0__noiseParOverride1__condition1"
+ obs1_2_1_1 = "obs1__obsParOverride2_1_0__noiseParOverride1__condition1"
+ obs1_2_2_1 = "obs1__obsParOverride2_1_0__noiseParOverride2__condition1"
observable_df_expected = pd.DataFrame(
data={
- OBSERVABLE_ID: [
- "obs1__obsParOverride1_1_0__noiseParOverride1__condition1",
- "obs1__obsParOverride2_1_0__noiseParOverride1__condition1",
- "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
- ],
+ OBSERVABLE_ID: [obs1_1_1_1, obs1_2_1_1, obs1_2_2_1],
OBSERVABLE_FORMULA: [
- "observableParameter1_obs1__obsParOverride1_1_0__"
- "noiseParOverride1__condition1 + observableParameter2_obs1"
- "__obsParOverride1_1_0__noiseParOverride1__condition1",
- "observableParameter1_obs1__obsParOverride2_1_0__noiseParOverride1"
- "__condition1 + observableParameter2_obs1__obsParOverride2_1_0"
- "__noiseParOverride1__condition1",
- "observableParameter1_obs1__obsParOverride2_1_0"
- "__noiseParOverride2__condition1 + observableParameter2_obs1__"
- "obsParOverride2_1_0__noiseParOverride2__condition1",
+ f"observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1}",
+ f"observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1}",
+ f"observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1}",
],
NOISE_FORMULA: [
- "noiseParameter1_obs1__obsParOverride1_1_0__"
- "noiseParOverride1__condition1",
- "noiseParameter1_obs1__obsParOverride2_1_0__"
- "noiseParOverride1__condition1",
- "noiseParameter1_obs1__obsParOverride2_1_0__"
- "noiseParOverride2__condition1",
+ f"(observableParameter1_{obs1_1_1_1} + observableParameter2_{obs1_1_1_1})"
+ f" * noiseParameter1_{obs1_1_1_1}",
+ f"(observableParameter1_{obs1_2_1_1} + observableParameter2_{obs1_2_1_1})"
+ f" * noiseParameter1_{obs1_2_1_1}",
+ f"(observableParameter1_{obs1_2_2_1} + observableParameter2_{obs1_2_2_1})"
+ f" * noiseParameter1_{obs1_2_2_1}",
],
}
)
@@ -418,12 +414,7 @@ def test_flatten_timepoint_specific_output_overrides():
measurement_df_expected = pd.DataFrame(
data={
- OBSERVABLE_ID: [
- "obs1__obsParOverride1_1_0__noiseParOverride1__condition1",
- "obs1__obsParOverride2_1_0__noiseParOverride1__condition1",
- "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
- "obs1__obsParOverride2_1_0__noiseParOverride2__condition1",
- ],
+ OBSERVABLE_ID: [obs1_1_1_1, obs1_2_1_1, obs1_2_2_1, obs1_2_2_1],
SIMULATION_CONDITION_ID: [
"condition1",
"condition1",
@@ -472,8 +463,12 @@ def test_flatten_timepoint_specific_output_overrides():
is False
)
- assert problem.observable_df.equals(observable_df_expected) is True
- assert problem.measurement_df.equals(measurement_df_expected) is True
+ pd.testing.assert_frame_equal(
+ problem.observable_df, observable_df_expected
+ )
+ pd.testing.assert_frame_equal(
+ problem.measurement_df, measurement_df_expected
+ )
assert petab.lint_problem(problem) is False
@@ -591,8 +586,12 @@ def test_flatten_timepoint_specific_output_overrides_special_cases():
is False
)
- assert problem.observable_df.equals(observable_df_expected) is True
- assert problem.measurement_df.equals(measurement_df_expected) is True
+ pd.testing.assert_frame_equal(
+ problem.observable_df, observable_df_expected
+ )
+ pd.testing.assert_frame_equal(
+ problem.measurement_df, measurement_df_expected
+ )
assert petab.lint_problem(problem) is False
@@ -842,13 +841,14 @@ def test_get_required_parameters_for_parameter_table(petab_problem):
# as part of the proportional error model.
assert "observableParameter1_obs1" in noise_placeholders
- required_parameters_for_parameter_table = \
+ required_parameters_for_parameter_table = (
petab.parameters.get_required_parameters_for_parameter_table(
model=petab_problem.model,
condition_df=petab_problem.condition_df,
observable_df=petab_problem.observable_df,
measurement_df=petab_problem.measurement_df,
)
+ )
# The observable parameter is correctly recognized as a placeholder,
# i.e. does not need to be in the parameter table.
assert (
| `flatten_timepoint_specific_output_overrides` does not support observableParameter overrides as placeholders in noise formulae
`flatten_timepoint_specific_output_overrides` does not support `observableParameter` placeholders in noise formulae.
Related to https://github.com/PEtab-dev/libpetab-python/pull/231 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_petab.py::test_flatten_timepoint_specific_output_overrides"
] | [
"tests/test_petab.py::test_split_parameter_replacement_list",
"tests/test_petab.py::test_get_measurement_parameter_ids",
"tests/test_petab.py::test_serialization",
"tests/test_petab.py::test_get_priors_from_df",
"tests/test_petab.py::test_startpoint_sampling",
"tests/test_petab.py::test_startpoint_sampling_dict",
"tests/test_petab.py::test_create_parameter_df",
"tests/test_petab.py::test_flatten_timepoint_specific_output_overrides_special_cases",
"tests/test_petab.py::test_concat_measurements",
"tests/test_petab.py::test_concat_condition_df",
"tests/test_petab.py::test_get_observable_ids",
"tests/test_petab.py::test_parameter_properties",
"tests/test_petab.py::test_to_float_if_float",
"tests/test_petab.py::test_to_files",
"tests/test_petab.py::test_load_remote",
"tests/test_petab.py::test_problem_from_yaml_v1_empty",
"tests/test_petab.py::test_problem_from_yaml_v1_multiple_files",
"tests/test_petab.py::test_get_required_parameters_for_parameter_table"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-12-14T14:41:38Z" | mit |
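For reference, the failing pattern is an observable whose noise formula reuses the `observableParameter...` placeholders; the definition below is taken from the updated test (illustrative data only):

```python
import pandas as pd
from petab.C import OBSERVABLE_ID, OBSERVABLE_FORMULA, NOISE_FORMULA

# noise formula containing observable-parameter placeholders; flattening
# must rewrite these placeholders in the noise formula as well
observable_df = pd.DataFrame({
    OBSERVABLE_ID: ["obs1"],
    OBSERVABLE_FORMULA: [
        "observableParameter1_obs1 + observableParameter2_obs1"],
    NOISE_FORMULA: [
        "(observableParameter1_obs1 + observableParameter2_obs1)"
        " * noiseParameter1_obs1"],
}).set_index(OBSERVABLE_ID)
```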
|
PEtab-dev__libpetab-python-92 | diff --git a/petab/lint.py b/petab/lint.py
index fe353ac..ba9ad8f 100644
--- a/petab/lint.py
+++ b/petab/lint.py
@@ -108,6 +108,10 @@ def check_condition_df(
check_ids(df.index.values, kind='condition')
+ if not df.index.is_unique:
+ raise AssertionError("Non-unique condition IDs: "
+ f"{df.index.values[df.index.duplicated()]}")
+
for column_name in req_cols:
if not np.issubdtype(df[column_name].dtype, np.number):
assert_no_leading_trailing_whitespace(
| PEtab-dev/libpetab-python | f6fd4896f557497a16afe3773467eb06a2614a79 | diff --git a/tests/test_lint.py b/tests/test_lint.py
index 2756226..eccbed2 100644
--- a/tests/test_lint.py
+++ b/tests/test_lint.py
@@ -494,3 +494,36 @@ def test_check_observable_df():
bad_observable_df.index = ['obs1', 'obs1']
with pytest.raises(AssertionError):
lint.check_observable_df(bad_observable_df)
+
+
+def test_condition_ids_are_unique():
+ condition_df = pd.DataFrame(data={
+ CONDITION_ID: ['condition1', 'condition1'],
+ 'parameter1': [1.0, 2.0]
+ })
+ condition_df.set_index(CONDITION_ID, inplace=True)
+
+ with pytest.raises(AssertionError):
+ lint.check_condition_df(condition_df)
+
+ condition_df.index = ['condition0', 'condition1']
+ condition_df.index.name = 'conditionId'
+ lint.check_condition_df(condition_df)
+
+
+def test_parameter_ids_are_unique():
+ parameter_df = pd.DataFrame({
+ PARAMETER_ID: ['par0', 'par0'],
+ PARAMETER_SCALE: [LIN, LIN],
+ ESTIMATE: [1, 1],
+ LOWER_BOUND: [1e-5, 1e-6],
+ UPPER_BOUND: [1e5, 1e6]
+
+ }).set_index(PARAMETER_ID)
+
+ with pytest.raises(AssertionError):
+ lint.check_parameter_df(parameter_df)
+
+ parameter_df.index = ['par0', 'par1']
+ parameter_df.index.name = 'parameterId'
+ lint.check_parameter_df(parameter_df)
| no lint error for non-unique condition ids
`petab.lint.check_condition_df` does not check whether index entries are unique, but this is required according to documentation. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_lint.py::test_condition_ids_are_unique"
] | [
"tests/test_lint.py::test_assert_measured_observables_present_in_model",
"tests/test_lint.py::test_condition_table_is_parameter_free",
"tests/test_lint.py::test_measurement_table_has_timepoint_specific_mappings",
"tests/test_lint.py::test_observable_table_has_nontrivial_noise_formula",
"tests/test_lint.py::test_assert_overrides_match_parameter_count",
"tests/test_lint.py::test_assert_no_leading_trailing_whitespace",
"tests/test_lint.py::test_assert_model_parameters_in_condition_or_parameter_table",
"tests/test_lint.py::test_assert_noise_distributions_valid",
"tests/test_lint.py::test_check_measurement_df",
"tests/test_lint.py::test_check_parameter_bounds",
"tests/test_lint.py::test_assert_parameter_prior_type_is_valid",
"tests/test_lint.py::test_assert_parameter_prior_parameters_are_valid",
"tests/test_lint.py::test_assert_measurement_conditions_present_in_condition_table",
"tests/test_lint.py::test_check_condition_df",
"tests/test_lint.py::test_check_ids",
"tests/test_lint.py::test_check_parameter_df",
"tests/test_lint.py::test_check_observable_df",
"tests/test_lint.py::test_parameter_ids_are_unique"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-29T13:42:30Z" | mit |
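Concretely, with the added check the linter now rejects condition tables whose index repeats an ID (mirroring the new test; values are illustrative):

```python
import pandas as pd
from petab import lint
from petab.C import CONDITION_ID

condition_df = pd.DataFrame({
    CONDITION_ID: ["condition1", "condition1"],  # duplicated on purpose
    "parameter1": [1.0, 2.0],
}).set_index(CONDITION_ID)

# raises AssertionError listing the non-unique condition IDs
lint.check_condition_df(condition_df)
```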
|
PGijsbers__gama-64 | diff --git a/docs/source/releases.rst b/docs/source/releases.rst
index abed928..b311f77 100644
--- a/docs/source/releases.rst
+++ b/docs/source/releases.rst
@@ -1,6 +1,21 @@
Release Notes
=============
+Version 19.11.0
+---------------
+Features:
+ - `gama.__version__` can now be used to retrieve gama's version.
+ - `fit_arff`, `score_arff` and `predict_arff` now accept a `target_column` parameter to specify the target.
+ If left unset, the last column of the ARFF file is assumed to be the target column.
+
+Bugfixes:
+ - fit(x, y) may now be called with y as (N,1) array.
+ - ensemble post-processing is now compatible with non-zero indexed class labels
+
+Maintenance:
+ - `__version__.py` is now the only place with hard-coded version.
+
+
Version 19.08.0
---------------
- Prototype dash app for visualizing GAMA logs.
diff --git a/gama/__init__.py b/gama/__init__.py
index fa98f03..fb18dad 100644
--- a/gama/__init__.py
+++ b/gama/__init__.py
@@ -1,5 +1,6 @@
from .GamaClassifier import GamaClassifier
from .GamaRegressor import GamaRegressor
+from .__version__ import __version__
name = "gama"
diff --git a/gama/__version__.py b/gama/__version__.py
new file mode 100644
index 0000000..e934ba5
--- /dev/null
+++ b/gama/__version__.py
@@ -0,0 +1,2 @@
+# We employ YY.0M.micro scheme in 2019. In 2020 we move to YY.minor.micro.
+__version__ = '19.11.0'
diff --git a/gama/data.py b/gama/data.py
index 13e192a..f4694eb 100644
--- a/gama/data.py
+++ b/gama/data.py
@@ -1,5 +1,5 @@
""" This module contains functions for loading data. """
-from typing import Tuple
+from typing import Tuple, Optional
import arff
import pandas as pd
@@ -19,6 +19,9 @@ def arff_to_pandas(file_path: str) -> pd.DataFrame:
A dataframe of the data in the ARFF file,
with categorical columns having category dtype.
"""
+ if not isinstance(file_path, str):
+ raise TypeError(f"`file_path` must be of type `str` but is of type {type(file_path)}")
+
with open(file_path, 'r') as arff_file:
arff_dict = arff.load(arff_file)
@@ -31,17 +34,17 @@ def arff_to_pandas(file_path: str) -> pd.DataFrame:
return data
-def X_y_from_arff(file_path: str, split_column: str = 'last') -> Tuple[pd.DataFrame, pd.Series]:
+def X_y_from_arff(file_path: str, split_column: Optional[str] = None) -> Tuple[pd.DataFrame, pd.Series]:
""" Load data from the ARFF file into pandas DataFrame and specified column to pd.Series. "
Parameters
----------
file_path: str
path to the ARFF file.
- split_column: str (default='last')
+ split_column: str, optional (default=None)
Column to split and return separately (e.g. target column).
- Value should either match a column name or 'last'.
- If 'last' is specified, the last column is returned separately.
+ Value should either match a column name or None.
+ If None is specified, the last column is returned separately.
Returns
-------
@@ -50,7 +53,7 @@ def X_y_from_arff(file_path: str, split_column: str = 'last') -> Tuple[pd.DataFr
"""
data = arff_to_pandas(file_path)
- if split_column == 'last':
+ if split_column is None:
return data.iloc[:, :-1], data.iloc[:, -1]
elif split_column in data.columns:
return data.loc[:, data.columns != split_column], data.loc[:, split_column]
diff --git a/gama/gama.py b/gama/gama.py
index 03e45c1..cd61d85 100644
--- a/gama/gama.py
+++ b/gama/gama.py
@@ -21,6 +21,7 @@ from gama.search_methods.base_search import BaseSearch
from gama.utilities.metrics import scoring_to_metric
from .utilities.observer import Observer
+from gama.__version__ import __version__
from gama.data import X_y_from_arff
from gama.search_methods.async_ea import AsyncEA
from gama.utilities.generic.timekeeper import TimeKeeper
@@ -42,7 +43,6 @@ log = logging.getLogger(__name__)
STR_NO_OPTIMAL_PIPELINE = """Gama did not yet establish an optimal pipeline.
This can be because `fit` was not yet called, or
did not terminate successfully."""
-__version__ = '19.01.0'
for module_to_ignore in ["sklearn", "numpy"]:
warnings.filterwarnings("ignore", module=module_to_ignore)
@@ -202,23 +202,24 @@ class Gama(ABC):
x[col] = x[col].astype(self._X[col].dtype)
return self._predict(x)
- def predict_arff(self, arff_file_path: str):
+ def predict_arff(self, arff_file_path: str, target_column: Optional[str] = None) -> np.ndarray:
""" Predict the target for input found in the ARFF file.
Parameters
----------
arff_file_path: str
An ARFF file with the same columns as the one that used in fit.
- The target column is ignored (but must be present).
+ Target column must be present in file, but its values are ignored (can be '?').
+ target_column: str, optional (default=None)
+ Specifies which column the model should predict.
+ If left None, the last column is taken to be the target.
Returns
-------
numpy.ndarray
array with predictions for each row in the ARFF file.
"""
- if not isinstance(arff_file_path, str):
- raise TypeError(f"`arff_file_path` must be of type `str` but is of type {type(arff_file_path)}")
- X, _ = X_y_from_arff(arff_file_path)
+ X, _ = X_y_from_arff(arff_file_path, split_column=target_column)
return self._predict(X)
def score(self, x: Union[pd.DataFrame, np.ndarray], y: Union[pd.Series, np.ndarray]) -> float:
@@ -239,32 +240,38 @@ class Gama(ABC):
predictions = self.predict_proba(x) if self._metrics[0].requires_probabilities else self.predict(x)
return self._metrics[0].score(y, predictions)
- def score_arff(self, arff_file_path: str) -> float:
+ def score_arff(self, arff_file_path: str, target_column: Optional[str] = None) -> float:
""" Calculate the score of the model according to the `scoring` metric and input in the ARFF file.
Parameters
----------
- arff_file_path: string
+ arff_file_path: str
An ARFF file with which to calculate the score.
+ target_column: str, optional (default=None)
+ Specifies which column the model should predict.
+ If left None, the last column is taken to be the target.
Returns
-------
float
The score obtained on the given test data according to the `scoring` metric.
"""
- X, y = X_y_from_arff(arff_file_path)
+ X, y = X_y_from_arff(arff_file_path, split_column=target_column)
return self.score(X, y)
- def fit_arff(self, arff_file_path: str, *args, **kwargs):
+ def fit_arff(self, arff_file_path: str, target_column: Optional[str] = None, *args, **kwargs):
""" Find and fit a model to predict the target column (last) from other columns.
Parameters
----------
- arff_file_path: string
+ arff_file_path: str
Path to an ARFF file containing the training data.
- The last column is always taken to be the target.
+ target_column: str, optional (default=None)
+ Specifies which column the model should predict.
+ If left None, the last column is taken to be the target.
+
"""
- X, y = X_y_from_arff(arff_file_path)
+ X, y = X_y_from_arff(arff_file_path, split_column=target_column)
self.fit(X, y, *args, **kwargs)
def fit(self,
diff --git a/gama/utilities/preprocessing.py b/gama/utilities/preprocessing.py
index 476db87..bc99e8d 100644
--- a/gama/utilities/preprocessing.py
+++ b/gama/utilities/preprocessing.py
@@ -86,8 +86,12 @@ def format_x_y(x: Union[pd.DataFrame, np.ndarray], y: Union[pd.DataFrame, pd.Ser
if isinstance(x, np.ndarray):
x = heuristic_numpy_to_dataframe(x)
- if isinstance(y, np.ndarray) and y.ndim == 2 and y.shape[1] > 1:
- y = np.argmax(y, axis=1)
+ if isinstance(y, np.ndarray) and y.ndim == 2:
+ # Either indicator matrix or should be a vector.
+ if y.shape[1] > 1:
+ y = np.argmax(y, axis=1)
+ else:
+ y = y.squeeze()
if y_type == pd.Series:
if isinstance(y, pd.DataFrame):
diff --git a/setup.py b/setup.py
index 511a393..358d9ce 100644
--- a/setup.py
+++ b/setup.py
@@ -3,6 +3,9 @@ import os
from setuptools import setup, find_packages
+with open("gama/__version__.py", 'r') as fh:
+ version = fh.readlines()[-1].split()[-1].strip("\"'")
+
requirements = [
'numpy>=1.14.0',
'scipy>=1.0.0',
@@ -37,7 +40,7 @@ with open(os.path.join("README.md")) as fid:
setup(
name='gama',
- version='19.08.0',
+ version=version,
description='A package for automated machine learning based on scikit-learn.',
long_description=README,
long_description_content_type='text/markdown',
| PGijsbers/gama | 62317c2f574efee8b3a5b83272bb40219a0bf0ee | diff --git a/tests/unit/test_preprocessing.py b/tests/unit/test_preprocessing.py
index 31c6c7c..812c579 100644
--- a/tests/unit/test_preprocessing.py
+++ b/tests/unit/test_preprocessing.py
@@ -14,7 +14,8 @@ def test_format_x_y():
X_np, y_np = load_digits(return_X_y=True)
X_df, y_df = pd.DataFrame(X_np), pd.DataFrame(y_np)
y_series = pd.Series(y_np)
+ y_2d = y_np.reshape(-1, 1)
- for X, y in itertools.product([X_np, X_df], [y_np, y_series, y_df]):
+ for X, y in itertools.product([X_np, X_df], [y_np, y_series, y_df, y_2d]):
well_formatted_x_y(*format_x_y(X, y), y_type=pd.Series)
well_formatted_x_y(*format_x_y(X, y, y_type=pd.DataFrame), y_type=pd.DataFrame)
| Centralize version, make it an attribute of the module
There are currently two issues with keeping track of gama's version:
- it is duplicated across two files (`gama/gama.py` and `setup.py`)
- it is not directly available via `import gama; gama.__version__` (a minimal layout that would fix both is sketched below)
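A rough sketch of the single-source layout this implies; the exact file contents and the way `setup.py` reads the file are illustrative assumptions, not the final implementation:
```python
# gama/__version__.py -- the one place the version string lives
__version__ = '19.08.0'

# gama/__init__.py -- re-export it so `import gama; gama.__version__` works
from gama.__version__ import __version__

# setup.py -- read the file as plain text so the package need not be importable at build time
with open("gama/__version__.py") as fh:
    version = fh.read().split("=")[-1].strip().strip("'\"")
```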
todo: create a `__version__` file and refer to it from `gama/__init__.py` and `setup.py`. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/test_preprocessing.py::test_format_x_y"
] | [] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-11-07T14:03:35Z" | apache-2.0 |
|
PMEAL__OpenPNM-1794 | diff --git a/DEV_GUIDE.md b/DEV_GUIDE.md
index d61dd84b1..586f567e6 100644
--- a/DEV_GUIDE.md
+++ b/DEV_GUIDE.md
@@ -54,18 +54,19 @@ For our changelog generator to work well, the only thing you need to remember is
When merging your branch onto `dev`, as the merge message, describe what your pull request does concisely and preferably in a single sentence plus one of the following "standard" keywords:
-| Change type | Standard keywords * | Magical keywords ** |
-|:-------------|:--------------------|:------------------------------------------------------|
-| New feature | `new` | `feature` , `added` |
-| Enhancement | `enh` | `revamped` , `improved` , `enhanced` , `optimized` |
-| Maintenance | `maint` | `backend` |
-| API change | `api` | `deprecated` , `changed` , `removed` , `modified` |
-| Bug fix | `bug` | `bugfix` , `hotfix` , `fixed` |
+| Change type | Standard keywords * | Magical keywords ** |
+|:-------------|:---------------------|:------------------------------------------------------|
+| New feature | `#new` | `feature` , `added` |
+| Enhancement | `#enh` | `revamped` , `improved` , `enhanced` , `optimized` |
+| Maintenance | `#maint` | `backend` |
+| API change | `#api` | `deprecated` , `changed` , `removed` , `modified` |
+| Bug fix | `#bug` | `bugfix` , `hotfix` , `fixed` |
+| Documentation| `#doc` | `documentation` , `docstring` |
\* **Standard keywords**: For consistency, make sure you always use these. Example merge commit message:
```
-`topotools.plot_connections` no longer accepts list of pores [api].
+`topotools.plot_connections` no longer accepts list of pores [#api].
```
\** **Magical keywords**: For ease of use - and also in case you forget to use standard keywords -, feel free to use these in your merge commit message, they will automatically get caught. Example merge commit message:
@@ -74,7 +75,9 @@ When merging your branch onto `dev`, as the merge message, describe what your pu
Optimized `topotools.find_neighbor_pores` which is now ~10X faster.
```
-**Note**: You can use multiple keywords in case your pull request is doing multiple things (e.g. fixes a bug + deprecates a method), although this is discouraged. Please make a separate pull request for every change.
+**Note 1**: You can use multiple keywords in case your pull request is doing multiple things (e.g. fixes a bug + deprecates a method), although this is discouraged. Please make a separate pull request for every change.
+
+**Note 2**: We're deprecating the magical keywords from `v2.6.0`, so only use the special keywords.
### Merging `dev` onto `release`
Finally, if we're ready to publish a new release to PyPI and `conda`, you should create a pull request, asking to merge the `dev` branch onto the `release` branch. Again, this process is automated so that the version number gets bumped accordingly. The only thing you need to remember is to use the proper keyword, so that our automated workflow knows how to bump the version number. Please use the following keywords:
diff --git a/openpnm/algorithms/AdvectionDiffusion.py b/openpnm/algorithms/AdvectionDiffusion.py
index 7554b0429..5373467f1 100644
--- a/openpnm/algorithms/AdvectionDiffusion.py
+++ b/openpnm/algorithms/AdvectionDiffusion.py
@@ -74,7 +74,7 @@ class AdvectionDiffusion(ReactiveTransport):
**kwargs
):
r"""
- Setup method for setting/modifying algorithm settings.
+ Setup method for setting/modifying algorithm settings
"""
if phase:
self.settings['phase'] = phase.name
@@ -92,17 +92,31 @@ class AdvectionDiffusion(ReactiveTransport):
def set_outflow_BC(self, pores, mode='merge'):
r"""
- Adds outflow boundary condition to the selected pores.
+ Adds outflow boundary condition to the selected pores
+
+ Parameters
+ ----------
+ pores : array_like
+ The pore indices where the condition should be applied
+ mode : string, optional
+ Controls how the boundary conditions are applied. Options are:
+
+ 'merge' - (Default) Adds supplied boundary conditions to already
+ existing conditions, and also overwrites any existing values.
+ If at rate or value BC exists at the given locations, these
+ are deleted, and outflow conditions are given priority.
+ 'overwrite' - Deletes all boundary conditions of the given type
+ then adds the specified new ones.
Notes
-----
- Outflow condition simply means that the gradient of the solved
- quantity does not change, i.e. is 0.
+ Outflow condition means that the gradient of the solved quantity
+ does not change, i.e. is 0.
"""
# Hijack the parse_mode function to verify mode/pores argument
- allowed_modes = ['merge', 'overwrite', 'remove']
- mode = self._parse_mode(mode, allowed=allowed_modes, single=True)
+ mode = self._parse_mode(mode, allowed=['merge', 'overwrite'],
+ single=True)
pores = self._parse_indices(pores)
# Calculating A[i,i] values to ensure the outflow condition
@@ -117,11 +131,32 @@ class AdvectionDiffusion(ReactiveTransport):
np.add.at(Qp, C12[:, 0], -Q12)
np.add.at(Qp, C12[:, 1], Q12)
+ # Ensure other BCs are not already applied at given pores
+ hits = ~np.isnan(self['pore.bc_rate'][pores])
+ if np.any(hits):
+ self['pore.bc_rate'][pores] = np.nan
+ logger.warning('Rate boundary conditions found in some of the '
+ + 'specified pores will be overwritten')
+ hits = ~np.isnan(self['pore.bc_value'][pores])
+ if np.any(hits):
+ self['pore.bc_value'][pores] = np.nan
+ logger.warning('Value boundary conditions found in some of the '
+ + 'specified pores will be overwritten')
# Store boundary values
if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_outflow'] = np.nan
self['pore.bc_outflow'][pores] = Qp[pores]
+ def remove_BC(self, pores=None, bctype='all'):
+ # parse bctype argument
+ if isinstance(bctype, str):
+ bctype = [bctype]
+ if 'all' in bctype:
+ bctype = ['value', 'rate', 'outflow']
+ if ('pore.bc_outflow' in self.keys()) and ('outflow' in bctype):
+ self['pore.bc_outflow'][pores] = np.nan
+ super().remove_BC(pores=pores, bctype=bctype)
+
def _apply_BCs(self):
r"""
Applies Dirichlet, Neumann, and outflow BCs in order
@@ -135,3 +170,15 @@ class AdvectionDiffusion(ReactiveTransport):
ind = np.isfinite(self['pore.bc_outflow'])
diag[ind] += self['pore.bc_outflow'][ind]
self.A.setdiag(diag)
+
+ def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
+ pores = self._parse_indices(pores)
+ # First check that given pores outflow BCs already applied
+ if 'pore.bc_outflow' in self.keys():
+ hits = ~np.isnan(self['pore.bc_outflow'][pores])
+ if np.any(hits):
+ raise Exception('Cannot apply BCs to the following pores '
+ + 'which already have an outflow BC '
+ + 'specified', pores[np.where(hits)])
+ # Then call parent class function if above check passes
+ super()._set_BC(pores=pores, bctype=bctype, bcvalues=bcvalues, mode=mode)
diff --git a/openpnm/algorithms/GenericTransport.py b/openpnm/algorithms/GenericTransport.py
index ef8440c11..e00a36092 100644
--- a/openpnm/algorithms/GenericTransport.py
+++ b/openpnm/algorithms/GenericTransport.py
@@ -266,13 +266,13 @@ class GenericTransport(GenericAlgorithm):
mode : string, optional
Controls how the boundary conditions are applied. Options are:
- +-------------+--------------------------------------------------+
- | 'merge' | (Default) Adds supplied boundary conditions to |
- | | already existing conditions |
- +-------------+--------------------------------------------------+
- | 'overwrite' | Deletes all boundary condition on object then |
- | | adds the given ones |
- +-------------+--------------------------------------------------+
+ 'merge' - (Default) Adds supplied boundary conditions to already
+ existing conditions, and also overwrites any existing values.
+ If BCs of the complementary type already exist in the given
+ locations, those values are kept.
+ 'overwrite' - Deletes all boundary conditions of the given type
+ then adds the specified new ones (unless locations already have
+ BCs of the other type).
Notes
-----
@@ -280,7 +280,6 @@ class GenericTransport(GenericAlgorithm):
``settings``, e.g. ``alg.settings['quantity'] = 'pore.pressure'``.
"""
- mode = self._parse_mode(mode, allowed=['merge', 'overwrite'], single=True)
self._set_BC(pores=pores, bctype='value', bcvalues=values, mode=mode)
def set_rate_BC(self, pores, rates=None, total_rate=None, mode='merge',
@@ -303,13 +302,13 @@ class GenericTransport(GenericAlgorithm):
mode : str, optional
Controls how the boundary conditions are applied. Options are:
- +-------------+--------------------------------------------------+
- | 'merge' | (Default) Adds supplied boundary conditions to |
- | | already existing conditions |
- +-------------+--------------------------------------------------+
- | 'overwrite' | Deletes all boundary condition on object then |
- | | adds the given ones |
- +-------------+--------------------------------------------------+
+ 'merge' - (Default) Adds supplied boundary conditions to already
+ existing conditions, and also overwrites any existing values.
+ If BCs of the complementary type already exist in the given
+ locations, these values are kept.
+ 'overwrite' - Deletes all boundary conditions of the given type
+ then adds the specified new ones (unless locations already have
+ BCs of the other type).
Notes
-----
@@ -331,7 +330,6 @@ class GenericTransport(GenericAlgorithm):
+ 'total_rate')
pores = self._parse_indices(pores)
rates = total_rate/pores.size
- mode = self._parse_mode(mode, allowed=['merge', 'overwrite'], single=True)
self._set_BC(pores=pores, bctype='rate', bcvalues=rates, mode=mode)
@docstr.get_sectionsf(base='GenericTransport._set_BC',
@@ -349,12 +347,8 @@ class GenericTransport(GenericAlgorithm):
Specifies the type or the name of boundary condition to apply. The
types can be one one of the following:
- +-------------+--------------------------------------------------+
- | 'value' | Specify the value of the quantity in each |
- | | location |
- +-------------+--------------------------------------------------+
- | 'rate' | Specify the flow rate into each location |
- +-------------+--------------------------------------------------+
+ 'value' - Specify the value of the quantity in each location
+ 'rate' - Specify the flow rate into each location
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
@@ -366,13 +360,13 @@ class GenericTransport(GenericAlgorithm):
mode : string, optional
Controls how the boundary conditions are applied. Options are:
- +-------------+--------------------------------------------------+
- | 'merge' | (Default) Adds supplied boundary conditions to |
- | | already existing conditions |
- +-------------+--------------------------------------------------+
- | 'overwrite' | Deletes all boundary condition on object then |
- | | adds the given ones |
- +-------------+--------------------------------------------------+
+ 'merge' - (Default) Adds supplied boundary conditions to already
+ existing conditions, and also overwrites any existing values.
+ If BCs of the complementary type already exist in the given
+ locations, these values are kept.
+ 'overwrite' - Deletes all boundary conditions of the given type
+ then adds the specified new ones (unless locations already have
+ BCs of the other type).
Notes
-----
@@ -385,6 +379,7 @@ class GenericTransport(GenericAlgorithm):
# Hijack the parse_mode function to verify bctype argument
bctype = self._parse_mode(bctype, allowed=['value', 'rate'],
single=True)
+ othertype = list(set(['value', 'rate']).difference(set([bctype])))[0]
mode = self._parse_mode(mode, allowed=['merge', 'overwrite'],
single=True)
pores = self._parse_indices(pores)
@@ -394,16 +389,25 @@ class GenericTransport(GenericAlgorithm):
raise Exception('The number of boundary values must match the '
+ 'number of locations')
- # Warn the user that another boundary condition already exists
- value_BC_mask = np.isfinite(self["pore.bc_value"])
- rate_BC_mask = np.isfinite(self["pore.bc_rate"])
- BC_locs = self.Ps[rate_BC_mask + value_BC_mask]
- if np.intersect1d(pores, BC_locs).size:
- logger.info('Another boundary condition detected in some locations!')
+ # Create boundary array if needed (though these are created on init)
+ if 'pore.bc_' + bctype not in self.keys():
+ self['pore.bc_' + bctype] = np.nan
- # Clear old boundary values if needed
- if ('pore.bc_' + bctype not in self.keys()) or (mode == 'overwrite'):
+ # Catch pores with existing BCs
+ if mode == 'merge': # remove offenders, and warn user
+ existing_bcs = np.isfinite(self["pore.bc_" + othertype])
+ inds = pores[existing_bcs[pores]]
+ elif mode == 'overwrite': # Remove existing BCs and write new ones
self['pore.bc_' + bctype] = np.nan
+ existing_bcs = np.isfinite(self["pore.bc_" + othertype])
+ inds = pores[existing_bcs[pores]]
+ # Now drop any pore indices which have BCs that should be kept
+ if len(inds) > 0:
+ msg = r'Boundary conditions are already specified in ' + \
+ r'the following given pores, so these will be skipped: '
+ msg = '\n'.join((msg, inds.__repr__()))
+ logger.warning(msg)
+ pores = np.array(list(set(pores).difference(set(inds))), dtype=int)
# Store boundary values
self['pore.bc_' + bctype][pores] = values
@@ -424,21 +428,18 @@ class GenericTransport(GenericAlgorithm):
-*'all'*: (default) Removes all boundary conditions
-*'value'*: Removes only value conditions
-*'rate'*: Removes only rate conditions
- -*'outflow'*: Removes only outflow conditions
"""
if isinstance(bctype, str):
bctype = [bctype]
if 'all' in bctype:
- bctype = ['value', 'rate', 'outflow']
+ bctype = ['value', 'rate']
if pores is None:
pores = self.Ps
if ('pore.bc_value' in self.keys()) and ('value' in bctype):
self['pore.bc_value'][pores] = np.nan
if ('pore.bc_rate' in self.keys()) and ('rate' in bctype):
self['pore.bc_rate'][pores] = np.nan
- if ('pore.bc_outflow' in self.keys()) and ('outflow' in bctype):
- self['pore.bc_outflow'][pores] = np.nan
def _build_A(self):
r"""
@@ -669,11 +670,23 @@ class GenericTransport(GenericAlgorithm):
return x
# PyPardiso
elif self.settings['solver_family'] == 'pypardiso':
+ try:
+ import pypardiso
+ except ModuleNotFoundError:
+ if self.Np <= 8000:
+ logger.critical("Pardiso not found, reverting to much "
+ + "slower spsolve. Install pardiso with: "
+ + "conda install -c conda-forge pardiso4py")
+ self.settings['solver_family'] = 'scipy'
+ return self._get_solver()
+ else:
+ raise Exception("Pardiso not found. Install it with: "
+ + "conda install -c conda-forge pardiso4py")
+
def solver(A, b, **kwargs):
r"""
Wrapper method for PyPardiso sparse linear solver.
"""
- import pypardiso
x = pypardiso.spsolve(A=A, b=b)
return x
else:
| PMEAL/OpenPNM | 86411dbd0af894ac569ab2cb09d2d33441cd8716 | diff --git a/tests/unit/algorithms/AdvectionDiffusionTest.py b/tests/unit/algorithms/AdvectionDiffusionTest.py
index e4dc21e0c..46404808c 100644
--- a/tests/unit/algorithms/AdvectionDiffusionTest.py
+++ b/tests/unit/algorithms/AdvectionDiffusionTest.py
@@ -2,6 +2,7 @@ import numpy as np
import openpnm as op
from numpy.testing import assert_allclose
from openpnm.algorithms import AdvectionDiffusion, StokesFlow
+import pytest
class AdvectionDiffusionTest:
@@ -156,6 +157,30 @@ class AdvectionDiffusionTest:
y = ad[ad.settings['quantity']].mean()
assert_allclose(actual=y, desired=2, rtol=1e-5)
+ def test_add_outflow_overwrite_rate_and_value_BC(self):
+ ad = AdvectionDiffusion(network=self.net, phase=self.phase)
+ ad.set_rate_BC(pores=[0, 1], total_rate=1)
+ ad.set_value_BC(pores=[2, 3], values=1)
+ assert np.sum(np.isfinite(ad['pore.bc_rate'])) == 2
+ assert np.sum(np.isfinite(ad['pore.bc_value'])) == 2
+ ad.set_outflow_BC(pores=[0, 1, 2, 3])
+ assert np.sum(np.isfinite(ad['pore.bc_rate'])) == 0
+ assert np.sum(np.isfinite(ad['pore.bc_value'])) == 0
+
+ def test_value_BC_does_not_overwrite_outflow(self):
+ ad = AdvectionDiffusion(network=self.net, phase=self.phase)
+ ad.set_outflow_BC(pores=[0, 1])
+ with pytest.raises(Exception):
+ ad.set_value_BC(pores=[0, 1], values=1)
+
+ def test_add_rate_BC_fails_when_outflow_BC_present(self):
+ ad = AdvectionDiffusion(network=self.net, phase=self.phase)
+ ad.set_outflow_BC(pores=[0, 1])
+ with pytest.raises(Exception):
+ ad.set_rate_BC(pores=[0, 1], total_rate=1)
+ ad.set_rate_BC(pores=[2, 3], total_rate=1)
+ assert np.all(ad['pore.bc_rate'][[2, 3]] == 0.5)
+
def test_outflow_BC_rigorous(self):
ad = AdvectionDiffusion(network=self.net, phase=self.phase)
ad.settings["cache_A"] = False
@@ -220,8 +245,8 @@ class AdvectionDiffusionTest:
if __name__ == '__main__':
t = AdvectionDiffusionTest()
t.setup_class()
+ self = t
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
t.__getattribute__(item)()
- self = t
diff --git a/tests/unit/algorithms/GenericTransportTest.py b/tests/unit/algorithms/GenericTransportTest.py
index f8cf4858d..aa0b83ed4 100644
--- a/tests/unit/algorithms/GenericTransportTest.py
+++ b/tests/unit/algorithms/GenericTransportTest.py
@@ -98,6 +98,27 @@ class GenericTransportTest:
y = np.unique(np.around(alg['pore.mole_fraction'], decimals=3))
assert np.all(x == y)
+ def test_set_value_bc_where_rate_is_already_set_mode_merge(self):
+ alg = op.algorithms.GenericTransport(network=self.net,
+ phase=self.phase)
+ alg.settings['conductance'] = 'throat.diffusive_conductance'
+ alg.settings['quantity'] = 'pore.mole_fraction'
+ alg.set_rate_BC(pores=[0, 1], values=1, mode='merge')
+ alg.set_value_BC(pores=[1, 2], values=0, mode='merge')
+ assert np.isfinite(alg['pore.bc_rate']).sum() == 2
+ assert np.isfinite(alg['pore.bc_value']).sum() == 1
+
+ def test_set_value_bc_where_rate_is_already_set_mode_overwrite(self):
+ alg = op.algorithms.GenericTransport(network=self.net,
+ phase=self.phase)
+ alg.settings['conductance'] = 'throat.diffusive_conductance'
+ alg.settings['quantity'] = 'pore.mole_fraction'
+ alg.set_rate_BC(pores=[0, 1], values=1, mode='merge')
+ alg.set_value_BC(pores=[2, 3, 4], values=0, mode='merge')
+ alg.set_value_BC(pores=[1, 2], values=0, mode='overwrite')
+ assert np.isfinite(alg['pore.bc_rate']).sum() == 2
+ assert np.isfinite(alg['pore.bc_value']).sum() == 1
+
def test_cache_A(self):
alg = op.algorithms.GenericTransport(network=self.net,
phase=self.phase)
diff --git a/tests/unit/algorithms/SolversTest.py b/tests/unit/algorithms/SolversTest.py
index 4fd53af69..3214ec661 100644
--- a/tests/unit/algorithms/SolversTest.py
+++ b/tests/unit/algorithms/SolversTest.py
@@ -145,8 +145,8 @@ class SolversTest:
if __name__ == '__main__':
t = SolversTest()
t.setup_class()
+ self = t
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
- self = t
| Outflow BC and other BC types must be prevented from coexisting in a given set of pores | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_setup",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_conductance_gets_updated_when_pressure_changes",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_powerlaw_advection_diffusion",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_upwind_advection_diffusion",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_hybrid_advection_diffusion",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_exponential_advection_diffusion",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_outflow_BC",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_add_outflow_overwrite_rate_and_value_BC",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_value_BC_does_not_overwrite_outflow",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_add_rate_BC_fails_when_outflow_BC_present",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_outflow_BC_rigorous",
"tests/unit/algorithms/AdvectionDiffusionTest.py::AdvectionDiffusionTest::test_rate",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_generic_transport",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_two_value_conditions",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_one_value_one_rate",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_set_value_bc_where_rate_is_already_set_mode_merge",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_set_value_bc_where_rate_is_already_set_mode_overwrite",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_cache_A",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_rate_single_pore",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_rate_multiple_pores",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_rate_multiple_values",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_rate_Nt_by_2_conductance",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_reset_settings_and_data",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_reset_actual_results",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_total_rate",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_cg_exception_nonsymmetric_A"
] | [
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_results",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_set_solver",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_remove_boundary_conditions",
"tests/unit/algorithms/GenericTransportTest.py::GenericTransportTest::test_validate_data_health",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_solver_not_available",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_scipy_direct",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_scipy_iterative",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_scipy_iterative_diverge",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_pyamg_exception_if_not_found",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_pyamg",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_pypardiso",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_petsc_exception_if_not_found",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_petsc_mumps",
"tests/unit/algorithms/SolversTest.py::SolversTest::test_petsc_iterative"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-28T22:26:57Z" | mit |
|
PMEAL__OpenPNM-1844 | diff --git a/openpnm/algorithms/ReactiveTransport.py b/openpnm/algorithms/ReactiveTransport.py
index 15ea1402a..ba99b6399 100644
--- a/openpnm/algorithms/ReactiveTransport.py
+++ b/openpnm/algorithms/ReactiveTransport.py
@@ -66,6 +66,7 @@ class ReactiveTransportSettings(GenericSettings):
**The following parameters pertain to the ``GenericTransport`` class**
%(GenericTransportSettings.other_parameters)s
+
"""
nlin_max_iter = 5000
@@ -84,7 +85,7 @@ class ReactiveTransportSettings(GenericSettings):
@docstr.dedent
class ReactiveTransport(GenericTransport):
r"""
- A subclass for steady-state simulations with (optionally) source terms
+ A subclass for steady-state simulations with (optional) source terms.
Parameters
----------
@@ -92,9 +93,9 @@ class ReactiveTransport(GenericTransport):
Notes
-----
-
This subclass performs steady simulations of transport phenomena with
reactions when source terms are added.
+
"""
def __init__(self, settings={}, phase=None, **kwargs):
@@ -111,8 +112,8 @@ class ReactiveTransport(GenericTransport):
nlin_max_iter=None, relaxation_source=None,
relaxation_quantity=None, **kwargs):
r"""
- This method takes several arguments that are essential to running the
- algorithm and adds them to the settings
+ This method takes several arguments that are essential to running
+ the algorithm and adds them to the settings.
Parameters
----------
@@ -122,11 +123,12 @@ class ReactiveTransport(GenericTransport):
Notes
-----
Under-relaxation is a technique used for improving stability of a
- computation, particularly in the presence of highly non-linear terms.
- Under-relaxation used here limits the change in a variable from one
- iteration to the next. An optimum choice of the relaxation factor is
- one that is small enough to ensure stable simulation and large enough
- to speed up the computation.
+ computation, particularly in the presence of highly non-linear
+ terms. Under-relaxation used here limits the change in a variable
+ from one iteration to the next. An optimum choice of the
+ relaxation factor is one that is small enough to ensure stable
+ simulation and large enough to speed up the computation.
+
"""
if phase:
self.settings['phase'] = phase.name
@@ -174,6 +176,7 @@ class ReactiveTransport(GenericTransport):
If ``True`` removes source terms. The default is ``False``.
variable_props : boolean
If ``True`` removes variable properties. The default is ``False``.
+
"""
super().reset(**kwargs)
if source_terms:
@@ -187,26 +190,26 @@ class ReactiveTransport(GenericTransport):
def set_source(self, propname, pores, mode='overwrite'):
r"""
- Applies a given source term to the specified pores
+ Applies a given source term to the specified pores.
Parameters
----------
propname : string
- The property name of the source term model to be applied
+ The property name of the source term model to be applied.
pores : array_like
- The pore indices where the source term should be applied
+ The pore indices where the source term should be applied.
mode : str
Controls how the sources are applied. Options are:
-
- 'merge' - Adds supplied source term to already existing ones.
- 'overwrite' - (default) Deletes all existing source terms of the
- given ``propname`` then adds the specified new ones
+ - 'merge': Adds supplied source term to existing ones.
+ - 'overwrite': (default) Deletes all existing source terms
+ of the given ``propname`` then adds the specified new ones.
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised.
+
"""
locs = self.tomask(pores=pores)
# Check if any BC is already set in the same locations
@@ -226,14 +229,15 @@ class ReactiveTransport(GenericTransport):
def remove_source(self, propname, pores=None):
r"""
- Removes source terms from specified pores
+ Removes source terms from specified pores.
Parameters
----------
propname : str
- The property name of the source term model to be removed
+ The property name of the source term model to be removed.
pores : array_like
- The pore indices where the source term should be applied
+ The pore indices where the source term should be applied.
+
"""
if pores is None:
pores = self.Ps
@@ -244,12 +248,12 @@ class ReactiveTransport(GenericTransport):
def _set_variable_props(self, propnames):
r"""
- Inform the algorithm which properties are variable, so those on which
- they depend will be updated on each solver iteration.
+ Inform the algorithm which properties are variable, so those on
+ which they depend will be updated on each solver iteration.
Parameters
----------
- propnames : string or list of strings
+ propnames : str or List[str]
The propnames of the properties that are variable throughout
the algorithm.
@@ -261,13 +265,14 @@ class ReactiveTransport(GenericTransport):
def _update_iterative_props(self):
"""r
- Update physics using the current value of ``quantity``
+ Update physics using the current value of ``quantity``.
Notes
-----
- The algorithm directly writes the value of 'quantity' into the phase.
- This method was implemented relaxing one of the OpenPNM rules of
- algorithms not being able to write into phases.
+ The algorithm directly writes the value of 'quantity' into the
+ phase. This method was implemented relaxing one of the OpenPNM
+ rules of algorithms not being able to write into phases.
+
"""
phase = self.project.phases()[self.settings['phase']]
physics = self.project.find_physics(phase=phase)
@@ -286,21 +291,23 @@ class ReactiveTransport(GenericTransport):
def _apply_sources(self):
"""r
- Update ``A`` and ``b`` applying source terms to specified pores
+ Update ``A`` and ``b`` applying source terms to specified pores.
Notes
-----
- - Applying source terms to ``A`` and ``b`` is performed after (optionally)
- under-relaxing the source term to improve numerical stability. Physics
- are also updated before applying source terms to ensure that source
- terms values are associated with the current value of 'quantity'.
-
- - For source term under-relaxation, old values of S1 and S2 need to be
- stored somewhere, we chose to store them on the algorithm object. This is
- because storing them on phase/physics creates unintended problems, ex.
- storing them on physics -> IO complains added depth to the NestedDict, and
- storing them on the phase object results in NaNs in case source term is
- only added to a subset of nodes, which breaks our _check_for_nans algorithm.
+ Applying source terms to ``A`` and ``b`` is performed after
+ (optionally) under-relaxing the source term to improve numerical
+ stability. Physics are also updated before applying source terms
+ to ensure that source terms values are associated with the current
+ value of 'quantity'.
+
+ For source term under-relaxation, old values of S1 and S2 need
+ to be stored somewhere, we chose to store them on the algorithm
+ object. This is because storing them on phase/physics creates
+ unintended problems, ex. storing them on physics -> IO complains
+ added depth to the NestedDict, and storing them on the phase
+ object results in NaNs in case source term is only added to a
+ subset of nodes, which breaks our _check_for_nans algorithm.
Warnings
--------
@@ -336,29 +343,30 @@ class ReactiveTransport(GenericTransport):
def _run_reactive(self, x0):
r"""
- Repeatedly updates ``A``, ``b``, and the solution guess within according
- to the applied source term then calls ``_solve`` to solve the resulting
- system of linear equations.
+ Repeatedly updates ``A``, ``b``, and the solution guess within
+ according to the applied source term then calls ``_solve`` to
+ solve the resulting system of linear equations.
- Stops when the residual falls below ``solver_tol * norm(b)`` or when
- the maximum number of iterations is reached.
+ Stops when the residual falls below ``solver_tol * norm(b)`` or
+ when the maximum number of iterations is reached.
Parameters
----------
- x0 : ND-array
+ x0 : ndarray
Initial guess of unknown variable
Returns
-------
- x : ND-array
+ x : ndarray
Solution array.
Notes
-----
- The algorithm must at least complete one iteration, and hence the check for
- itr >= 1, because otherwise, _check_for_nans() never get's called in case
- there's something wrong with the data, and therefore, the user won't get
- notified about the root cause of the algorithm divergence.
+ The algorithm must at least complete one iteration, and hence the
+ check for itr >= 1, because otherwise, _check_for_nans() never
+ gets called in case there's something wrong with the data, and
+ therefore, the user won't get notified about the root cause of the
+ algorithm divergence.
"""
w = self.settings['relaxation_quantity']
@@ -388,7 +396,8 @@ class ReactiveTransport(GenericTransport):
def _update_A_and_b(self):
r"""
- Updates A and b based on the most recent solution stored on algorithm object.
+ Updates A and b based on the most recent solution stored on
+ algorithm object.
"""
# Update iterative properties on phase, geometries, and physics
self._update_iterative_props()
@@ -439,8 +448,8 @@ class ReactiveTransport(GenericTransport):
@docstr.dedent
def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
r"""
- Apply boundary conditions to specified pores if no source terms are
- already assigned to these pores. Otherwise, raise an error.
+ Apply boundary conditions to specified pores if no source terms
+ are already assigned to these pores. Otherwise, raise an error.
Parameters
----------
@@ -449,12 +458,14 @@ class ReactiveTransport(GenericTransport):
Notes
-----
%(GenericTransport._set_BC.notes)s
+
"""
# First check that given pores do not have source terms already set
for item in self.settings['sources']:
if np.any(self[item][pores]):
- raise Exception('Source term already present in given '
- + 'pores, cannot also assign boundary '
- + 'conditions')
+ raise Exception(
+ 'Source term already present in given pores, cannot also'
+ ' assign boundary conditions'
+ )
# Then call parent class function if above check passes
super()._set_BC(pores=pores, bctype=bctype, bcvalues=bcvalues, mode=mode)
diff --git a/openpnm/network/Cubic.py b/openpnm/network/Cubic.py
index 389562f19..41278b9c0 100644
--- a/openpnm/network/Cubic.py
+++ b/openpnm/network/Cubic.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
===============================================================================
Cubic: Generate lattice-like networks
@@ -140,9 +139,7 @@ class Cubic(GenericNetwork):
elif connectivity == 6 + 8 + 12:
joints = face_joints + corner_joints + edge_joints
else:
- raise Exception(
- "Invalid connectivity receieved. Must be 6, 14, 18, 20 or 26"
- )
+ raise Exception("Invalid connectivity. Must be 6, 14, 18, 20 or 26.")
tails, heads = np.array([], dtype=int), np.array([], dtype=int)
for T, H in joints:
@@ -245,18 +242,18 @@ class Cubic(GenericNetwork):
dims = topotools.dimensionality(self)
# Ensure vectors point in n-dims unique directions
c = {tuple(row): 1 for row in unit_vec}
+ mag = np.atleast_1d(mag.squeeze()).astype(float)
if len(c.keys()) > sum(dims):
raise Exception(
- "Spacing is undefined when throats point in "
- + "more directions than network has dimensions"
+ "Spacing is undefined when throats point in more directions"
+ " than network has dimensions."
)
- mag = np.float64(mag.squeeze())
for ax in [0, 1, 2]:
if dims[ax]:
inds = np.where(unit_vec[:, ax] == unit_vec[:, ax].max())[0]
temp = np.unique(mag[inds])
if not np.allclose(temp, temp[0]):
- raise Exception("A unique value of spacing could not be found")
+ raise Exception("A unique value of spacing could not be found.")
spacing[ax] = temp[0]
self.settings['spacing'] = spacing
return np.array(spacing)
| PMEAL/OpenPNM | 11530fa4ce00a80700429710bad3b26bc540d600 | diff --git a/tests/unit/network/CubicTest.py b/tests/unit/network/CubicTest.py
index 5aaa271f7..889048aed 100644
--- a/tests/unit/network/CubicTest.py
+++ b/tests/unit/network/CubicTest.py
@@ -10,6 +10,16 @@ class CubicTest:
def teardown_class(self):
pass
+ def test_spacing_could_not_be_found(self):
+ net = op.network.Cubic(shape=[1, 5, 1], spacing=1)
+ net["pore.coords"][4, 1] += 5
+ with pytest.raises(Exception):
+ _ = net.spacing
+
+ def test_spacing_1D(self):
+ net = op.network.Cubic(shape=[2, 1, 1], spacing=1)
+ assert np.all(net.spacing == [1.0, 0.0, 0.0])
+
def test_spacing_2D(self):
net = op.network.Cubic(shape=[5, 5, 1], spacing=[1, 1])
assert np.all(net.spacing == [1.0, 1.0, 0.0])
@@ -114,13 +124,13 @@ class CubicTest:
net = op.network.Cubic(shape=[3, 4, 5])
net['pore.coords'] += np.random.rand(net.Np, 3)
with pytest.raises(Exception):
- net.spacing
+ _ = net.spacing
def test_spacing_on_network_with_boundary_pores(self):
net = op.network.Cubic(shape=[3, 4, 5])
net.add_boundary_pores()
with pytest.raises(Exception):
- net.spacing
+ _ = net.spacing
def test_connectivity(self):
clist = [6, 14, 18, 20, 26]
diff --git a/tests/unit/network/GenericNetworkTest.py b/tests/unit/network/GenericNetworkTest.py
index 92f02d64f..1c3d859ae 100644
--- a/tests/unit/network/GenericNetworkTest.py
+++ b/tests/unit/network/GenericNetworkTest.py
@@ -1,5 +1,4 @@
import numpy as np
-import scipy as sp
import openpnm as op
| network.spacing breaks for 1D networks with only two pores
```python
import openpnm as op
net = op.network.Cubic([2, 1, 1], spacing=1)
print(net.spacing)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_1D"
] | [
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_could_not_be_found",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_2D",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3D",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_2D_uneven",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3D_uneven",
"tests/unit/network/CubicTest.py::CubicTest::test_shape_1D",
"tests/unit/network/CubicTest.py::CubicTest::test_shape_2D",
"tests/unit/network/CubicTest.py::CubicTest::test_shape_3D",
"tests/unit/network/CubicTest.py::CubicTest::test_shape_extremely_small_spacing",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3d_rotated",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3d_rotated_uneven",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_2d_sheared",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_2d_sheared_uneven",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3d_sheared",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_3d_sheared_uneven",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_on_joggled_network",
"tests/unit/network/CubicTest.py::CubicTest::test_spacing_on_network_with_boundary_pores",
"tests/unit/network/CubicTest.py::CubicTest::test_connectivity",
"tests/unit/network/CubicTest.py::CubicTest::test_invalid_connectivity",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_connected_pores_numeric_not_flattend",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_connected_pores_numeric_flattend",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_connected_pores_boolean_flattend",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_connected_pores_empty_flattend",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_boolean",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_union",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_intersection",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_exclusive_or",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_union_include_input",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_intersection_include_input",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_intersection_exclude_input",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_pores_numeric_exclusive_or_include_input",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_on_pores_wo_throats",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_empty",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_boolean",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_numeric_union",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_numeric_intersection",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_neighbor_throats_numeric_exclusive_or",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_num_neighbors_empty",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_num_neighbors_pores_flattened",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_num_neighbors_pores_with_modes",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_num_neighbors_pores_not_flattened",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_1",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_2",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_0",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_1_flattened",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_2_flattened",
"tests/unit/network/GenericNetworkTest.py::GenericNetworkTest::test_find_nearby_pores_distance_2_flattened_include_input"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-11T15:54:31Z" | mit |
|
PMEAL__OpenPNM-2160 | diff --git a/openpnm/models/geometry/misc.py b/openpnm/models/geometry/misc.py
index fa4df4578..f300a85bb 100644
--- a/openpnm/models/geometry/misc.py
+++ b/openpnm/models/geometry/misc.py
@@ -1,6 +1,5 @@
r"""
-Helper methods for openpnm.geometry module.
-
+Helper methods for openpnm.models.geometry module
"""
diff --git a/openpnm/models/misc/__init__.py b/openpnm/models/misc/__init__.py
index d01329346..addff073f 100644
--- a/openpnm/models/misc/__init__.py
+++ b/openpnm/models/misc/__init__.py
@@ -10,8 +10,7 @@ generating an array of random numbers.
"""
-from .statistical_distributions import generic_distribution, normal, weibull, random
-from .simple_equations import linear, polynomial, generic_function
-from .basic_math import constant, product, scaled, clip, normalize, summation
-from .basic_math import fraction, invert, blank
+from .statistical_distributions import *
+from .simple_equations import *
+from .basic_math import *
from .neighbor_lookups import from_neighbor_throats, from_neighbor_pores
diff --git a/openpnm/models/misc/basic_math.py b/openpnm/models/misc/basic_math.py
index ecad370a5..603bdedcb 100644
--- a/openpnm/models/misc/basic_math.py
+++ b/openpnm/models/misc/basic_math.py
@@ -5,6 +5,19 @@ from openpnm.utils import logging
logger = logging.getLogger(__name__)
+__all__ = [
+ 'blank',
+ 'clip',
+ 'constant',
+ 'fraction',
+ 'invert',
+ 'normalize',
+ 'scaled',
+ 'summation',
+ 'product',
+ ]
+
+
def blank(target):
pass
diff --git a/openpnm/models/misc/simple_equations.py b/openpnm/models/misc/simple_equations.py
index 994072d78..2e03cf895 100644
--- a/openpnm/models/misc/simple_equations.py
+++ b/openpnm/models/misc/simple_equations.py
@@ -5,6 +5,13 @@ from openpnm.utils import logging
logger = logging.getLogger(__name__)
+__all__ = [
+ 'linear',
+ 'polynomial',
+ 'generic_function',
+ ]
+
+
def generic_function(target, prop, func, **kwargs):
r"""
Runs an arbitrary function on the given data
diff --git a/openpnm/models/misc/statistical_distributions.py b/openpnm/models/misc/statistical_distributions.py
index 8024f21dd..d44cad6d7 100644
--- a/openpnm/models/misc/statistical_distributions.py
+++ b/openpnm/models/misc/statistical_distributions.py
@@ -5,6 +5,15 @@ from openpnm.utils import logging
logger = logging.getLogger(__name__)
+__all__ = [
+ 'random',
+ 'weibull',
+ 'normal',
+ 'generic_distribution',
+ 'match_histogram',
+ ]
+
+
def random(target, element, seed=None, num_range=[0, 1]):
r"""
Create an array of random numbers of a specified size.
@@ -197,3 +206,32 @@ def generic_distribution(target, seeds, func):
seeds = target[seeds]
value = func.ppf(seeds)
return value
+
+
+def match_histogram(target, bin_centers, bin_heights, element='pore'):
+ r"""
+ Generate values corresponding to a given histogram
+
+ Parameters
+ ----------
+ target : OpenPNM object
+ The object for which values are to be generated
+ bin_centers : array_like
+ The x-axis of the histogram, such as pore sizes
+ bin_heights : array_like
+ The y-axis of the histogram, such as the number of pores of each size
+ element : str
+ Controls how many values to generate. Can either be 'pore' or 'throat'.
+
+ Returns
+ -------
+ vals : ndarray
+ Values corresponding to ``bin_centers`` generated in proportion to the
+ respective ``bin_heights``.
+
+ """
+ N = target._count(element)
+ h = np.cumsum(bin_heights)
+ b = np.digitize(np.random.rand(N)*np.amax(h), bins=h)
+ vals = np.array(bin_centers)[b]
+ return vals
| PMEAL/OpenPNM | ddefbc73da05c936dcea0373cb89cb92121a35b0 | diff --git a/tests/unit/models/misc/MiscTest.py b/tests/unit/models/misc/MiscTest.py
index 884e76b3e..b93a60e77 100644
--- a/tests/unit/models/misc/MiscTest.py
+++ b/tests/unit/models/misc/MiscTest.py
@@ -288,6 +288,17 @@ class MiscTest:
model=mods.basic_math.invert)
assert net['pore.entry_pressure'][0] == 0.5
+ def test_match_histograms(self):
+ net = op.network.Cubic(shape=[5, 5, 5])
+ c = [0.1, 0.3, 0.8, 1.2]
+ h = [5, 20, 20, 100]
+ a = mods.match_histogram(target=net, bin_centers=c, bin_heights=h,
+ element='pore')
+ assert np.all(np.unique(a) == c)
+ vals, nums = np.unique(a, return_counts=True)
+ assert nums[3] == np.amax(nums)
+ assert nums[0] == np.amin(nums)
+
if __name__ == '__main__':
| Add a function for fitting PSD to experimental histograms
Following the discussion in #2112, it might be a good idea to offer a function like the one I proposed; one possible shape for such a function is sketched below.
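For reference only, a minimal, framework-agnostic sketch of how values could be drawn in proportion to a measured histogram; the function name and signature here are hypothetical and are not part of the OpenPNM API:
```python
import numpy as np

def sample_from_histogram(bin_centers, bin_heights, n):
    """Draw n values from bin_centers, in proportion to bin_heights."""
    p = np.asarray(bin_heights, dtype=float)
    p = p / p.sum()                                   # heights -> probabilities
    idx = np.random.choice(len(bin_centers), size=n, p=p)
    return np.asarray(bin_centers)[idx]

# e.g. assign pore diameters that reproduce an experimental PSD:
# diameters = sample_from_histogram(centers, counts, n=network.Np)
```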
I'm still not quite sure how this should be implemented, though; it doesn't quite fit the pore-scale modeling framework. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/models/misc/MiscTest.py::MiscTest::test_match_histograms"
] | [
"tests/unit/models/misc/MiscTest.py::MiscTest::test_constant",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_product",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_generic_function",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_scaled",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_linear",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_polynomial",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_random_no_seed",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_random_with_seed",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_random_with_range",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_throats_min",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_throats_max",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_throats_mean",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_neighbor_pores_with_nans",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_neighbor_throats_mode_min_with_nans",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_neighbor_throats_mode_max_with_nans",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_neighbor_throats_mode_mean_with_nans",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_pores_min",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_pores_max",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbor_pores_mean",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_from_neighbors_multi_geom",
"tests/unit/models/misc/MiscTest.py::MiscTest::test_invert"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-13T16:52:47Z" | mit |
|
PMEAL__OpenPNM-2162 | diff --git a/openpnm/core/Base.py b/openpnm/core/Base.py
index d1e2840ad..7b66f3993 100644
--- a/openpnm/core/Base.py
+++ b/openpnm/core/Base.py
@@ -251,6 +251,14 @@ class Base(dict):
raise KeyError(key)
return vals
+ def __delitem__(self, key):
+ try:
+ super().__delitem__(key)
+ except KeyError as e:
+ d = self[key] # if key is a nested dict, get all values
+ for item in d.keys():
+ super().__delitem__(item)
+
def _set_name(self, name, validate=True):
old_name = self.settings['name']
if name == old_name:
| PMEAL/OpenPNM | aa445a2acec85ebf1e8f358e641fa5e2c4e141b7 | diff --git a/tests/unit/core/BaseTest.py b/tests/unit/core/BaseTest.py
index f82d99875..18ed0ae18 100644
--- a/tests/unit/core/BaseTest.py
+++ b/tests/unit/core/BaseTest.py
@@ -930,6 +930,19 @@ class BaseTest:
with pytest.raises(KeyError):
pn.get_conduit_data('blah')
+ def test_del_nested_dicts(self):
+ pn = op.network.Cubic(shape=[3, 3, 3])
+ geo = op.geometry.SpheresAndCylinders(network=pn,
+ pores=pn.Ps,
+ throats=pn.Ts)
+ assert 'throat.hydraulic_size_factors.pore1' in geo.keys()
+ del geo['throat.hydraulic_size_factors']
+ assert 'throat.hydraulic_size_factors.pore1' not in geo.keys()
+ with pytest.raises(KeyError):
+ geo['throat.hydraulic_size_factors']
+ with pytest.raises(KeyError):
+ del geo['pore.blah']
+
if __name__ == '__main__':
| Add the ability to delete hierarchical dictionary keys in one go
# Intended
```python
del geo['throat.conduit_lengths'] # this currently doesn't work
```
# Current
```python
del geo['throat.conduit_lengths.pore1']
del geo['throat.conduit_lengths.pore2']
del geo['throat.conduit_lengths.throat']
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/core/BaseTest.py::BaseTest::test_del_nested_dicts"
] | [
"tests/unit/core/BaseTest.py::BaseTest::test_clear_model_data",
"tests/unit/core/BaseTest.py::BaseTest::test_clear_model_data_when_model_returns_dictionary",
"tests/unit/core/BaseTest.py::BaseTest::test_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_one_label",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_two_labels_or",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_two_labels_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_two_labels_not_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_two_labels_nor",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_two_labels_nand",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_bad_mode",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_empty_list",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_asmask",
"tests/unit/core/BaseTest.py::BaseTest::test_pores_and_throats_with_to_global",
"tests/unit/core/BaseTest.py::BaseTest::test_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_throats_asmask",
"tests/unit/core/BaseTest.py::BaseTest::test_throats_one_label",
"tests/unit/core/BaseTest.py::BaseTest::test_throats_two_labels_or",
"tests/unit/core/BaseTest.py::BaseTest::test_throats_two_labels_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_throats_two_labels_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_no_label",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_one_label_as_string",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_one_label_as_list",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_two_labels_or",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_two_labels_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_two_labels_xnor_empty",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_two_labels_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_two_labels_nor",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_empty_locations",
"tests/unit/core/BaseTest.py::BaseTest::test_filter_by_label_pores_and_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_tomask_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_tomask_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_tomask_pores_and_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_toindices_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_toindices_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_toindices_float_mask",
"tests/unit/core/BaseTest.py::BaseTest::test_toindices_invalid_mask",
"tests/unit/core/BaseTest.py::BaseTest::test_toindices_wrong_mask",
"tests/unit/core/BaseTest.py::BaseTest::test_count",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores_one_label",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores_two_labels_or",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores_two_labels_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores_two_labels_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_num_pores_two_labels_nor",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats_one_label",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats_two_labels_or",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats_two_labels_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats_two_labels_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_num_throats_two_labels_nor",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_mode_skip",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_mode_props",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_mode_labels",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_element_pores_mode_all",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_element_throats_mode_all",
"tests/unit/core/BaseTest.py::BaseTest::test_keys_mode_props_and_labels",
"tests/unit/core/BaseTest.py::BaseTest::test_props_all",
"tests/unit/core/BaseTest.py::BaseTest::test_props_models",
"tests/unit/core/BaseTest.py::BaseTest::test_props_constants",
"tests/unit/core/BaseTest.py::BaseTest::test_props_pores_all",
"tests/unit/core/BaseTest.py::BaseTest::test_props_pores_models",
"tests/unit/core/BaseTest.py::BaseTest::test_props_pores_constants",
"tests/unit/core/BaseTest.py::BaseTest::test_props_hidden_keys",
"tests/unit/core/BaseTest.py::BaseTest::test_labels",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_pores_and_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_foo",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_all_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_all_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_one_pore",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_on_list_of_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_locations_boolean",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_or",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_and",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_xor",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_nand",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_xnor",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_nor",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_pores_mode_foo",
"tests/unit/core/BaseTest.py::BaseTest::test_labels_hidden_key",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_indices_boolean",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_indices_None",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_indices_int",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_indices_list",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_None",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_various_strings",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_bad_string",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_duplicate",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_single_true",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_element_props",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_labels_none",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_labels_string",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_labels_wildcards",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_labels_duplicates",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_mode_string",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_mode_single",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_mode_allowed",
"tests/unit/core/BaseTest.py::BaseTest::test_parse_mode_duplicate",
"tests/unit/core/BaseTest.py::BaseTest::test_setitem_wrong_prefix",
"tests/unit/core/BaseTest.py::BaseTest::test_setitem_wrong_length",
"tests/unit/core/BaseTest.py::BaseTest::test_setitem_replace_all",
"tests/unit/core/BaseTest.py::BaseTest::test_setitem_overwrite_into_all",
"tests/unit/core/BaseTest.py::BaseTest::test_setitem_subdict_conflicts",
"tests/unit/core/BaseTest.py::BaseTest::test_object_name_name_conflict",
"tests/unit/core/BaseTest.py::BaseTest::test_object_name_array_conflict",
"tests/unit/core/BaseTest.py::BaseTest::test_get_indices",
"tests/unit/core/BaseTest.py::BaseTest::test_get_indices_wildcard",
"tests/unit/core/BaseTest.py::BaseTest::test_write_dict",
"tests/unit/core/BaseTest.py::BaseTest::test_map_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_map_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_map_pores_unfiltered",
"tests/unit/core/BaseTest.py::BaseTest::test_map_pores_unfiltered_missing",
"tests/unit/core/BaseTest.py::BaseTest::test_map_pores_reverse",
"tests/unit/core/BaseTest.py::BaseTest::test_map_pores_missing",
"tests/unit/core/BaseTest.py::BaseTest::test_getitem_with_no_matches",
"tests/unit/core/BaseTest.py::BaseTest::test_interpolate_data",
"tests/unit/core/BaseTest.py::BaseTest::test_get_no_matches",
"tests/unit/core/BaseTest.py::BaseTest::test_get_string",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_network_from_network",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_network_from_one_geometry",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_network_from_two_geometries",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_phase_from_phase",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_phase_from_one_physics",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_phase_from_two_physics",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_getitem_on_one_geometry",
"tests/unit/core/BaseTest.py::BaseTest::test_subdict_lookup_errors",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_add_to_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_add_to_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_overwrite_on_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_overwrite_on_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_remove_from_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_remove_from_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_purge_from_pores",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_purge_from_throats",
"tests/unit/core/BaseTest.py::BaseTest::test_set_label_purge_nonexistent_label",
"tests/unit/core/BaseTest.py::BaseTest::test_renaming_to_current_name_is_allowed",
"tests/unit/core/BaseTest.py::BaseTest::test_object_names_must_be_unique_within_project",
"tests/unit/core/BaseTest.py::BaseTest::test_get_conduit_data"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-13T21:31:10Z" | mit |
|
PMEAL__OpenPNM-2173 | diff --git a/openpnm/algorithms/GenericAlgorithm.py b/openpnm/algorithms/GenericAlgorithm.py
index c2551b1ab..70bc915b5 100644
--- a/openpnm/algorithms/GenericAlgorithm.py
+++ b/openpnm/algorithms/GenericAlgorithm.py
@@ -21,7 +21,7 @@ class GenericAlgorithmSettings:
@docstr.get_sections(base='GenericAlgorithm', sections=['Parameters'])
@docstr.dedent
-class GenericAlgorithm(Base, LegacyMixin, LabelMixin):
+class GenericAlgorithm(Base, LegacyMixin):
r"""
Generic class to define the foundation of Algorithms
diff --git a/openpnm/algorithms/ReactiveTransport.py b/openpnm/algorithms/ReactiveTransport.py
index 185578d11..f0b815c14 100644
--- a/openpnm/algorithms/ReactiveTransport.py
+++ b/openpnm/algorithms/ReactiveTransport.py
@@ -91,7 +91,7 @@ class ReactiveTransport(GenericTransport):
return
# Remove item from label dictionary
for item in self.settings['sources']:
- self.pop(item)
+ self.pop(item, None)
# Reset the settings dict
self.settings['sources'] = []
@@ -131,7 +131,12 @@ class ReactiveTransport(GenericTransport):
locs_BC = np.isfinite(self['pore.bc_value']) + np.isfinite(self['pore.bc_rate'])
if (locs & locs_BC).any():
raise Exception("BCs present in given pores, can't assign source term")
- self.set_label(propname, pores=locs, mode=mode)
+ if mode == 'overwrite':
+ self[propname] = False
+ if mode == 'add':
+ if propname not in self.keys():
+ self[propname] = False
+ self[propname][locs] = True
# Check if propname already in source term list
if propname not in self.settings['sources']:
self.settings['sources'].append(propname)
@@ -148,9 +153,11 @@ class ReactiveTransport(GenericTransport):
The pore indices where the source term should be applied.
"""
- locs = self.tomask(pores=pores or self.Ps)
- self.set_label(propname, pores=locs, mode='remove')
- # TODO: if pores=None: remove the label -> reuse in reset method
+ propname = self._parse_prop(propname, 'pore')
+ if pores is None:
+ self.pop(propname, None)
+ else:
+ self[propname][pores] = False
def _update_iterative_props(self):
"""r
@@ -195,7 +202,7 @@ class ReactiveTransport(GenericTransport):
phase = self.project[self.settings.phase]
for item in self.settings['sources']:
# Fetch linearized values of the source term
- Ps = self.pores(item)
+ Ps = self[item]
S1, S2 = [phase[f"{item}.{Si}"] for Si in ["S1", "S2"]]
# Modify A and b: diag(A) += -S1, b += S2
diag = self.A.diagonal()
diff --git a/openpnm/core/Base.py b/openpnm/core/Base.py
index 5595e7e3a..3470016ba 100644
--- a/openpnm/core/Base.py
+++ b/openpnm/core/Base.py
@@ -51,7 +51,7 @@ class Base(dict):
class is a subclass of the standard ``dict`` so has the usual methods such
as ``pop`` and ``keys``, and has extra methods for working specifically
with OpenPNM data.
-
+
"""
def __new__(cls, *args, **kwargs):
@@ -834,6 +834,47 @@ class Base(dict):
T = self.interpolate_data(propname='pore.'+prop, mode=mode)
return np.vstack((P1, T, P2)).T
+ def _count(self, element=None):
+ r"""
+ Returns a dictionary containing the number of pores and throats in
+ the network, stored under the keys 'pore' or 'throat'
+
+ Parameters
+ ----------
+ element : string, optional
+ Can be either 'pore' , 'pores', 'throat' or 'throats', which
+ specifies which count to return.
+
+ Returns
+ -------
+ A dictionary containing the number of pores and throats under the
+ 'pore' and 'throat' key respectively.
+
+ See Also
+ --------
+ num_pores
+ num_throats
+
+ Notes
+ -----
+ The ability to send plurals is useful for some types of 'programmatic'
+ access. For instance, the standard argument for locations is pores
+ or throats. If these are bundled up in a **kwargs dict then you can
+ just use the dict key in count() without removing the 's'.
+
+ Examples
+ --------
+ >>> import openpnm as op
+ >>> pn = op.network.Cubic(shape=[5, 5, 5])
+ >>> pn._count('pore')
+ 125
+ >>> pn._count('throat')
+ 300
+ """
+ element = self._parse_element(element=element, single=True)
+ temp = np.size(self.__getitem__(element+'.all'))
+ return temp
+
def show_hist(self,
props=['pore.diameter', 'throat.diameter', 'throat.length'],
bins=20, fontsize=14, **kwargs):
diff --git a/openpnm/core/Mixins.py b/openpnm/core/Mixins.py
index a81443fbe..b3961f6a9 100644
--- a/openpnm/core/Mixins.py
+++ b/openpnm/core/Mixins.py
@@ -730,44 +730,3 @@ class LabelMixin:
Ts = self._get_indices(labels=labels, mode=mode, element='throat')
Nt = np.shape(Ts)[0]
return Nt
-
- def _count(self, element=None):
- r"""
- Returns a dictionary containing the number of pores and throats in
- the network, stored under the keys 'pore' or 'throat'
-
- Parameters
- ----------
- element : string, optional
- Can be either 'pore' , 'pores', 'throat' or 'throats', which
- specifies which count to return.
-
- Returns
- -------
- A dictionary containing the number of pores and throats under the
- 'pore' and 'throat' key respectively.
-
- See Also
- --------
- num_pores
- num_throats
-
- Notes
- -----
- The ability to send plurals is useful for some types of 'programmatic'
- access. For instance, the standard argument for locations is pores
- or throats. If these are bundled up in a **kwargs dict then you can
- just use the dict key in count() without removing the 's'.
-
- Examples
- --------
- >>> import openpnm as op
- >>> pn = op.network.Cubic(shape=[5, 5, 5])
- >>> pn._count('pore')
- 125
- >>> pn._count('throat')
- 300
- """
- element = self._parse_element(element=element, single=True)
- temp = np.size(self.__getitem__(element+'.all'))
- return temp
| PMEAL/OpenPNM | 486ed107160e587096d85cf308af7eadb5bef390 | diff --git a/tests/unit/algorithms/IPTest.py b/tests/unit/algorithms/IPTest.py
index 4803639bd..efdf7e3f8 100644
--- a/tests/unit/algorithms/IPTest.py
+++ b/tests/unit/algorithms/IPTest.py
@@ -57,7 +57,7 @@ class IPTest:
alg.set_inlets(pores=self.net.pores("top"))
alg.run()
alg.apply_trapping(outlets=self.net.pores("bottom"))
- assert "pore.trapped" in alg.labels()
+ assert "pore.trapped" in alg.keys()
def test_plot_intrusion_curve(self):
alg = op.algorithms.InvasionPercolation(network=self.net, phase=self.water)
diff --git a/tests/unit/algorithms/ReactiveTransportTest.py b/tests/unit/algorithms/ReactiveTransportTest.py
index 41b10b2fc..4646908c7 100644
--- a/tests/unit/algorithms/ReactiveTransportTest.py
+++ b/tests/unit/algorithms/ReactiveTransportTest.py
@@ -146,7 +146,7 @@ class ReactiveTransportTest:
self.alg.remove_source(propname='pore.reaction', pores=[0, 2])
assert self.alg['pore.reaction'].sum() == 2
self.alg.remove_source(propname='pore.reaction')
- assert self.alg['pore.reaction'].sum() == 0
+ assert 'pore.reaction' not in self.alg.keys()
def test_source_relaxation_consistency_w_base_solution(self):
self.alg.reset(bcs=True, source_terms=True)
| Algorithms probably don't need to inherit from the Base class
We did some digging and the algorithms rarely call any of the ``Base`` methods like ``num_pores`` etc. I think we might be able to just define ``GenericAlgorithm`` as a direct subclass of ``dict`` and that would free up the algorithms quite a bit. For instance, the data would no longer need to be ``Np`` and ``Nt`` long. This would allow solving for subsets of pores. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_remove_source"
] | [
"tests/unit/algorithms/IPTest.py::IPTest::test_set_inlets_overwrite",
"tests/unit/algorithms/IPTest.py::IPTest::test_run",
"tests/unit/algorithms/IPTest.py::IPTest::test_results",
"tests/unit/algorithms/IPTest.py::IPTest::test_trapping",
"tests/unit/algorithms/IPTest.py::IPTest::test_plot_intrusion_curve",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_settings",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_get_iterative_props",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_multiple_set_source_with_same_name_should_only_keep_one",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_one_value_one_source",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_source_over_BCs",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_BCs_over_source",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_multiple_source_terms_same_location",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_source_term_is_set_as_iterative_prop",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_quantity_relaxation_consistency_w_base_solution",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_set_source_with_modes",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_source_relaxation_consistency_w_base_solution",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_solution_should_diverge_w_large_relaxation",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_check_divergence_if_maxiter_reached",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_variable_conductance",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_reset",
"tests/unit/algorithms/ReactiveTransportTest.py::ReactiveTransportTest::test_ensure_settings_are_valid"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-17T16:30:12Z" | mit |
|
PMEAL__OpenPNM-2186 | diff --git a/openpnm/models/geometry/throat_volume.py b/openpnm/models/geometry/throat_volume.py
index 3cdfff587..13d4322f4 100644
--- a/openpnm/models/geometry/throat_volume.py
+++ b/openpnm/models/geometry/throat_volume.py
@@ -163,8 +163,8 @@ def lens(target, throat_diameter='throat.diameter',
"""
network = target.network
conns = network['throat.conns']
- Rp = target[pore_diameter]
- Rt = target[throat_diameter]
+ Rp = target[pore_diameter]/2
+ Rt = target[throat_diameter]/2
a = _np.atleast_2d(Rt).T
q = _np.arcsin(a/Rp[conns])
b = Rp[conns]*_np.cos(q)
@@ -210,8 +210,8 @@ def pendular_ring(target, throat_diameter='throat.diameter',
"""
network = target.network
conns = network['throat.conns']
- Rp = target[pore_diameter]
- Rt = target[throat_diameter]
+ Rp = target[pore_diameter]/2
+ Rt = target[throat_diameter]/2
a = _np.atleast_2d(Rt).T
q = _np.arcsin(a/Rp[conns])
b = Rp[conns]*_np.cos(q)
| PMEAL/OpenPNM | 4588835e40c14421dcc6c34b555595147006c417 | diff --git a/tests/unit/models/geometry/ThroatVolumeTest.py b/tests/unit/models/geometry/ThroatVolumeTest.py
index 0b612fac4..f459dd600 100644
--- a/tests/unit/models/geometry/ThroatVolumeTest.py
+++ b/tests/unit/models/geometry/ThroatVolumeTest.py
@@ -26,13 +26,13 @@ class ThroatVolumeTest:
net.add_model(propname='throat.lens_volume',
model=mod)
Vlens = net['throat.lens_volume']
- assert np.isclose(Vlens, 2*0.006733852203712552)
+ assert np.isclose(Vlens, 2*0.00084173)
mod = op.models.geometry.throat_volume.pendular_ring
net.add_model(propname='throat.ring_volume',
model=mod)
- Vcyl = 2*(0.01315292522620208)
+ Vcyl = 2*(0.00164412)
Vring = net['throat.ring_volume']
- assert np.isclose(Vcyl - Vring, 2*0.006733852203712552)
+ assert np.isclose(Vcyl - Vring, 2*0.00084173)
def test_cylinder(self):
self.geo.add_model(propname='throat.volume',
| Throat lens volume is higher than throat volume when pore sizes are >=0.8*max_pore_size.
**Describe the bug**
Throat lens volume is higher than throat volume when pore sizes are >=0.8*max_pore_size. Throat lens_volume should always be less than throat volume (it is a correction to the volume calculation). Lens values larger than the throat volume occur when pore sizes are defined as >=0.8*max_pore_size. Even if there is room within the spacing to define a non-zero-length throat between the pores, the throat volume calculation using the lens volume will not give realistic results, because the curvature of the spherical pore overlap inside the throat becomes much more significant for throats with very short lengths.
**To Reproduce**
```
import openpnm as op
import numpy as np
from openpnm import models as mods
import pandas as pd
shape=[20,20,20]
fracs=np.arange(0.3,1.1,0.1)
vols={'pores':[],'throats':[], 'lens':[], 'lens/throats':[]}
for frac in fracs:
net = op.network.Cubic(shape=shape)
geo = op.geometry.GenericGeometry(network=net, pores=net.Ps, throats=net.Ts)
geo.add_model(propname='pore.max_size',
model=mods.geometry.pore_size.largest_sphere,
iters=10)
# pore geoms to calculate volume
geo['pore.diameter']=frac*geo['pore.max_size']
geo.add_model(propname='pore.volume',
model=mods.geometry.pore_volume.sphere,
pore_diameter='pore.diameter')
# throat geoms to calculate volume
geo.add_model(propname='throat.max_size',
model=mods.misc.from_neighbor_pores,
mode='min',
prop='pore.diameter')
geo.add_model(propname='throat.diameter',
model=mods.misc.scaled,
factor=0.5,
prop='throat.max_size')
geo.add_model(propname='throat.endpoints',
model=op.models.geometry.throat_endpoints.spherical_pores)
geo.add_model(propname='throat.conduit_lengths',
model=op.models.geometry.throat_length.conduit_lengths)
#geo['throat.length']=geo['throat.conduit_lengths.throat']
geo.add_model(propname='throat.length',
model=mods.geometry.throat_length.piecewise,
throat_endpoints='throat.endpoints')
geo.add_model(propname='throat.volume',
model=mods.geometry.throat_volume.cylinder)
lens_vol=op.models.geometry.throat_volume.lens(net)
vols['pores'].append(sum(net['pore.volume']))
vols['throats'].append(sum(net['throat.volume']))
vols['lens'].append(sum(lens_vol))
vols['lens/throats'].append(sum(lens_vol)/sum(net['throat.volume']))
data=pd.DataFrame(vols)
print(data)
```
![image](https://user-images.githubusercontent.com/43128873/108528601-95980080-72a1-11eb-9bfc-93c970f20684.png)
**Expected behavior**
For fracs > 0.8, lens_vol/throat_vol must be smaller than 1.
**Additional context**
In the stick-and-ball geometry we select the multipliers of max_pore_size from the range [0.1, 0.7], which prevents this issue. The issue arises when someone defines extra-large pores with diameters close to max_pore_size.
Can we conclude that:
the lens_vol correction should not be used for throats that connect extra-large pores?
or no extra large pores should be defined? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/models/geometry/ThroatVolumeTest.py::ThroatVolumeTest::test_lens_and_pendular_ring"
] | [
"tests/unit/models/geometry/ThroatVolumeTest.py::ThroatVolumeTest::test_cylinder",
"tests/unit/models/geometry/ThroatVolumeTest.py::ThroatVolumeTest::test_cube",
"tests/unit/models/geometry/ThroatVolumeTest.py::ThroatVolumeTest::test_rectangle",
"tests/unit/models/geometry/ThroatVolumeTest.py::ThroatVolumeTest::test_extrusion"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-22T14:30:55Z" | mit |
|
PMEAL__porespy-378 | diff --git a/porespy/filters/__funcs__.py b/porespy/filters/__funcs__.py
index f24d97b24..d971f21b4 100644
--- a/porespy/filters/__funcs__.py
+++ b/porespy/filters/__funcs__.py
@@ -1712,11 +1712,8 @@ def chunked_func(func,
strel = kwargs[item]
break
halo = np.array(strel.shape) * (divs > 1)
- slices = np.ravel(
- shape_split(
- im.shape, axis=divs, halo=halo.tolist(), tile_bounds_policy=ARRAY_BOUNDS
- )
- )
+ slices = np.ravel(shape_split(im.shape, axis=divs, halo=halo.tolist(),
+ tile_bounds_policy=ARRAY_BOUNDS))
# Apply func to each subsection of the image
res = []
# print('Image will be broken into the following chunks:')
@@ -1727,7 +1724,7 @@ def chunked_func(func,
res.append(apply_func(func=func, **kwargs))
# Have dask actually compute the function on each subsection in parallel
# with ProgressBar():
- # ims = dask.compute(res, num_workers=cores)[0]
+ # ims = dask.compute(res, num_workers=cores)[0]
ims = dask.compute(res, num_workers=cores)[0]
# Finally, put the pieces back together into a single master image, im2
im2 = np.zeros_like(im, dtype=im.dtype)
@@ -1752,7 +1749,11 @@ def chunked_func(func,
a = tuple(a)
b = tuple(b)
# Insert image chunk into main image
- im2[a] = ims[i][b]
+ try:
+ im2[a] = ims[i][b]
+ except ValueError:
+ raise IndexError('The applied filter seems to have returned a '
+ + 'larger image that it was sent.')
return im2
| PMEAL/porespy | 933b1fe1a93cce068aae088c4cf0c569182dc42a | diff --git a/test/unit/test_filters.py b/test/unit/test_filters.py
index 47773ff69..07989d3f3 100644
--- a/test/unit/test_filters.py
+++ b/test/unit/test_filters.py
@@ -424,6 +424,16 @@ class FilterTest():
b = ps.filters.fftmorphology(im, strel=s, mode='erosion')
assert np.all(a == b)
+ def test_chunked_func_w_ill_defined_filter(self):
+ import scipy.signal as spsg
+ im = ps.generators.blobs(shape=[100, 100, 100])
+ with pytest.raises(IndexError):
+ ps.filters.chunked_func(func=spsg.convolve,
+ in1=im*1.0,
+ in2=ps.tools.ps_ball(5),
+ im_arg='in1', strel_arg='in2',
+ overlap=5)
+
def test_prune_branches(self):
im = ps.generators.lattice_spheres(shape=[100, 100, 100], radius=4)
skel1 = skeletonize_3d(im)
| chunked_func should deal with funcs that return a result of different shape than the input
For instance, the convolve function can return an image with padding around the outside, which breaks the chunking later on in the function | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/test_filters.py::FilterTest::test_chunked_func_w_ill_defined_filter"
] | [
"test/unit/test_filters.py::FilterTest::test_im_in_not_im_out",
"test/unit/test_filters.py::FilterTest::test_porosimetry_compare_modes_2D",
"test/unit/test_filters.py::FilterTest::test_porosimetry_npts_10",
"test/unit/test_filters.py::FilterTest::test_porosimetry_compare_modes_3D",
"test/unit/test_filters.py::FilterTest::test_porosimetry_with_sizes",
"test/unit/test_filters.py::FilterTest::test_porosimetry_mio_mode_without_fft",
"test/unit/test_filters.py::FilterTest::test_porosimetry_hybrid_mode_without_fft",
"test/unit/test_filters.py::FilterTest::test_apply_chords_axis0",
"test/unit/test_filters.py::FilterTest::test_apply_chords_axis1",
"test/unit/test_filters.py::FilterTest::test_apply_chords_axis2",
"test/unit/test_filters.py::FilterTest::test_apply_chords_with_negative_spacing",
"test/unit/test_filters.py::FilterTest::test_apply_chords_without_trimming",
"test/unit/test_filters.py::FilterTest::test_apply_chords_3D",
"test/unit/test_filters.py::FilterTest::test_flood",
"test/unit/test_filters.py::FilterTest::test_find_disconnected_voxels_2d",
"test/unit/test_filters.py::FilterTest::test_find_disconnected_voxels_2d_conn4",
"test/unit/test_filters.py::FilterTest::test_find_disconnected_voxels_3d",
"test/unit/test_filters.py::FilterTest::test_find_disconnected_voxels_3d_conn6",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_2d_axis0",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_2d_axis1",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_no_paths",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_3d_axis2",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_3d_axis1",
"test/unit/test_filters.py::FilterTest::test_trim_nonpercolating_paths_3d_axis0",
"test/unit/test_filters.py::FilterTest::test_trim_disconnected_blobs",
"test/unit/test_filters.py::FilterTest::test_fill_blind_pores",
"test/unit/test_filters.py::FilterTest::test_trim_floating_solid",
"test/unit/test_filters.py::FilterTest::test_trim_extrema_min",
"test/unit/test_filters.py::FilterTest::test_trim_extrema_max",
"test/unit/test_filters.py::FilterTest::test_local_thickness",
"test/unit/test_filters.py::FilterTest::test_local_thickness_known_sizes",
"test/unit/test_filters.py::FilterTest::test_porosimetry",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_dilate_2D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_erode_2D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_opening_2D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_closing_2D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_dilate_3D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_erode_3D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_opening_3D",
"test/unit/test_filters.py::FilterTest::test_morphology_fft_closing_3D",
"test/unit/test_filters.py::FilterTest::test_reduce_peaks",
"test/unit/test_filters.py::FilterTest::test_nphase_border_2d_no_diagonals",
"test/unit/test_filters.py::FilterTest::test_nphase_border_2d_diagonals",
"test/unit/test_filters.py::FilterTest::test_nphase_border_3d_no_diagonals",
"test/unit/test_filters.py::FilterTest::test_nphase_border_3d_diagonals",
"test/unit/test_filters.py::FilterTest::test_find_dt_artifacts",
"test/unit/test_filters.py::FilterTest::test_snow_partitioning_n",
"test/unit/test_filters.py::FilterTest::test_chunked_func_2D",
"test/unit/test_filters.py::FilterTest::test_chunked_func_3D",
"test/unit/test_filters.py::FilterTest::test_chunked_func_3D_w_strel",
"test/unit/test_filters.py::FilterTest::test_apply_padded",
"test/unit/test_filters.py::FilterTest::test_trim_small_clusters"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-17T02:32:36Z" | mit |
|
PMEAL__porespy-809 | diff --git a/porespy/networks/_snow2.py b/porespy/networks/_snow2.py
index 5aa197559..404ef5d99 100644
--- a/porespy/networks/_snow2.py
+++ b/porespy/networks/_snow2.py
@@ -244,4 +244,4 @@ def _parse_pad_width(pad_width, shape):
else:
raise Exception("pad_width components can't have 2+ elements")
- return np.array(tmp)
+ return np.array(tmp, dtype=int)
| PMEAL/porespy | 63ce07d4a1c0337bc5942a33777fae81adc6aba9 | diff --git a/test/unit/test_snow2.py b/test/unit/test_snow2.py
index be6057dbb..c22273b01 100644
--- a/test/unit/test_snow2.py
+++ b/test/unit/test_snow2.py
@@ -108,10 +108,11 @@ class Snow2Test:
]
for case, out_desired in test_cases_2d:
try:
- out = ps.networks._parse_pad_width(case, shape_2d).tolist()
+ out = ps.networks._parse_pad_width(case, shape_2d)
+ assert out.dtype == int
except Exception as e:
out = e.args[0]
- assert out == out_desired
+ assert np.all(out == out_desired)
def test_parse_pad_width_3d(self):
shape_3d = [10, 10, 10]
@@ -133,10 +134,11 @@ class Snow2Test:
]
for case, out_desired in test_cases_3d:
try:
- out = ps.networks._parse_pad_width(case, shape_3d).tolist()
+ out = ps.networks._parse_pad_width(case, shape_3d)
+ assert out.dtype == int
except Exception as e:
out = e.args[0]
- assert out == out_desired
+ assert np.all(out == out_desired)
def test_label_phases(self):
im = self.spheres2D
@@ -155,7 +157,7 @@ class Snow2Test:
def test_ensure_correct_sizes_are_returned_single_phase_2d(self):
im = self.spheres2D
snow = ps.networks.snow2(phases=im, parallelization=None)
- mode = spst.mode(snow.network['pore.extended_diameter'])
+ mode = spst.mode(snow.network['pore.extended_diameter'], keepdims=False)
assert mode[0] == 60
D = np.unique(snow.network['pore.extended_diameter'].astype(int))
assert np.all(D == np.array([30, 34, 60]))
@@ -164,7 +166,7 @@ class Snow2Test:
im = self.spheres2D
phases = im.astype(int) + 1
snow = ps.networks.snow2(phases=phases, parallelization=None)
- mode = spst.mode(snow.network['pore.extended_diameter'])
+ mode = spst.mode(snow.network['pore.extended_diameter'], keepdims=False)
assert mode[0] == 60
D = np.unique(snow.network['pore.extended_diameter'].astype(int))
assert np.all(D == np.array([15, 16, 17, 18, 19, 21,
@@ -173,7 +175,7 @@ class Snow2Test:
def test_ensure_correct_sizes_are_returned_single_phase_3d(self):
im = self.spheres3D
snow = ps.networks.snow2(phases=im, parallelization=None)
- mode = spst.mode(snow.network['pore.extended_diameter'])
+ mode = spst.mode(snow.network['pore.extended_diameter'], keepdims=False)
assert mode[0] == 30
D = np.unique(snow.network['pore.extended_diameter'].astype(int))
assert np.all(D == np.array([25, 30, 38]))
@@ -182,7 +184,7 @@ class Snow2Test:
im = self.spheres3D
phases = im.astype(int) + 1
snow = ps.networks.snow2(phases=phases, parallelization=None)
- mode = spst.mode(snow.network['pore.extended_diameter'])
+ mode = spst.mode(snow.network['pore.extended_diameter'], keepdims=False)
assert mode[0] == 30
D = np.unique(snow.network['pore.extended_diameter'].astype(int))
assert np.all(D == np.array([7, 12, 17, 19, 20, 22, 24, 25, 26,
| boundary_width in snow2 breaks when trying to specify padding on both ends of all axes
This works:
`b = ps.networks.snow2(im, boundary_width=[[5, 15], 10])`
This breaks:
`b = ps.networks.snow2(im, boundary_width=[[5, 15], [10, 10]])`
The complaint is coming from `np.pad`: `TypeError: `pad_width` must be of integral type.` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/test_snow2.py::Snow2Test::test_parse_pad_width_2d",
"test/unit/test_snow2.py::Snow2Test::test_parse_pad_width_3d"
] | [
"test/unit/test_snow2.py::Snow2Test::test_single_phase_2d_serial",
"test/unit/test_snow2.py::Snow2Test::test_return_all_serial",
"test/unit/test_snow2.py::Snow2Test::test_multiphase_2d",
"test/unit/test_snow2.py::Snow2Test::test_single_phase_3d",
"test/unit/test_snow2.py::Snow2Test::test_multiphase_3d",
"test/unit/test_snow2.py::Snow2Test::test_label_phases",
"test/unit/test_snow2.py::Snow2Test::test_ensure_correct_sizes_are_returned_single_phase_2d",
"test/unit/test_snow2.py::Snow2Test::test_ensure_correct_sizes_are_returned_dual_phase_2d",
"test/unit/test_snow2.py::Snow2Test::test_ensure_correct_sizes_are_returned_single_phase_3d",
"test/unit/test_snow2.py::Snow2Test::test_ensure_correct_sizes_are_returned_dual_phase_3d",
"test/unit/test_snow2.py::Snow2Test::test_trim_saddle_points",
"test/unit/test_snow2.py::Snow2Test::test_trim_saddle_points_legacy",
"test/unit/test_snow2.py::Snow2Test::test_accuracy_standard",
"test/unit/test_snow2.py::Snow2Test::test_single_and_dual_phase_on_blobs",
"test/unit/test_snow2.py::Snow2Test::test_send_peaks_to_snow_partitioning",
"test/unit/test_snow2.py::Snow2Test::test_send_peaks_to_snow_partitioning_n",
"test/unit/test_snow2.py::Snow2Test::test_snow2_with_peaks",
"test/unit/test_snow2.py::Snow2Test::test_two_phases_and_boundary_nodes"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2023-03-15T19:17:20Z" | mit |
|
PMEAL__porespy-863 | diff --git a/porespy/metrics/_funcs.py b/porespy/metrics/_funcs.py
index d1001a351..e44651570 100644
--- a/porespy/metrics/_funcs.py
+++ b/porespy/metrics/_funcs.py
@@ -1104,7 +1104,7 @@ def pc_curve(im, sizes=None, pc=None, seq=None,
return pc_curve
-def satn_profile(satn, s, axis=0, span=10, mode='tile'):
+def satn_profile(satn, s=None, im=None, axis=0, span=10, mode='tile'):
r"""
Computes a saturation profile from an image of fluid invasion
@@ -1115,7 +1115,12 @@ def satn_profile(satn, s, axis=0, span=10, mode='tile'):
invasion. 0's are treated as solid and -1's are treated as uninvaded
void space.
s : scalar
- The global saturation value for which the profile is desired
+ The global saturation value for which the profile is desired. If `satn` is
+ a pre-thresholded boolean image then this is ignored, `im` is required.
+ im : ndarray
+ A boolean image with `True` values indicating the void phase. This is used
+ to compute the void volume if `satn` is given as a pre-thresholded boolean
+ mask.
axis : int
The axis along which to profile should be measured
span : int
@@ -1153,46 +1158,41 @@ def satn_profile(satn, s, axis=0, span=10, mode='tile'):
<https://porespy.org/examples/metrics/reference/satn_profile.html>`_
to view online example.
"""
- # @numba.njit()
- def func(satn, s, axis, span, mode):
- span = max(1, span)
- satn = np.swapaxes(satn, 0, axis)
- if mode == 'tile':
- y = np.zeros(int(satn.shape[0]/span))
- z = np.zeros_like(y)
- for i in range(int(satn.shape[0]/span)):
- void = satn[i*span:(i+1)*span, ...] != 0
- nwp = (satn[i*span:(i+1)*span, ...] < s) \
- *(satn[i*span:(i+1)*span, ...] > 0)
- y[i] = nwp.sum(dtype=np.int64)/void.sum(dtype=np.int64)
- z[i] = i*span + (span-1)/2
- if mode == 'slide':
- y = np.zeros(int(satn.shape[0]-span))
- z = np.zeros_like(y)
- for i in range(int(satn.shape[0]-span)):
- void = satn[i:i+span, ...] != 0
- nwp = (satn[i:i+span, ...] < s)*(satn[i:i+span, ...] > 0)
- y[i] = nwp.sum(dtype=np.int64)/void.sum(dtype=np.int64)
- z[i] = i + (span-1)/2
- return z, y
-
- z, y = func(satn=satn, s=s, axis=axis, span=span, mode=mode)
-
- class results(Results):
- r"""
-
- Attributes
- ----------
- position : ndarray
- The position along the given axis at which saturation values are
- computed. The units are in voxels.
- saturation : ndarray
- The computed saturation value at each position
-
- """
- position = z
- saturation = y
-
+ span = max(1, span)
+ if s is None:
+ if satn.dtype != bool:
+ msg = 'Must specify a target saturation if saturation map is provided'
+ raise Exception(msg)
+ s = 2 # Will find ALL voxels, then > 0 will limit to only True ones
+ satn = satn.astype(int)
+ satn[satn == 0] = -1
+ satn[~im] = 0
+ else:
+ msg = 'The maximum saturation in the image is less than the given threshold'
+ if satn.max() < s:
+ raise Exception(msg)
+
+ satn = np.swapaxes(satn, 0, axis)
+ if mode == 'tile':
+ y = np.zeros(int(satn.shape[0]/span))
+ z = np.zeros_like(y)
+ for i in range(int(satn.shape[0]/span)):
+ void = satn[i*span:(i+1)*span, ...] != 0
+ nwp = (satn[i*span:(i+1)*span, ...] <= s) \
+ *(satn[i*span:(i+1)*span, ...] > 0)
+ y[i] = nwp.sum(dtype=np.int64)/void.sum(dtype=np.int64)
+ z[i] = i*span + (span-1)/2
+ if mode == 'slide':
+ y = np.zeros(int(satn.shape[0]-span))
+ z = np.zeros_like(y)
+ for i in range(int(satn.shape[0]-span)):
+ void = satn[i:i+span, ...] != 0
+ nwp = (satn[i:i+span, ...] <= s)*(satn[i:i+span, ...] > 0)
+ y[i] = nwp.sum(dtype=np.int64)/void.sum(dtype=np.int64)
+ z[i] = i + (span-1)/2
+ results = Results()
+ results.position = z
+ results.saturation = y
return results
| PMEAL/porespy | 2b340e9cd1c6923f375496b0c040ea614d75e4d5 | diff --git a/test/unit/test_metrics.py b/test/unit/test_metrics.py
index 993b3fd81..8879d3846 100644
--- a/test/unit/test_metrics.py
+++ b/test/unit/test_metrics.py
@@ -256,6 +256,61 @@ class MetricsTest():
assert hasattr(pc, 'pc')
assert hasattr(pc, 'snwp')
+ def test_satn_profile_axis(self):
+ satn = np.tile(np.atleast_2d(np.linspace(1, 0.01, 100)), (100, 1))
+ satn[:25, :] = 0
+ satn[-25:, :] = -1
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=1, mode='tile')
+ assert len(prof1.saturation) == 100
+ assert prof1.saturation[0] == 0
+ assert prof1.saturation[-1] == 2/3
+ assert prof1.saturation[49] == 0
+ assert prof1.saturation[50] == 2/3
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=0, span=1, mode='tile')
+ assert len(prof1.saturation) == 100
+ assert np.isnan(prof1.saturation[0])
+ assert prof1.saturation[-1] == 0
+ assert prof1.saturation[50] == 0.5
+
+ def test_satn_profile_span(self):
+ satn = np.tile(np.atleast_2d(np.linspace(1, 0.01, 100)), (100, 1))
+ satn[:25, :] = 0
+ satn[-25:, :] = -1
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=20, mode='tile')
+ assert len(prof1.saturation) == 5
+ assert prof1.saturation[0] == 0
+ assert prof1.saturation[-1] == 2/3
+ assert prof1.saturation[2] == 1/3
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=20, mode='slide')
+ assert len(prof1.saturation) == 80
+ assert prof1.saturation[31] == 1/30
+ assert prof1.saturation[48] == 0.6
+
+ def test_satn_profile_threshold(self):
+ satn = np.tile(np.atleast_2d(np.linspace(1, 0.01, 100)), (100, 1))
+ satn[:25, :] = 0
+ satn[-25:, :] = -1
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=1, mode='tile')
+ t = (satn <= 0.5)*(satn > 0)
+ im = satn != 0
+ prof2 = ps.metrics.satn_profile(satn=t, im=im, axis=1, span=1, mode='tile')
+ assert len(prof1.saturation) == 100
+ assert len(prof2.saturation) == 100
+ assert np.all(prof1.saturation == prof2.saturation)
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=10, mode='tile')
+ prof2 = ps.metrics.satn_profile(satn=t, im=im, axis=1, span=10, mode='tile')
+ assert np.all(prof1.saturation == prof2.saturation)
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5, axis=1, span=20, mode='slide')
+ prof2 = ps.metrics.satn_profile(satn=t, im=im, axis=1, span=20, mode='slide')
+ assert np.all(prof1.saturation == prof2.saturation)
+
+ def test_satn_profile_exception(self):
+ satn = np.tile(np.atleast_2d(np.linspace(0.4, 0.01, 100)), (100, 1))
+ satn[:25, :] = 0
+ satn[-25:, :] = -1
+ with pytest.raises(Exception):
+ prof1 = ps.metrics.satn_profile(satn=satn, s=0.5)
+
if __name__ == '__main__':
t = MetricsTest()
| Update `satn_profile` to accept an already thresholded image | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_axis",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_span",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_threshold",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_exception"
] | [
"test/unit/test_metrics.py::MetricsTest::test_porosity",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_2d",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_3d",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_3d_scaled",
"test/unit/test_metrics.py::MetricsTest::test_two_point_correlation_bf",
"test/unit/test_metrics.py::MetricsTest::test_rev",
"test/unit/test_metrics.py::MetricsTest::test_radial_density",
"test/unit/test_metrics.py::MetricsTest::test_props_to_DataFrame",
"test/unit/test_metrics.py::MetricsTest::test_prop_to_image",
"test/unit/test_metrics.py::MetricsTest::test_porosity_profile",
"test/unit/test_metrics.py::MetricsTest::test_porosity_profile_ndim_check",
"test/unit/test_metrics.py::MetricsTest::test_linear_density",
"test/unit/test_metrics.py::MetricsTest::test_chord_length_distribution_2D",
"test/unit/test_metrics.py::MetricsTest::test_chord_length_distribution_3D",
"test/unit/test_metrics.py::MetricsTest::test_chord_counts",
"test/unit/test_metrics.py::MetricsTest::test_region_surface_areas",
"test/unit/test_metrics.py::MetricsTest::test_phase_fraction",
"test/unit/test_metrics.py::MetricsTest::test_representative_elementary_volume",
"test/unit/test_metrics.py::MetricsTest::test_pc_curve",
"test/unit/test_metrics.py::MetricsTest::test_pc_curve_from_ibip"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2023-07-10T14:38:02Z" | mit |
|
PMEAL__porespy-865 | diff --git a/porespy/filters/_size_seq_satn.py b/porespy/filters/_size_seq_satn.py
index 6b83bc26d..ace920e73 100644
--- a/porespy/filters/_size_seq_satn.py
+++ b/porespy/filters/_size_seq_satn.py
@@ -8,6 +8,7 @@ __all__ = [
'size_to_satn',
'seq_to_satn',
'pc_to_satn',
+ 'pc_to_seq',
'satn_to_seq',
]
@@ -220,6 +221,72 @@ def seq_to_satn(seq, im=None, mode='drainage'):
return satn
+def pc_to_seq(pc, im, mode='drainage'):
+ r"""
+ Converts an image of capillary entry pressures to invasion sequence values
+
+ Parameters
+ ----------
+ pc : ndarray
+ A Numpy array with the value in each voxel indicating the capillary
+ pressure at which it was invaded. In order to accommodate the
+ possibility of both positive and negative capillary pressure values,
+ uninvaded voxels should be indicated by ``+inf`` and residual phase
+ by ``-inf``. Solid vs void phase is defined by ``im`` which is
+ mandatory.
+ im : ndarray
+ A Numpy array with ``True`` values indicating the void space
+ mode : str
+ Controls how the pressures are converted to sequence. The options are:
+
+ ============= ==============================================================
+ `mode` Description
+ ============= ==============================================================
+ 'drainage' The pressures are assumed to have been filled from smallest to
+ largest. Voxels with -np.inf are treated as though they are
+ invaded by non-wetting fluid at the start of the process, and
+ voxels with +np.inf are treated as though they are never
+ invaded.
+ 'imbibition' The pressures are assumed to have been filled from largest to
+ smallest. Voxels with -np.inf are treated as though they are
+ already occupied by non-wetting fluid at the start of the
+ process, and voxels with +np.inf are treated as though they
+ are filled with wetting phase.
+ ============= ==============================================================
+
+ Returns
+ -------
+ seq : ndarray
+ A Numpy array the same shape as `pc`, with each voxel value indicating
+ the sequence at which it was invaded, according to the specified `mode`.
+ Uninvaded voxels are set to -1.
+
+ Notes
+ -----
+ Voxels with `+inf` are treated as though they were never invaded so are given a
+ sequence value of -1. Voxels with `-inf` are treated as though they were
+ invaded by non-wetting phase at the start of the simulation so are given a
+ sequence number of 1 for both mode `drainage` and `imbibition`.
+
+ Examples
+ --------
+ `Click here
+ <https://porespy.org/examples/filters/reference/pc_to_seq.html>`_
+ to view online example.
+ """
+ inf = pc == np.inf # save for later
+ if mode == 'drainage':
+ bins = np.unique(pc)
+ elif mode == 'imbibition':
+ pc[pc == -np.inf] = np.inf
+ bins = np.unique(pc)[-1::-1]
+ a = np.digitize(pc, bins=bins)
+ a[~im] = 0
+ a[np.where(inf)] = -1
+ a = make_contiguous(a, mode='symmetric')
+ return a
+
+
def pc_to_satn(pc, im, mode='drainage'):
r"""
Converts an image of capillary entry pressures to saturation values
@@ -242,9 +309,9 @@ def pc_to_satn(pc, im, mode='drainage'):
`mode` Description
============= ==============================================================
'drainage' The pressures are assumed to have been filled from smallest to
- largest, ignoring +/- infs
+ largest.
'imbibition' The pressures are assumed to have been filled from largest to
- smallest, ignoring +/- infs
+ smallest
============= ==============================================================
Returns
@@ -252,7 +319,9 @@ def pc_to_satn(pc, im, mode='drainage'):
satn : ndarray
A Numpy array the same shape as `pc`, with each voxel value indicating
the global saturation at which it was invaded, according to the specified
- `mode`.
+ `mode`. Voxels with `-inf` are treated as though they were invaded
+ at the start of the simulation so are given a sequence number of 1 for both
+ mode `drainage` and `imbibition`.
Notes
-----
diff --git a/porespy/metrics/_funcs.py b/porespy/metrics/_funcs.py
index e44651570..328cf83f2 100644
--- a/porespy/metrics/_funcs.py
+++ b/porespy/metrics/_funcs.py
@@ -30,6 +30,7 @@ __all__ = [
"pc_curve",
"pc_curve_from_ibip",
"pc_curve_from_mio",
+ "pc_map_to_pc_curve",
]
@@ -1055,10 +1056,14 @@ def pc_curve(im, sizes=None, pc=None, seq=None,
for n in seqs:
pbar.update()
mask = seq == n
- # The following assumes only one size found, which was confirmed
- r = sizes[mask][0]*voxel_size
- pc = -2*sigma*np.cos(np.deg2rad(theta))/r
- x.append(pc)
+ if (pc is not None) and (sizes is not None):
+ raise Exception("Only one of pc or sizes can be specified")
+ elif pc is not None:
+ pressure = pc[mask][0]
+ elif sizes is not None:
+ r = sizes[mask][0]*voxel_size
+ pressure = -2*sigma*np.cos(np.deg2rad(theta))/r
+ x.append(pressure)
snwp = ((seq <= n)*(seq > 0) *
(im == 1)).sum(dtype=np.int64)/im.sum(dtype=np.int64)
y.append(snwp)
@@ -1104,6 +1109,62 @@ def pc_curve(im, sizes=None, pc=None, seq=None,
return pc_curve
+def pc_map_to_pc_curve(pc, im, seq=None):
+ r"""
+ Converts a pc map into a capillary pressure curve
+
+ Parameters
+ ----------
+ pc : ndarray
+ A numpy array with each voxel containing the capillary pressure at which
+ it was invaded. `-inf` indicates voxels which are already filled with
+ non-wetting fluid, and `+inf` indicates voxels that are not invaded by
+ non-wetting fluid (e.g., trapped wetting phase). Solids should be
+ noted by `+inf` but this is also enforced inside the function using `im`.
+ im : ndarray
+ A numpy array with `True` values indicating the void space and `False`
+ elsewhere. This is necessary to define the total void volume of the domain
+ for computing the saturation.
+ seq : ndarray, optional
+ A numpy array with each voxel containing the sequence at which it was
+ invaded. This is required when analyzing results from invasion percolation
+ since the pressures in `pc` do not correspond to the sequence in which
+ they were filled.
+
+ Returns
+ -------
+ results : dataclass-like
+ A dataclass like object with the following attributes:
+
+ ================== =========================================================
+ Attribute Description
+ ================== =========================================================
+ pc The capillary pressure
+ snwp The fraction of void space filled by non-wetting
+ phase at each pressure in ``pc``
+ ================== =========================================================
+
+ Notes
+ -----
+ To use this function with the results of `porosimetry` or `ibip` the sizes map
+ must be converted to a capillary pressure map first. `drainage` and `invasion`
+ both return capillary pressure maps which can be passed directly as `pc`.
+ """
+ pc[~im] = np.inf # Ensure solid voxels are set to inf invasion pressure
+ if seq is None:
+ pcs, counts = np.unique(pc, return_counts=True)
+ else:
+ vals, index, counts = np.unique(seq, return_index=True, return_counts=True)
+ pcs = pc.flatten()[index]
+ snwp = np.cumsum(counts[pcs < np.inf])/im.sum()
+ pcs = pcs[pcs < np.inf]
+
+ results = Results()
+ results.pc = pcs
+ results.snwp = snwp
+ return results
+
+
def satn_profile(satn, s=None, im=None, axis=0, span=10, mode='tile'):
r"""
Computes a saturation profile from an image of fluid invasion
| PMEAL/porespy | ff3029fad904dab8ac6f5acf02e3270a704cb07f | diff --git a/test/unit/test_filters_size_seq_satn.py b/test/unit/test_filters_size_seq_satn.py
index 2aca65070..b3ea28425 100644
--- a/test/unit/test_filters_size_seq_satn.py
+++ b/test/unit/test_filters_size_seq_satn.py
@@ -249,6 +249,17 @@ class SeqTest():
assert satn[0, 0] == 0.0
assert satn[0, 1] == 0.9
+ def test_pc_to_seq(self):
+ pc = 10.0*np.tile(np.atleast_2d(np.arange(0, 21)), [21, 1])
+ pc[:, 0] = 0
+ pc[:, -5] = np.inf
+ im = pc > 0
+ seq = ps.filters.pc_to_seq(pc=pc, im=im, mode='drainage')
+ assert seq[0, 0] == 0
+ assert seq[0, 1] == 1
+ assert seq[0, -1] == 19
+ assert seq[0, -5] == -1
+
if __name__ == '__main__':
t = SeqTest()
diff --git a/test/unit/test_metrics.py b/test/unit/test_metrics.py
index 8879d3846..595f59e10 100644
--- a/test/unit/test_metrics.py
+++ b/test/unit/test_metrics.py
@@ -309,7 +309,72 @@ class MetricsTest():
satn[:25, :] = 0
satn[-25:, :] = -1
with pytest.raises(Exception):
- prof1 = ps.metrics.satn_profile(satn=satn, s=0.5)
+ _ = ps.metrics.satn_profile(satn=satn, s=0.5)
+
+ def test_pc_map_to_pc_curve_drainage_with_trapping_and_residual(self):
+ vx = 50e-6
+ im = ps.generators.blobs(shape=[200, 200], porosity=0.5, blobiness=2, seed=0)
+ mio = ps.filters.porosimetry(im)
+ trapped = im*(~ps.filters.fill_blind_pores(im))
+ residual = im*(~trapped)*(mio < mio.mean())
+ pc = -2*0.072*np.cos(np.radians(110))/(mio*vx)
+ pc[trapped] = np.inf
+ pc[residual] = -np.inf
+ d = ps.metrics.pc_map_to_pc_curve(pc, im)
+ assert d.snwp[0] == residual.sum()/im.sum()
+ assert d.snwp[-1] == (im.sum() - trapped.sum())/im.sum()
+
+ def test_pc_map_to_pc_curve_invasion_with_trapping(self):
+ vx = 50e-6
+ im = ps.generators.blobs(shape=[200, 200], porosity=0.5, blobiness=2, seed=0)
+ ibip = ps.simulations.ibip(im=im)
+ pc = -2*0.072*np.cos(np.radians(110))/(ibip.inv_sizes*vx)
+ trapped = ibip.inv_sequence == -1
+ # residual = pc*im > 500
+ pc[trapped] = np.inf
+ seq = ibip.inv_sequence
+ d = ps.metrics.pc_map_to_pc_curve(pc=pc, im=im, seq=seq)
+ # assert d.snwp[0] == residual.sum()/im.sum()
+ assert d.snwp[-1] == (im.sum() - trapped.sum())/im.sum()
+
+ def test_pc_map_to_pc_curve_compare_invasion_to_drainage(self):
+ vx = 50e-6
+ im = ps.generators.blobs(shape=[200, 200], porosity=0.6, blobiness=1, seed=0)
+ im = ps.filters.fill_blind_pores(im, conn=8, surface=True)
+
+ # Do drainage without sequence
+ dt = edt(im)
+ mio = ps.filters.porosimetry(im, sizes=np.unique(dt)[1:].astype(int))
+ pc1 = -2*0.072*np.cos(np.radians(110))/(mio*vx)
+ d1 = ps.metrics.pc_map_to_pc_curve(pc=pc1, im=im)
+
+ # Ensure drainage works with sequence
+ seq = ps.filters.pc_to_seq(pc1, im)
+ d3 = ps.metrics.pc_map_to_pc_curve(pc=pc1, im=im, seq=seq)
+
+ # Using the original ibip, which requires that sequence be supplied
+ ibip = ps.simulations.ibip(im=im)
+ pc2 = -2*0.072*np.cos(np.radians(110))/(ibip.inv_sizes*vx)
+ pc2[ibip.inv_sequence < 0] = np.inf
+ seq = ibip.inv_sequence
+ d2 = ps.metrics.pc_map_to_pc_curve(pc=pc2, im=im, seq=seq)
+
+ # Ensure they all return the same Pc values
+ assert_allclose(np.unique(d1.pc), np.unique(d2.pc), rtol=1e-10)
+ assert_allclose(np.unique(d2.pc), np.unique(d3.pc), rtol=1e-10)
+ assert_allclose(np.unique(d1.pc), np.unique(d3.pc), rtol=1e-10)
+
+ # Ensure the high and low saturations are all the same
+ assert d1.snwp[0] == d2.snwp[0]
+ assert d1.snwp[-1] == d2.snwp[-1]
+ assert d2.snwp[0] == d3.snwp[0]
+ assert d2.snwp[-1] == d3.snwp[-1]
+
+ # These graphs should lie perfectly on top of each other
+ # import matplotlib.pyplot as plt
+ # plt.step(d1.pc, d1.snwp, 'r-o', where='post')
+ # plt.step(d3.pc, d3.snwp, 'b--', where='post')
+ # plt.step(d2.pc, d2.snwp, 'g.-', where='post')
if __name__ == '__main__':
| Add a `pc_to_seq` function
Not sure why we don't have this already. The only complication is that it does not work with ibip results, since the Pcs are not filled sequentially. Not sure how to indicate this. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_pc_to_seq",
"test/unit/test_metrics.py::MetricsTest::test_pc_map_to_pc_curve_drainage_with_trapping_and_residual",
"test/unit/test_metrics.py::MetricsTest::test_pc_map_to_pc_curve_invasion_with_trapping",
"test/unit/test_metrics.py::MetricsTest::test_pc_map_to_pc_curve_compare_invasion_to_drainage"
] | [
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_satn_to_seq",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_satn_to_seq_uninvaded",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_satn_to_seq_modes",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_seq_modes",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_seq_uninvaded",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_seq_int_bins",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_seq_too_many_bins",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_seq_to_satn_fully_filled",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_seq_to_satn_partially_filled",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_seq_to_satn_modes",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_seq_to_satn_uninvaded",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_satn",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_satn_modes",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_size_to_satn_uninvaded",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_compare_size_and_seq_to_satn",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_pc_to_satn_uninvaded_drainage",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_pc_to_satn_uninvaded_imbibition",
"test/unit/test_filters_size_seq_satn.py::SeqTest::test_pc_to_satn_positive_and_negative_pressures",
"test/unit/test_metrics.py::MetricsTest::test_porosity",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_2d",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_3d",
"test/unit/test_metrics.py::MetricsTest::test_tpcf_fft_3d_scaled",
"test/unit/test_metrics.py::MetricsTest::test_two_point_correlation_bf",
"test/unit/test_metrics.py::MetricsTest::test_rev",
"test/unit/test_metrics.py::MetricsTest::test_radial_density",
"test/unit/test_metrics.py::MetricsTest::test_props_to_DataFrame",
"test/unit/test_metrics.py::MetricsTest::test_prop_to_image",
"test/unit/test_metrics.py::MetricsTest::test_porosity_profile",
"test/unit/test_metrics.py::MetricsTest::test_porosity_profile_ndim_check",
"test/unit/test_metrics.py::MetricsTest::test_linear_density",
"test/unit/test_metrics.py::MetricsTest::test_chord_length_distribution_2D",
"test/unit/test_metrics.py::MetricsTest::test_chord_length_distribution_3D",
"test/unit/test_metrics.py::MetricsTest::test_chord_counts",
"test/unit/test_metrics.py::MetricsTest::test_region_surface_areas",
"test/unit/test_metrics.py::MetricsTest::test_phase_fraction",
"test/unit/test_metrics.py::MetricsTest::test_representative_elementary_volume",
"test/unit/test_metrics.py::MetricsTest::test_pc_curve",
"test/unit/test_metrics.py::MetricsTest::test_pc_curve_from_ibip",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_axis",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_span",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_threshold",
"test/unit/test_metrics.py::MetricsTest::test_satn_profile_exception"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-07-10T15:07:56Z" | mit |
|
PSLmodels__microdf-153 | diff --git a/.github/workflows/check_jupyterbook.yml b/.github/workflows/check_jupyterbook.yml
index d2174d5..49f4e0d 100644
--- a/.github/workflows/check_jupyterbook.yml
+++ b/.github/workflows/check_jupyterbook.yml
@@ -15,7 +15,7 @@ jobs:
with:
activate-environment: microdf
environment-file: environment.yml
- python-version: 3.8
+ python-version: 3.9
auto-activate-base: false
- name: Build # Build Jupyter Book
diff --git a/.github/workflows/deploy_jupyterbook.yml b/.github/workflows/deploy_jupyterbook.yml
index 677b49a..f78dcc5 100644
--- a/.github/workflows/deploy_jupyterbook.yml
+++ b/.github/workflows/deploy_jupyterbook.yml
@@ -18,7 +18,7 @@ jobs:
with:
activate-environment: microdf
environment-file: environment.yml
- python-version: 3.8
+ python-version: 3.9
auto-activate-base: false
- name: Build
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index c887c4e..133ddf6 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.8]
+ python-version: [3.9]
steps:
- name: Checkout
diff --git a/microdf/inequality.py b/microdf/inequality.py
index 765a451..3259f17 100644
--- a/microdf/inequality.py
+++ b/microdf/inequality.py
@@ -3,7 +3,7 @@ import numpy as np
import microdf as mdf
-def gini(df, col, w=None, negatives=None):
+def gini(df, col, w=None, negatives=None, groupby=None):
"""Calculates Gini index.
:param df: DataFrame.
@@ -16,96 +16,117 @@ def gini(df, col, w=None, negatives=None):
when this minimum is negative. That is, it adds the absolute
minimum value.
Defaults to None, which leaves negative values as they are.
+ :param groupby: Column, or list of columns, to group by.
+
:returns: A float, the Gini index.
"""
- # Requires float numpy arrays (not pandas Series or lists) to work.
- x = np.array(df[col]).astype("float")
- if negatives == "zero":
- x[x < 0] = 0
- if negatives == "shift" and np.amin(x) < 0:
- x -= np.amin(x)
- if w is not None:
- w = np.array(df[w]).astype("float")
- sorted_indices = np.argsort(x)
- sorted_x = x[sorted_indices]
- sorted_w = w[sorted_indices]
- cumw = np.cumsum(sorted_w)
- cumxw = np.cumsum(sorted_x * sorted_w)
- return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
- cumxw[-1] * cumw[-1]
- )
- else:
- sorted_x = np.sort(x)
- n = len(x)
- cumxw = np.cumsum(sorted_x)
- # The above formula, with all weights equal to 1 simplifies to:
- return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n
-
-
-def top_x_pct_share(df, col, top_x_pct, w=None):
+
+ def _gini(df, col, w=None, negatives=None):
+ # Requires float numpy arrays (not pandas Series or lists) to work.
+ x = np.array(df[col]).astype("float")
+ if negatives == "zero":
+ x[x < 0] = 0
+ if negatives == "shift" and np.amin(x) < 0:
+ x -= np.amin(x)
+ if w is not None:
+ w = np.array(df[w]).astype("float")
+ sorted_indices = np.argsort(x)
+ sorted_x = x[sorted_indices]
+ sorted_w = w[sorted_indices]
+ cumw = np.cumsum(sorted_w)
+ cumxw = np.cumsum(sorted_x * sorted_w)
+ return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
+ cumxw[-1] * cumw[-1]
+ )
+ else:
+ sorted_x = np.sort(x)
+ n = len(x)
+ cumxw = np.cumsum(sorted_x)
+ # The above formula, with all weights equal to 1 simplifies to:
+ return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n
+
+ if groupby is None:
+ return _gini(df, col, w, negatives)
+ return df.groupby(groupby).apply(lambda x: _gini(x, col, w, negatives))
+
+
+def top_x_pct_share(df, col, top_x_pct, w=None, groupby=None):
"""Calculates top x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top x%.
"""
- threshold = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
- top_x_pct_sum = mdf.weighted_sum(df[df[col] >= threshold], col, w)
- total_sum = mdf.weighted_sum(df, col, w)
- return top_x_pct_sum / total_sum
+
+ def _top_x_pct_share(df, col, top_x_pct, w=None):
+ threshold = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
+ top_x_pct_sum = mdf.weighted_sum(df[df[col] >= threshold], col, w)
+ total_sum = mdf.weighted_sum(df, col, w)
+ return top_x_pct_sum / total_sum
+
+ if groupby is None:
+ return _top_x_pct_share(df, col, top_x_pct, w)
+ return df.groupby(groupby).apply(
+ lambda x: _top_x_pct_share(x, col, top_x_pct, w)
+ )
-def bottom_x_pct_share(df, col, bottom_x_pct, w=None):
+def bottom_x_pct_share(df, col, bottom_x_pct, w=None, groupby=None):
"""Calculates bottom x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param bottom_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the bottom x%.
"""
- return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w, top=False)
+ return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w, groupby)
-def bottom_50_pct_share(df, col, w=None):
+def bottom_50_pct_share(df, col, w=None, groupby=None):
"""Calculates bottom 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the bottom 50%.
"""
- return bottom_x_pct_share(df, col, 0.5, w)
+ return bottom_x_pct_share(df, col, 0.5, w, groupby)
-def top_50_pct_share(df, col, w=None):
+def top_50_pct_share(df, col, w=None, groupby=None):
"""Calculates top 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top 50%.
"""
- return top_x_pct_share(df, col, 0.5, w)
+ return top_x_pct_share(df, col, 0.5, w, groupby)
-def top_10_pct_share(df, col, w=None):
+def top_10_pct_share(df, col, w=None, groupby=None):
"""Calculates top 10% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top 10%.
"""
- return top_x_pct_share(df, col, 0.1, w)
+ return top_x_pct_share(df, col, 0.1, w, groupby)
def top_1_pct_share(df, col, w=None):
@@ -114,32 +135,37 @@ def top_1_pct_share(df, col, w=None):
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w)
-def top_0_1_pct_share(df, col, w=None):
+def top_0_1_pct_share(df, col, w=None, groupby=None):
"""Calculates top 0.1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top 0.1%.
"""
- return top_x_pct_share(df, col, 0.001, w)
+ return top_x_pct_share(df, col, 0.001, w, groupby)
-def t10_b50(df, col, w=None):
+def t10_b50(df, col, w=None, groupby=None):
"""Calculates ratio between the top 10% and bottom 50% shares.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
+ :param groupby: Column, or list of columns, to group by.
:returns: The share of w-weighted val held by the top 10% divided by
the share of w-weighted val held by the bottom 50%.
"""
- return top_10_pct_share(df, col, w) / bottom_50_pct_share(df, col, w)
+ t10 = top_10_pct_share(df, col, w, groupby)
+ b50 = bottom_50_pct_share(df, col, w, groupby)
+ return t10 / b50
| PSLmodels/microdf | a2422e8b0bf20eca60a9bf39e8a3ad14d98a62e9 | diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 04146a8..daeab97 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -1,4 +1,4 @@
-name: Build and test [Python 3.6, 3.7, 3.8]
+name: Build and test [Python 3.7, 3.8, 3.9]
on: [push, pull_request]
@@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.6, 3.7, 3.8]
+ python-version: [3.7, 3.8, 3.9]
steps:
- name: Checkout
diff --git a/microdf/tests/test_weighted.py b/microdf/tests/test_weighted.py
index c481b4f..bc23251 100644
--- a/microdf/tests/test_weighted.py
+++ b/microdf/tests/test_weighted.py
@@ -49,3 +49,14 @@ def test_weighted_sum():
# Test grouped.
mdf.weighted_sum(dfg, "x", "w", "g")
mdf.weighted_sum(dfg, ["x", "y"], "w", "g")
+
+
+def test_gini():
+ # Unweighted
+ mdf.gini(df, "x")
+ # Weighted
+ mdf.gini(df, "x", "w")
+ # Unweighted, grouped
+ mdf.gini(dfg, "x", groupby="g")
+ # Weighted, grouped
+ mdf.gini(dfg, "x", "w", groupby="g")
| Test Python 3.9
Pending https://github.com/PSLmodels/Tax-Calculator/pull/2522 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"microdf/tests/test_weighted.py::test_gini"
] | [
"microdf/tests/test_weighted.py::test_weighted_quantile",
"microdf/tests/test_weighted.py::test_weighted_median",
"microdf/tests/test_weighted.py::test_weighted_mean",
"microdf/tests/test_weighted.py::test_weighted_sum"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-12-24T19:06:35Z" | mit |
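For reference, the `groupby` support added in `PSLmodels__microdf-153` above can be exercised as in the new `test_gini`; the sketch below uses made-up toy data (not the test module's fixture) and assumes a microdf version containing this patch.

```python
import pandas as pd
import microdf as mdf

# Toy grouped data: value column x, weight column w, group column g.
df = pd.DataFrame({
    "x": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    "w": [4.0, 5.0, 6.0, 4.0, 5.0, 6.0],
    "g": ["a", "a", "a", "b", "b", "b"],
})

print(mdf.gini(df, "x", "w"))               # single weighted Gini index
print(mdf.gini(df, "x", "w", groupby="g"))  # one Gini index per group, returned as a Series
```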
|
PSLmodels__microdf-166 | diff --git a/microdf/generic.py b/microdf/generic.py
index 2b72bbd..654cba5 100644
--- a/microdf/generic.py
+++ b/microdf/generic.py
@@ -12,21 +12,29 @@ class MicroSeries(pd.Series):
:type weights: np.array
"""
super().__init__(*args, **kwargs)
- self.weights = weights
+ self.set_weights(weights)
def _init_micro(self, weights=None):
self.weights = weights
+ def handles_zero_weights(fn):
+ def safe_fn(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except ZeroDivisionError:
+ return np.NaN
+
+ return safe_fn
+
def set_weights(self, weights):
"""Sets the weight values.
:param weights: Array of weights.
:type weights: np.array.
-
- :returns: A Pandas Series multiplying the MicroSeries by its weight.
"""
- self.weights = weights
+ self.weights = pd.Series(weights)
+ @handles_zero_weights
def weight(self):
"""Calculates the weighted value of the MicroSeries.
@@ -34,13 +42,15 @@ class MicroSeries(pd.Series):
"""
return self.multiply(self.weights)
+ @handles_zero_weights
def sum(self):
"""Calculates the weighted sum of the MicroSeries.
:returns: The weighted sum.
"""
- return self.weight().sum()
+ return self.multiply(self.weights).sum()
+ @handles_zero_weights
def mean(self):
"""Calculates the weighted mean of the MicroSeries
@@ -48,6 +58,7 @@ class MicroSeries(pd.Series):
"""
return np.average(self.values, weights=self.weights)
+ @handles_zero_weights
def quantile(self, quantiles):
"""Calculates weighted quantiles of the MicroSeries.
@@ -76,6 +87,7 @@ class MicroSeries(pd.Series):
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
+ @handles_zero_weights
def median(self):
"""Calculates the weighted median of the MicroSeries.
@@ -83,6 +95,63 @@ class MicroSeries(pd.Series):
"""
return self.quantile(0.5)
+ def groupby(self, *args, **kwargs):
+ gb = super().groupby(*args, **kwargs)
+ gb.__class__ = MicroSeriesGroupBy
+ gb.weights = pd.Series(self.weights).groupby(*args, **kwargs)
+ return gb
+
+ def __getitem__(self, key):
+ result = super().__getitem__(key)
+ if isinstance(result, pd.Series):
+ weights = self.weights.__getitem__(key)
+ return MicroSeries(result, weights=weights)
+ return result
+
+
+class MicroSeriesGroupBy(pd.core.groupby.generic.SeriesGroupBy):
+ def __init__(self, weights=None, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.weights = weights
+
+ def _weighted_agg(func):
+ def via_micro_series(row, fn, *args, **kwargs):
+ return getattr(MicroSeries(row.a, weights=row.w), fn.__name__)(
+ *args, **kwargs
+ )
+
+ def _weighted_agg_fn(self, *args, **kwargs):
+ arrays = self.apply(np.array)
+ weights = self.weights.apply(np.array)
+ df = pd.DataFrame(dict(a=arrays, w=weights))
+ result = df.agg(
+ lambda row: via_micro_series(row, func, *args, **kwargs),
+ axis=1,
+ )
+ return result
+
+ return _weighted_agg_fn
+
+ @_weighted_agg
+ def weight(self):
+ return MicroSeries.weight(self)
+
+ @_weighted_agg
+ def sum(self):
+ return MicroSeries.sum(self)
+
+ @_weighted_agg
+ def mean(self):
+ return MicroSeries.mean(self)
+
+ @_weighted_agg
+ def quantile(self, quantiles):
+ return MicroSeries.quantile(self, quantiles)
+
+ @_weighted_agg
+ def median(self):
+ return MicroSeries.median(self)
+
class MicroDataFrame(pd.DataFrame):
def __init__(self, *args, weights=None, **kwargs):
@@ -96,6 +165,11 @@ class MicroDataFrame(pd.DataFrame):
super().__init__(*args, **kwargs)
self.weights = weights
self.weight_col = None
+ self._link_all_weights()
+
+ def __setitem__(self, *args, **kwargs):
+ super().__setitem__(*args, **kwargs)
+ self._link_all_weights()
def _link_weights(self, column):
# self[column] = ... triggers __setitem__, which forces pd.Series
@@ -132,3 +206,22 @@ class MicroDataFrame(pd.DataFrame):
self.weights = np.array(self[column])
self.weight_col = column
self._link_all_weights()
+
+ def groupby(self, by, *args, **kwargs):
+ """Returns a GroupBy object with MicroSeriesGroupBy objects for each column
+
+ :param by: column to group by
+ :type by: str
+
+ return: DataFrameGroupBy object with columns using weights
+ rtype: DataFrameGroupBy
+ """
+ gb = super().groupby(by, *args, **kwargs)
+ weights = pd.Series(self.weights).groupby(self[by], *args, **kwargs)
+ for col in self.columns: # df.groupby(...)[col]s use weights
+ if col != by:
+ res = gb[col]
+ res.__class__ = MicroSeriesGroupBy
+ res.weights = weights
+ setattr(gb, col, res)
+ return gb
| PSLmodels/microdf | db688dbab9b0fc84bbd7d5d096084ff8d6a9ff13 | diff --git a/microdf/tests/test_generic.py b/microdf/tests/test_generic.py
index 063e679..0cd6c57 100644
--- a/microdf/tests/test_generic.py
+++ b/microdf/tests/test_generic.py
@@ -2,6 +2,33 @@ import numpy as np
import microdf as mdf
+def test_df_init():
+ arr = np.array([0, 1, 1])
+ w = np.array([3, 0, 9])
+ df = mdf.MicroDataFrame({"a": arr}, weights=w)
+ assert df.a.mean() == np.average(arr, weights=w)
+
+ df = mdf.MicroDataFrame()
+ df["a"] = arr
+ df.set_weights(w)
+ assert df.a.mean() == np.average(arr, weights=w)
+
+ df = mdf.MicroDataFrame()
+ df["a"] = arr
+ df["w"] = w
+ df.set_weight_col("w")
+ assert df.a.mean() == np.average(arr, weights=w)
+
+
+def test_series_getitem():
+ arr = np.array([0, 1, 1])
+ w = np.array([3, 0, 9])
+ s = mdf.MicroSeries(arr, weights=w)
+ assert s[[1, 2]].sum() == np.sum(arr[[1, 2]] * w[[1, 2]])
+
+ assert s[1:3].sum() == np.sum(arr[1:3] * w[1:3])
+
+
def test_sum():
arr = np.array([0, 1, 1])
w = np.array([3, 0, 9])
| MicroSeries(list) causes TypeError: __init__() got an unexpected keyword argument 'column'
This crashes my ipython session:
```
import microdf as mdf
mdf.Series([1, 2, 3])
```
Error is long, includes `TypeError: __init__() got an unexpected keyword argument 'column'` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"microdf/tests/test_generic.py::test_df_init",
"microdf/tests/test_generic.py::test_series_getitem"
] | [
"microdf/tests/test_generic.py::test_sum",
"microdf/tests/test_generic.py::test_mean"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-07T19:22:30Z" | mit |
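The `PSLmodels__microdf-166` patch above introduces a `handles_zero_weights` guard that turns an undefined weighted statistic into `NaN` instead of an exception. A minimal standalone sketch of that pattern (plain numpy, not microdf's actual class):

```python
import numpy as np

def handles_zero_weights(fn):
    """Return NaN when an all-zero weight vector makes the statistic undefined."""
    def safe_fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ZeroDivisionError:
            return np.nan
    return safe_fn

@handles_zero_weights
def weighted_mean(values, weights):
    return np.average(values, weights=weights)

print(weighted_mean([1.0, 2.0], [1.0, 3.0]))  # 1.75
print(weighted_mean([1.0, 2.0], [0.0, 0.0]))  # nan instead of ZeroDivisionError
```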
|
PSLmodels__microdf-188 | diff --git a/microdf/generic.py b/microdf/generic.py
index 9c19d0d..f6a91ad 100644
--- a/microdf/generic.py
+++ b/microdf/generic.py
@@ -239,6 +239,19 @@ class MicroSeries(pd.Series):
b50 = self.bottom_50_pct_share()
return t10 / b50
+ @vector_function
+ def cumsum(self) -> pd.Series:
+ return pd.Series(self * self.weights).cumsum()
+
+ @vector_function
+ def rank(self, pct=False) -> pd.Series:
+ order = np.argsort(self.values)
+ inverse_order = np.argsort(order)
+ ranks = np.array(self.weights)[order].cumsum()[inverse_order]
+ if pct:
+ ranks /= self.weights.sum()
+ return pd.Series(ranks)
+
def groupby(self, *args, **kwargs):
gb = super().groupby(*args, **kwargs)
gb.__class__ = MicroSeriesGroupBy
@@ -584,6 +597,20 @@ class MicroDataFrame(pd.DataFrame):
return MicroDataFrame(result, weights=weights)
return result
+ def catch_series_relapse(self):
+ for col in self.columns:
+ if self[col].__class__ == pd.Series:
+ self._link_weights(col)
+
+ def __setattr__(self, key, value):
+ super().__setattr__(key, value)
+ self.catch_series_relapse()
+
+ def reset_index(self):
+ res = super().reset_index()
+ res = MicroDataFrame(res, weights=self.weights)
+ return res
+
def groupby(self, by: Union[str, list], *args, **kwargs):
"""Returns a GroupBy object with MicroSeriesGroupBy objects for each column
| PSLmodels/microdf | 995a0c703af8bcc84809915df155336d9473d043 | diff --git a/microdf/tests/test_generic.py b/microdf/tests/test_generic.py
index ca55fc8..aadd6b0 100644
--- a/microdf/tests/test_generic.py
+++ b/microdf/tests/test_generic.py
@@ -1,4 +1,4 @@
-from microdf.generic import MicroDataFrame
+from microdf.generic import MicroDataFrame, MicroSeries
import numpy as np
import microdf as mdf
import pandas as pd
@@ -118,3 +118,37 @@ def test_concat():
mdf_wide = mdf.concat([df1, df2], axis=1)
assert isinstance(mdf_wide, mdf.MicroDataFrame)
assert mdf_wide.weights.equals(df1.weights)
+
+
+def test_set_index():
+ d = mdf.MicroDataFrame(dict(x=[1, 2, 3]), weights=[4, 5, 6])
+ assert d.x.__class__ == MicroSeries
+ d.index = [1, 2, 3]
+ assert d.x.__class__ == MicroSeries
+
+
+def test_reset_index():
+ d = mdf.MicroDataFrame(dict(x=[1, 2, 3]), weights=[4, 5, 6])
+ assert d.reset_index().__class__ == MicroDataFrame
+
+
+def test_cumsum():
+ s = mdf.MicroSeries([1, 2, 3], weights=[4, 5, 6])
+ assert np.array_equal(s.cumsum().values, [4, 14, 32])
+
+ s = mdf.MicroSeries([2, 1, 3], weights=[5, 4, 6])
+ assert np.array_equal(s.cumsum().values, [10, 14, 32])
+
+ s = mdf.MicroSeries([3, 1, 2], weights=[6, 4, 5])
+ assert np.array_equal(s.cumsum().values, [18, 22, 32])
+
+
+def test_rank():
+ s = mdf.MicroSeries([1, 2, 3], weights=[4, 5, 6])
+ assert np.array_equal(s.rank().values, [4, 9, 15])
+
+ s = mdf.MicroSeries([3, 1, 2], weights=[6, 4, 5])
+ assert np.array_equal(s.rank().values, [15, 4, 9])
+
+ s = mdf.MicroSeries([2, 1, 3], weights=[5, 4, 6])
+ assert np.array_equal(s.rank().values, [9, 4, 15])
| Changing a MicroDataFrame's index makes its columns Series instead of MicroSeries
```
d = mdf.MicroDataFrame(dict(x=[1, 2, 3]), weights=[4, 5, 6])
d.x.__class__
```
>microdf.generic.MicroSeries
```
d = mdf.MicroDataFrame(dict(x=[1, 2, 3]), weights=[4, 5, 6])
d.index = [1, 2, 3]
d.x.__class__
```
>pandas.core.series.Series | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"microdf/tests/test_generic.py::test_set_index",
"microdf/tests/test_generic.py::test_reset_index",
"microdf/tests/test_generic.py::test_cumsum",
"microdf/tests/test_generic.py::test_rank"
] | [
"microdf/tests/test_generic.py::test_df_init",
"microdf/tests/test_generic.py::test_series_getitem",
"microdf/tests/test_generic.py::test_sum",
"microdf/tests/test_generic.py::test_mean",
"microdf/tests/test_generic.py::test_poverty_count",
"microdf/tests/test_generic.py::test_median",
"microdf/tests/test_generic.py::test_unweighted_groupby",
"microdf/tests/test_generic.py::test_multiple_groupby",
"microdf/tests/test_generic.py::test_concat"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-25T09:09:41Z" | mit |
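The weighted `rank` added in `PSLmodels__microdf-188` reduces to a cumulative sum of weights taken in value order and mapped back to the original positions. A standalone numpy sketch of the same computation (illustrative only; the real method lives on `MicroSeries`):

```python
import numpy as np

def weighted_rank(values, weights, pct=False):
    values = np.asarray(values)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(values)          # positions sorted by value
    inverse_order = np.argsort(order)   # map sorted positions back to input order
    ranks = weights[order].cumsum()[inverse_order]
    if pct:
        ranks /= weights.sum()
    return ranks

print(weighted_rank([3, 1, 2], [6, 4, 5]))  # [15.  4.  9.], matching the new test_rank
```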
|
PSLmodels__microdf-219 | diff --git a/microdf/generic.py b/microdf/generic.py
index 4b31aba..b0906cf 100644
--- a/microdf/generic.py
+++ b/microdf/generic.py
@@ -373,6 +373,11 @@ class MicroSeries(pd.Series):
def __pos__(self, other):
return MicroSeries(super().__pos__(other), weights=self.weights)
+ def __repr__(self):
+ return pd.DataFrame(
+ dict(value=self.values, weight=self.weights.values)
+ ).__repr__()
+
MicroSeries.SCALAR_FUNCTIONS = [
fn
@@ -620,7 +625,10 @@ class MicroDataFrame(pd.DataFrame):
def __getitem__(self, key):
result = super().__getitem__(key)
if isinstance(result, pd.DataFrame):
- weights = self.weights
+ try:
+ weights = self.weights[key]
+ except Exception:
+ weights = self.weights
return MicroDataFrame(result, weights=weights)
return result
@@ -755,3 +763,8 @@ class MicroDataFrame(pd.DataFrame):
"""
in_poverty = income < threshold
return in_poverty.sum()
+
+ def __repr__(self):
+ df = pd.DataFrame(self)
+ df["weight"] = self.weights
+ return df[[df.columns[-1]] + list(df.columns[:-1])].__repr__()
| PSLmodels/microdf | 8802af33cdae9b37733a33aa307ee991246b0716 | diff --git a/microdf/tests/test_generic.py b/microdf/tests/test_generic.py
index 3dfaffa..3a1a58e 100644
--- a/microdf/tests/test_generic.py
+++ b/microdf/tests/test_generic.py
@@ -200,3 +200,9 @@ def test_subset():
df_no_z_diff_weights = df_no_z.copy()
df_no_z_diff_weights.weights += 1
assert not df[["x", "y"]].equals(df_no_z_diff_weights)
+
+
+def test_value_subset():
+ d = mdf.MicroDataFrame({"x": [1, 2, 3], "y": [1, 2, 2]}, weights=[4, 5, 6])
+ d2 = d[d.y > 1]
+ assert d2.y.shape == d2.weights.shape
| Subsetting a MicroDataFrame doesn't subset weights
Example:
```
d = mdf.MicroDataFrame({"x": [1, 2, 3], "y": [1, 2, 2]}, weights=[4, 5, 6])
d2 = d[d.y > 1]
d2.shape # (2, 2)
d2.weights.shape # (3,)
```
Among other things, this breaks commands like `d[d.y > 1].mean()` with:
>TypeError: Axis must be specified when shapes of a and weights differ.
This doesn't affect MicroSeries:
```
s = mdf.MicroSeries([1, 2, 3], weights=[4, 5, 6])
s2 = s[s > 1]
s2.shape # (2,)
s2.weights.shape # (2,)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"microdf/tests/test_generic.py::test_value_subset"
] | [
"microdf/tests/test_generic.py::test_df_init",
"microdf/tests/test_generic.py::test_series_getitem",
"microdf/tests/test_generic.py::test_sum",
"microdf/tests/test_generic.py::test_mean",
"microdf/tests/test_generic.py::test_poverty_count",
"microdf/tests/test_generic.py::test_median",
"microdf/tests/test_generic.py::test_unweighted_groupby",
"microdf/tests/test_generic.py::test_multiple_groupby",
"microdf/tests/test_generic.py::test_concat",
"microdf/tests/test_generic.py::test_set_index",
"microdf/tests/test_generic.py::test_reset_index",
"microdf/tests/test_generic.py::test_cumsum",
"microdf/tests/test_generic.py::test_rank",
"microdf/tests/test_generic.py::test_percentile_rank",
"microdf/tests/test_generic.py::test_quartile_rank",
"microdf/tests/test_generic.py::test_quintile_rank",
"microdf/tests/test_generic.py::test_decile_rank_rank",
"microdf/tests/test_generic.py::test_copy_equals",
"microdf/tests/test_generic.py::test_subset"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2021-03-29T11:35:13Z" | mit |
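With the `PSLmodels__microdf-219` fix in place, boolean subsetting keeps values and weights aligned. A short check mirroring the example in the problem statement and the new `test_value_subset` (toy data only; assumes a microdf version that includes this patch):

```python
import microdf as mdf

d = mdf.MicroDataFrame({"x": [1, 2, 3], "y": [1, 2, 2]}, weights=[4, 5, 6])
d2 = d[d.y > 1]

print(d2.shape)          # (2, 2)
print(d2.weights.shape)  # (2,) -- weights are now subset together with the rows
```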
|
PagerDuty__pdpyras-54 | diff --git a/pdpyras.py b/pdpyras.py
index 506440d..2143baf 100644
--- a/pdpyras.py
+++ b/pdpyras.py
@@ -211,7 +211,7 @@ def tokenize_url_path(url, baseurl='https://api.pagerduty.com'):
:type method: str
:type url: str
:type baseurl: str
- :rtype: tuple
+ :rtype: tuple
"""
urlnparams = url.split('#')[0].split('?') # Ignore all #'s / params
url_nodes = urlnparams[0].lstrip('/').split('/')
@@ -236,7 +236,7 @@ def tokenize_url_path(url, baseurl='https://api.pagerduty.com'):
# Tokenize / classify the URL now:
tokenized_nodes = [path_nodes[0]]
if len(path_nodes) >= 3:
- # It's an endpoint like one of the following
+ # It's an endpoint like one of the following
# /{resource}/{id}/{sub-resource}
# We're interested in {resource} and {sub_resource}.
# More deeply-nested endpoints not known to exist.
@@ -340,7 +340,7 @@ class PDSession(requests.Session):
* ``-1`` to retry infinitely
* ``0`` to return the `requests.Response`_ object and exit (which is the
- default behavior)
+ default behavior)
* ``n``, where ``n > 0``, to retry ``n`` times (or up
to :attr:`max_http_attempts` total for all statuses, whichever is
encountered first), and raise a :class:`PDClientError` after that many
@@ -609,7 +609,7 @@ class PDSession(requests.Session):
def stagger_cooldown(self, val):
if type(val) not in [float, int] or val<0:
raise ValueError("Cooldown randomization factor stagger_cooldown "
- "must be a positive real number")
+ "must be a positive real number")
self._stagger_cooldown = val
@property
@@ -642,7 +642,7 @@ class EventsAPISession(PDSession):
@property
def auth_header(self):
- return {'X-Routing-Key': self.api_key}
+ return {}
def acknowledge(self, dedup_key):
"""
@@ -691,10 +691,16 @@ class EventsAPISession(PDSession):
:returns:
The deduplication key of the incident, if any.
"""
+
actions = ('trigger', 'acknowledge', 'resolve')
if action not in actions:
raise ValueError("Event action must be one of: "+', '.join(actions))
- event = {'event_action':action}
+
+ event = {
+ 'event_action':action,
+ 'routing_key': self.api_key
+ }
+
event.update(properties)
if isinstance(dedup_key, string_types):
event['dedup_key'] = dedup_key
@@ -797,7 +803,7 @@ class APISession(PDSession):
:members:
"""
- api_call_counts = None
+ api_call_counts = None
"""A dict object recording the number of API calls per endpoint"""
api_time = None
@@ -834,7 +840,7 @@ class APISession(PDSession):
@property
def auth_type(self):
"""
- Defines the method of API authentication.
+ Defines the method of API authentication.
By default this is "token"; if "oauth2", the API key will be used.
"""
@@ -917,12 +923,12 @@ class APISession(PDSession):
query_params.update({'query':query})
# When determining uniqueness, web/the API doesn't care about case.
simplify = lambda s: s.lower()
- search_term = simplify(query)
+ search_term = simplify(query)
equiv = lambda s: simplify(s[attribute]) == search_term
obj_iter = self.iter_all(resource, params=query_params)
return next(iter(filter(equiv, obj_iter)), None)
- def iter_all(self, path, params=None, paginate=True, page_size=None,
+ def iter_all(self, path, params=None, paginate=True, page_size=None,
item_hook=None, total=False):
"""
Iterator for the contents of an index endpoint or query.
@@ -947,8 +953,8 @@ class APISession(PDSession):
pagination yet, i.e. "nested" endpoints like (as of this writing):
``/users/{id}/contact_methods`` and ``/services/{id}/integrations``
:param page_size:
- If set, the ``page_size`` argument will override the ``default_page_size``
- parameter on the session and set the ``limit`` parameter to a custom
+ If set, the ``page_size`` argument will override the ``default_page_size``
+ parameter on the session and set the ``limit`` parameter to a custom
value (default is 100), altering the number of pagination results.
:param item_hook:
Callable object that will be invoked for each iteration, i.e. for
@@ -1017,7 +1023,7 @@ class APISession(PDSession):
break
try:
response = r.json()
- except ValueError:
+ except ValueError:
self.log.debug("Stopping iteration on endpoint \"%s\"; API "
"responded with invalid JSON.", path)
break
@@ -1035,7 +1041,7 @@ class APISession(PDSession):
"however many can be gotten, will be included.", path)
if 'total' in response:
total_count = response['total']
- else:
+ else:
self.log.debug("Pagination and the \"total\" parameter "
"are enabled in iteration, but the index endpoint %s "
"responded with no \"total\" property in the response. "
@@ -1043,7 +1049,7 @@ class APISession(PDSession):
"first retrieving all records.", path)
offset += data['limit']
for result in response[r_name]:
- n += 1
+ n += 1
# Call a callable object for each item, i.e. to print progress:
if hasattr(item_hook, '__call__'):
item_hook(result, n, total_count)
@@ -1054,7 +1060,7 @@ class APISession(PDSession):
"""
Performs a GET request, returning the JSON-decoded body as a dictionary
- :raises PDClientError: In the event of HTTP error
+ :raises PDClientError: In the event of HTTP error
"""
return self.get(path, **kw)
@@ -1063,7 +1069,7 @@ class APISession(PDSession):
"""
Performs a POST request, returning the JSON-decoded body as a dictionary
- :raises PDClientError: In the event of HTTP error
+ :raises PDClientError: In the event of HTTP error
"""
return self.post(path, **kw)
@@ -1130,9 +1136,9 @@ class APISession(PDSession):
:param method:
Method of the request
- :param response:
+ :param response:
Response object
- :param suffix:
+ :param suffix:
Optional suffix to append to the key
:type method: str
:type response: `requests.Response`_
@@ -1296,7 +1302,7 @@ class APISession(PDSession):
"""Truncated token for secure display/identification purposes."""
return last_4(self.api_key)
-class PDClientError(Exception):
+class PDClientError(Exception):
"""
General API errors base class.
"""
| PagerDuty/pdpyras | 4fb715ab215a500262cb61005b1a9c70ba190179 | diff --git a/test_pdpyras.py b/test_pdpyras.py
index 76217fd..a2d1899 100755
--- a/test_pdpyras.py
+++ b/test_pdpyras.py
@@ -91,12 +91,15 @@ class EventsSessionTest(SessionTest):
'https://events.pagerduty.com/v2/enqueue',
parent.request.call_args[0][1])
self.assertDictContainsSubset(
- {'Content-Type': 'application/json',
- 'X-Routing-Key': 'routingkey'},
+ {'Content-Type': 'application/json'},
+ parent.request.call_args[1]['headers'])
+ self.assertNotIn(
+ 'X-Routing-Key',
parent.request.call_args[1]['headers'])
self.assertEqual(
{
'event_action':'trigger',
+ 'routing_key':'routingkey',
'payload':{
'summary': 'testing 123',
'source': 'triggered.from.pdpyras',
@@ -108,12 +111,20 @@ class EventsSessionTest(SessionTest):
parent.request.call_args[1]['json'])
ddk = sess.resolve('abc123')
self.assertEqual(
- {'event_action':'resolve', 'dedup_key':'abc123'},
+ {
+ 'event_action':'resolve',
+ 'dedup_key':'abc123',
+ 'routing_key':'routingkey',
+ },
parent.request.call_args[1]['json'])
ddk = sess.acknowledge('abc123')
self.assertEqual(
- {'event_action':'acknowledge', 'dedup_key':'abc123'},
+ {
+ 'event_action':'acknowledge',
+ 'dedup_key':'abc123',
+ 'routing_key':'routingkey',
+ },
parent.request.call_args[1]['json'])
class APISessionTest(SessionTest):
| EventsAPISession uses `X-Routing-Key` instead of setting `routing_key` in payload
EventsAPISession uses the `X-Routing-Key` header to specify the routing key and does not set the `routing_key` parameter in the request payload.
The API specification lists `routing_key` as a required parameter of the request body.
https://developer.pagerduty.com/docs/events-api-v2/trigger-events/ | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test_pdpyras.py::EventsSessionTest::test_send_event"
] | [
"test_pdpyras.py::APISessionTest::test_find",
"test_pdpyras.py::APISessionTest::test_iter_all",
"test_pdpyras.py::APISessionTest::test_oauth_headers",
"test_pdpyras.py::APISessionTest::test_persist",
"test_pdpyras.py::APISessionTest::test_postprocess",
"test_pdpyras.py::APISessionTest::test_profiler_key",
"test_pdpyras.py::APISessionTest::test_raise_on_error",
"test_pdpyras.py::APISessionTest::test_request",
"test_pdpyras.py::APISessionTest::test_resource_envelope",
"test_pdpyras.py::APISessionTest::test_resource_path",
"test_pdpyras.py::APISessionTest::test_rget",
"test_pdpyras.py::APISessionTest::test_subdomain",
"test_pdpyras.py::APISessionTest::test_truncated_token",
"test_pdpyras.py::APIUtilsTest::test_plural_deplural",
"test_pdpyras.py::APIUtilsTest::test_tokenize_url_path"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-09T04:58:55Z" | mit |
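For context on `PagerDuty__pdpyras-54` above: the Events API v2 expects the routing key inside the JSON body rather than in a header. A minimal sketch of a conforming trigger request using plain `requests` (the routing key value is a placeholder, and the summary/source values simply echo the test's example):

```python
import requests

event = {
    "routing_key": "your-32-character-integration-key",  # placeholder, not a real key
    "event_action": "trigger",
    "payload": {
        "summary": "testing 123",
        "source": "triggered.from.pdpyras",
        "severity": "critical",
    },
}
response = requests.post("https://events.pagerduty.com/v2/enqueue", json=event)
print(response.status_code, response.json())
```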
|
Parallel-in-Time__pySDC-235 | diff --git a/pySDC/implementations/convergence_controller_classes/adaptivity.py b/pySDC/implementations/convergence_controller_classes/adaptivity.py
index b26fec527..06fc27f07 100644
--- a/pySDC/implementations/convergence_controller_classes/adaptivity.py
+++ b/pySDC/implementations/convergence_controller_classes/adaptivity.py
@@ -325,6 +325,7 @@ class AdaptivityResidual(AdaptivityBase):
rule to update the step size. Instead of giving a local tolerance that we try to hit as closely as possible, we set
two thresholds for the residual. When we exceed the upper one, we reduce the step size by a factor of 2 and if the
residual falls below the lower threshold, we double the step size.
+ Please setup these parameters as "e_tol" and "e_tol_low".
"""
def setup(self, controller, params, description, **kwargs):
@@ -349,7 +350,7 @@ class AdaptivityResidual(AdaptivityBase):
"control_order": -45,
"e_tol_low": 0,
"e_tol": np.inf,
- "max_restarts": 2 if "e_tol_low" in params else None,
+ "max_restarts": 99 if "e_tol_low" in params else None,
}
return {**defaults, **params}
diff --git a/pySDC/projects/Resilience/piline.py b/pySDC/projects/Resilience/piline.py
index 5ad160dc3..0d301cab5 100644
--- a/pySDC/projects/Resilience/piline.py
+++ b/pySDC/projects/Resilience/piline.py
@@ -128,8 +128,9 @@ def get_data(stats, recomputed=False):
'v1': np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
'v2': np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
'p3': np.array([me[1][2] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
- 't': np.array(get_sorted(stats, type='u', recomputed=recomputed))[:, 0],
- 'dt': np.array(get_sorted(stats, type='dt', recomputed=recomputed)),
+ 't': np.array([me[0] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
+ 'dt': np.array([me[1] for me in get_sorted(stats, type='dt', recomputed=recomputed)]),
+ 't_dt': np.array([me[0] for me in get_sorted(stats, type='dt', recomputed=recomputed)]),
'e_em': np.array(get_sorted(stats, type='e_embedded', recomputed=recomputed))[:, 1],
'e_ex': np.array(get_sorted(stats, type='e_extrapolated', recomputed=recomputed))[:, 1],
'restarts': np.array(get_sorted(stats, type='restart', recomputed=None))[:, 1],
@@ -154,7 +155,7 @@ def plot_error(data, ax, use_adaptivity=True, plot_restarts=False):
None
"""
setup_mpl_from_accuracy_check()
- ax.plot(data['dt'][:, 0], data['dt'][:, 1], color='black')
+ ax.plot(data['t_dt'], data['dt'], color='black')
e_ax = ax.twinx()
e_ax.plot(data['t'], data['e_em'], label=r'$\epsilon_\mathrm{embedded}$')
@@ -286,7 +287,7 @@ def check_solution(data, use_adaptivity, num_procs, generate_reference=False):
'p3': data['p3'][-1],
'e_em': data['e_em'][-1],
'e_ex': data['e_ex'][data['e_ex'] != [None]][-1],
- 'dt': data['dt'][-1][1],
+ 'dt': data['dt'][-1],
'restarts': data['restarts'].sum(),
'sweeps': data['sweeps'].sum(),
't': data['t'][-1],
@@ -309,6 +310,37 @@ def check_solution(data, use_adaptivity, num_procs, generate_reference=False):
), f'{error_msg} Expected {k}={expected[k]:.4e}, got {k}={got[k]:.4e}'
+def residual_adaptivity(plot=False):
+ """
+ Make a run with adaptivity based on the residual.
+ """
+ from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityResidual
+
+ max_res = 1e-8
+ custom_description = {'convergence_controllers': {}}
+ custom_description['convergence_controllers'][AdaptivityResidual] = {
+ 'e_tol': max_res,
+ 'e_tol_low': max_res / 10,
+ }
+ stats, _, _ = run_piline(custom_description, num_procs=1)
+
+ residual = get_sorted(stats, type='residual_post_step', recomputed=False)
+ dt = get_sorted(stats, type='dt', recomputed=False)
+
+ if plot:
+ fig, ax = plt.subplots()
+ dt_ax = ax.twinx()
+
+ ax.plot([me[0] for me in residual], [me[1] for me in residual])
+ dt_ax.plot([me[0] for me in dt], [me[1] for me in dt], color='black')
+ plt.show()
+
+ max_residual = max([me[1] for me in residual])
+ assert max_residual < max_res, f'Max. allowed residual is {max_res:.2e}, but got {max_residual:.2e}!'
+ dt_std = np.std([me[1] for me in dt])
+ assert dt_std != 0, f'Expected the step size to change, but standard deviation is {dt_std:.2e}!'
+
+
def main():
"""
Make a variety of tests to see if Hot Rod and Adaptivity work in serial as well as MSSDC.
@@ -342,4 +374,5 @@ def main():
if __name__ == "__main__":
+ residual_adaptivity()
main()
| Parallel-in-Time/pySDC | 994530de81e58b85952343cd34e3ba7890cc1975 | diff --git a/pySDC/tests/test_projects/test_resilience/test_piline.py b/pySDC/tests/test_projects/test_resilience/test_piline.py
index 62afc601f..5a2896de0 100644
--- a/pySDC/tests/test_projects/test_resilience/test_piline.py
+++ b/pySDC/tests/test_projects/test_resilience/test_piline.py
@@ -6,3 +6,10 @@ def test_main():
from pySDC.projects.Resilience.piline import main
main()
+
+
[email protected]
+def test_residual_adaptivity():
+ from pySDC.projects.Resilience.piline import residual_adaptivity
+
+ residual_adaptivity()
| Default methods in convergence controller
It looks like the default methods of the `AdaptivityResidual` class are [never called](https://parallel-in-time.org/pySDC/coverage/d_c28af68f8f845bbd_adaptivity_py.html#t290). The same for the [`BasicRestartingMPI`](https://parallel-in-time.org/pySDC/coverage/d_c28af68f8f845bbd_basic_restarting_py.html#t232) class. Would it make sense to make this an abstract class? Or add a test for the default case? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"pySDC/tests/test_projects/test_resilience/test_piline.py::test_main",
"pySDC/tests/test_projects/test_resilience/test_piline.py::test_residual_adaptivity"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-01-02T15:07:01Z" | bsd-2-clause |
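The `residual_adaptivity` run added in `Parallel-in-Time__pySDC-235` exercises the rule described in the `AdaptivityResidual` docstring: halve the step size when the residual exceeds `e_tol`, double it when the residual drops below `e_tol_low`. A plain-Python sketch of that rule (not pySDC code):

```python
def adapt_step_size(dt, residual, e_tol, e_tol_low):
    """Residual-based step-size control with an upper and a lower threshold."""
    if residual > e_tol:
        return dt / 2.0   # too inaccurate: redo the step with half the size
    if residual < e_tol_low:
        return dt * 2.0   # more accurate than required: take larger steps
    return dt             # residual within [e_tol_low, e_tol]: keep the step size

print(adapt_step_size(0.1, residual=2e-8, e_tol=1e-8, e_tol_low=1e-9))   # 0.05
print(adapt_step_size(0.1, residual=5e-10, e_tol=1e-8, e_tol_low=1e-9))  # 0.2
```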
|
Parquery__icontract-112 | diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index 1b27f5a..1dc79d3 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -66,6 +66,27 @@ def _kwargs_from_call(param_names: List[str], kwdefaults: Dict[str, Any], args:
return mapping
+def _not_check(check: Any, contract: Contract) -> bool:
+ """
+ Negate the check value of a condition and capture missing boolyness (*e.g.*, when check is a numpy array).
+
+ :param check: value of the evaluated condition
+ :param contract: corresponding to the check
+ :return: negated check
+ :raise: ValueError if the check could not be negated
+ """
+ try:
+ return not check
+ except Exception as err: # pylint: disable=broad-except
+ msg_parts = [] # type: List[str]
+ if contract.location is not None:
+ msg_parts.append("{}:\n".format(contract.location))
+
+ msg_parts.append('Failed to negate the evaluation of the condition.')
+
+ raise ValueError(''.join(msg_parts)) from err
+
+
def _assert_precondition(contract: Contract, resolved_kwargs: Mapping[str, Any]) -> None:
"""
Assert that the contract holds as a precondition.
@@ -88,7 +109,7 @@ def _assert_precondition(contract: Contract, resolved_kwargs: Mapping[str, Any])
check = contract.condition(**condition_kwargs)
- if not check:
+ if _not_check(check=check, contract=contract):
if contract.error is not None and (inspect.ismethod(contract.error) or inspect.isfunction(contract.error)):
assert contract.error_arg_set is not None, "Expected error_arg_set non-None if contract.error a function."
assert contract.error_args is not None, "Expected error_args non-None if contract.error a function."
@@ -127,7 +148,7 @@ def _assert_invariant(contract: Contract, instance: Any) -> None:
else:
check = contract.condition()
- if not check:
+ if _not_check(check=check, contract=contract):
if contract.error is not None and (inspect.ismethod(contract.error) or inspect.isfunction(contract.error)):
assert contract.error_arg_set is not None, "Expected error_arg_set non-None if contract.error a function."
assert contract.error_args is not None, "Expected error_args non-None if contract.error a function."
@@ -199,7 +220,7 @@ def _assert_postcondition(contract: Contract, resolved_kwargs: Mapping[str, Any]
check = contract.condition(**condition_kwargs)
- if not check:
+ if _not_check(check=check, contract=contract):
if contract.error is not None and (inspect.ismethod(contract.error) or inspect.isfunction(contract.error)):
assert contract.error_arg_set is not None, "Expected error_arg_set non-None if contract.error a function."
assert contract.error_args is not None, "Expected error_args non-None if contract.error a function."
| Parquery/icontract | 013cb1bb0294097eaa8c43776bfd4d73342a1655 | diff --git a/tests/test_invariant.py b/tests/test_invariant.py
index 114649e..302bb5d 100644
--- a/tests/test_invariant.py
+++ b/tests/test_invariant.py
@@ -9,6 +9,7 @@ from typing import Optional # pylint: disable=unused-import
import icontract
import tests.error
+import tests.mock
class TestOK(unittest.TestCase):
@@ -587,3 +588,19 @@ class TestInvalid(unittest.TestCase):
self.assertIsNotNone(val_err)
self.assertEqual("Expected an invariant condition with at most an argument 'self', but got: ['self', 'z']",
str(val_err))
+
+ def test_no_boolyness(self):
+ @icontract.invariant(lambda self: tests.mock.NumpyArray([True, False]))
+ class A:
+ def __init__(self) -> None:
+ pass
+
+ value_error = None # type: Optional[ValueError]
+ try:
+ _ = A()
+ except ValueError as err:
+ value_error = err
+
+ self.assertIsNotNone(value_error)
+ self.assertEqual('Failed to negate the evaluation of the condition.',
+ tests.error.wo_mandatory_location(str(value_error)))
diff --git a/tests/test_postcondition.py b/tests/test_postcondition.py
index 3ea09f0..7e27bdc 100644
--- a/tests/test_postcondition.py
+++ b/tests/test_postcondition.py
@@ -11,6 +11,7 @@ from typing import Optional, List, Type # pylint: disable=unused-import
import icontract
import tests.error
+import tests.mock
class TestOK(unittest.TestCase):
@@ -438,3 +439,18 @@ class TestInvalid(unittest.TestCase):
self.assertEqual("The argument(s) of the postcondition error have not been set: ['z']. "
"Does the original function define them? Did you supply them in the call?",
tests.error.wo_mandatory_location(str(type_error)))
+
+ def test_no_boolyness(self):
+ @icontract.ensure(lambda: tests.mock.NumpyArray([True, False]))
+ def some_func() -> None:
+ pass
+
+ value_error = None # type: Optional[ValueError]
+ try:
+ some_func()
+ except ValueError as err:
+ value_error = err
+
+ self.assertIsNotNone(value_error)
+ self.assertEqual('Failed to negate the evaluation of the condition.',
+ tests.error.wo_mandatory_location(str(value_error)))
diff --git a/tests/test_precondition.py b/tests/test_precondition.py
index 01b3e99..83ba7eb 100644
--- a/tests/test_precondition.py
+++ b/tests/test_precondition.py
@@ -12,6 +12,7 @@ from typing import Optional # pylint: disable=unused-import
import icontract
import tests.error
+import tests.mock
class TestOK(unittest.TestCase):
@@ -450,6 +451,21 @@ class TestInvalid(unittest.TestCase):
"Does the original function define them? Did you supply them in the call?",
tests.error.wo_mandatory_location(str(type_error)))
+ def test_no_boolyness(self):
+ @icontract.require(lambda: tests.mock.NumpyArray([True, False]))
+ def some_func() -> None:
+ pass
+
+ value_error = None # type: Optional[ValueError]
+ try:
+ some_func()
+ except ValueError as err:
+ value_error = err
+
+ self.assertIsNotNone(value_error)
+ self.assertEqual('Failed to negate the evaluation of the condition.',
+ tests.error.wo_mandatory_location(str(value_error)))
+
if __name__ == '__main__':
unittest.main()
| catch exceptions in boolyness of check
This is very much related to #97
Consider this:
```
$ cat i.py
from icontract import require, ensure
import numpy as np
@require(lambda x: np.array([True, False]))
def frobulate(x: int) -> int:
return x
$ python -c "import i; i.frobulate(1)"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/james/.conda/envs/time_gym35/lib/python3.5/site-packages/icontract/_checkers.py", line 297, in wrapper
_assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
File "/home/james/.conda/envs/time_gym35/lib/python3.5/site-packages/icontract/_checkers.py", line 91, in _assert_precondition
if not check:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
If you are doing numeric programming with icontract, it's very likely that at some point you will mess up and return a bool vector rather than a bool. At the moment, it's impossible to know where you did this. If you have many requirements you have no decent way of debugging them. You might end up commenting them out one by one or moving them to plain old asserts in the function.
It's annoying that numpy arrays are neither true nor false, but since they are such a large part of the ecosystem, perhaps we could catch the ValueError during contract evaluation...
i.e.
try:
    falsiness = not check
except ValueError:
    print('useful info on where the exception occurred')
    raise
if falsiness:
    ...
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_invariant.py::TestInvalid::test_no_boolyness",
"tests/test_postcondition.py::TestInvalid::test_no_boolyness",
"tests/test_precondition.py::TestInvalid::test_no_boolyness"
] | [
"tests/test_invariant.py::TestOK::test_class_method",
"tests/test_invariant.py::TestOK::test_init",
"tests/test_invariant.py::TestOK::test_instance_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_private_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_protected_method",
"tests/test_invariant.py::TestOK::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestOK::test_magic_method",
"tests/test_invariant.py::TestOK::test_private_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_protected_method_may_violate_inv",
"tests/test_invariant.py::TestViolation::test_init",
"tests/test_invariant.py::TestViolation::test_inv_as_precondition",
"tests/test_invariant.py::TestViolation::test_inv_ok_but_post_violated",
"tests/test_invariant.py::TestViolation::test_inv_violated_after_pre",
"tests/test_invariant.py::TestViolation::test_inv_violated_but_post_ok",
"tests/test_invariant.py::TestViolation::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestViolation::test_magic_method",
"tests/test_invariant.py::TestViolation::test_method",
"tests/test_invariant.py::TestViolation::test_multiple_invs_first_violated",
"tests/test_invariant.py::TestViolation::test_multiple_invs_last_violated",
"tests/test_invariant.py::TestProperty::test_property_deleter",
"tests/test_invariant.py::TestProperty::test_property_getter",
"tests/test_invariant.py::TestProperty::test_property_setter",
"tests/test_invariant.py::TestError::test_as_function",
"tests/test_invariant.py::TestError::test_as_function_with_empty_args",
"tests/test_invariant.py::TestToggling::test_disabled",
"tests/test_invariant.py::TestInvalid::test_with_invalid_arguments",
"tests/test_postcondition.py::TestOK::test_with_condition_as_lambda",
"tests/test_postcondition.py::TestViolation::test_only_result",
"tests/test_postcondition.py::TestViolation::test_with_condition",
"tests/test_postcondition.py::TestViolation::test_with_default_values_outer",
"tests/test_postcondition.py::TestViolation::test_with_description",
"tests/test_postcondition.py::TestViolation::test_with_stacked_decorators",
"tests/test_postcondition.py::TestError::test_as_function",
"tests/test_postcondition.py::TestError::test_with_different_args_from_condition",
"tests/test_postcondition.py::TestError::test_with_empty_args",
"tests/test_postcondition.py::TestToggling::test_disabled",
"tests/test_postcondition.py::TestInClass::test_deleter",
"tests/test_postcondition.py::TestInClass::test_getter",
"tests/test_postcondition.py::TestInClass::test_postcondition_in_abstract_class_method",
"tests/test_postcondition.py::TestInClass::test_postcondition_in_abstract_static_method",
"tests/test_postcondition.py::TestInClass::test_postcondition_in_class_method",
"tests/test_postcondition.py::TestInClass::test_postcondition_in_static_method",
"tests/test_postcondition.py::TestInClass::test_setter",
"tests/test_postcondition.py::TestInvalid::test_conflicting_OLD_argument",
"tests/test_postcondition.py::TestInvalid::test_conflicting_result_argument",
"tests/test_postcondition.py::TestInvalid::test_error_with_invalid_arguments",
"tests/test_postcondition.py::TestInvalid::test_invalid_postcondition_arguments",
"tests/test_precondition.py::TestOK::test_that_it_works",
"tests/test_precondition.py::TestViolation::test_condition_as_function",
"tests/test_precondition.py::TestViolation::test_only_with_condition_arg",
"tests/test_precondition.py::TestViolation::test_with_default_values",
"tests/test_precondition.py::TestViolation::test_with_description",
"tests/test_precondition.py::TestViolation::test_with_multiple_comparators",
"tests/test_precondition.py::TestViolation::test_with_pathlib",
"tests/test_precondition.py::TestViolation::test_with_stacked_decorators",
"tests/test_precondition.py::TestError::test_as_function",
"tests/test_precondition.py::TestError::test_as_function_with_outer_scope",
"tests/test_precondition.py::TestError::test_with_different_args_from_condition",
"tests/test_precondition.py::TestError::test_with_empty_args",
"tests/test_precondition.py::TestToggling::test_enabled",
"tests/test_precondition.py::TestInClass::test_deleter",
"tests/test_precondition.py::TestInClass::test_getter",
"tests/test_precondition.py::TestInClass::test_instance_method",
"tests/test_precondition.py::TestInClass::test_setter",
"tests/test_precondition.py::TestInvalid::test_error_with_invalid_arguments",
"tests/test_precondition.py::TestInvalid::test_unexpected_precondition_arguments"
] | {
"failed_lite_validators": [
"has_issue_reference",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-05-11T06:28:25Z" | mit |
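The core of the `Parquery__icontract-112` patch is a guarded negation of the condition result, so that values without a single truth value (e.g. numpy arrays) produce a descriptive error instead of a bare `ValueError` deep inside the checker. A standalone sketch of the pattern (simplified; the library's `_not_check` also reports the contract's location):

```python
import numpy as np

def not_check(check):
    """Negate a condition result, raising a clear error when it has no truth value."""
    try:
        return not check
    except Exception as err:
        raise ValueError("Failed to negate the evaluation of the condition.") from err

print(not_check(1 > 0))  # False

try:
    not_check(np.array([True, False]))  # ambiguous truth value of an array
except ValueError as err:
    print(err)  # Failed to negate the evaluation of the condition.
```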
|
Parquery__icontract-137 | diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index de92e3e..af1bceb 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -2,7 +2,8 @@
import contextlib
import functools
import inspect
-from typing import Callable, Any, Iterable, Optional, Tuple, List, Mapping, MutableMapping, Dict
+import threading
+from typing import cast, Callable, Any, Iterable, Optional, Tuple, List, Mapping, MutableMapping, Dict, Set
import icontract._represent
from icontract._globals import CallableT
@@ -299,6 +300,15 @@ class _Old:
return "a bunch of OLD values"
+_THREAD_LOCAL = threading.local()
+
+# This flag is used to avoid recursively checking contracts for the same function or instance while
+# contract checking is already in progress.
+#
+# The value refers to the id() of the function (preconditions and postconditions) or instance (invariants).
+_THREAD_LOCAL.in_progress = cast(Set[int], set())
+
+
def decorate_with_checker(func: CallableT) -> CallableT:
"""Decorate the function with a checker that verifies the preconditions and postconditions."""
# pylint: disable=too-many-statements
@@ -323,14 +333,9 @@ def decorate_with_checker(func: CallableT) -> CallableT:
if param.default != inspect.Parameter.empty:
kwdefaults[param.name] = param.default
- # This flag is used to avoid recursively checking contracts for the same function while contract checking is already
- # in progress.
- in_progress = False
-
def unset_checking_in_progress() -> None:
"""Mark that the checking of the contract is finished."""
- nonlocal in_progress
- in_progress = False
+ _THREAD_LOCAL.in_progress.discard(id(func))
def wrapper(*args, **kwargs): # type: ignore
"""Wrap func by checking the preconditions and postconditions."""
@@ -341,11 +346,10 @@ def decorate_with_checker(func: CallableT) -> CallableT:
# If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop
# by skipping any subsequent contract checks for the same function.
- nonlocal in_progress
- if in_progress: # pylint: disable=used-before-assignment
+ if id(func) in _THREAD_LOCAL.in_progress:
return func(*args, **kwargs)
- in_progress = True
+ _THREAD_LOCAL.in_progress.add(id(func))
preconditions = getattr(wrapper, "__preconditions__") # type: List[List[Contract]]
snapshots = getattr(wrapper, "__postcondition_snapshots__") # type: List[Snapshot]
@@ -457,14 +461,14 @@ def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
assert instance is not None, "Expected to find `self` in the parameters, but found none."
- setattr(instance, '__dbc_invariant_check_is_in_progress__', True)
+ _THREAD_LOCAL.in_progress.add(id(instance))
- def remove_in_progress_dunder() -> None:
- """Remove the dunder which signals that an invariant is already being checked down the call stack."""
- delattr(instance, '__dbc_invariant_check_is_in_progress__')
+ def remove_from_in_progress() -> None:
+ """Remove the flag which signals that an invariant is already being checked down the call stack."""
+ _THREAD_LOCAL.in_progress.discard(id(instance))
with contextlib.ExitStack() as exit_stack:
- exit_stack.callback(remove_in_progress_dunder) # pylint: disable=no-member
+ exit_stack.callback(remove_from_in_progress) # pylint: disable=no-member
for contract in instance.__class__.__invariants__:
_assert_invariant(contract=contract, instance=instance)
@@ -481,18 +485,18 @@ def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
assert instance is not None, "Expected to find `self` in the parameters, but found none."
- if not hasattr(instance, '__dbc_invariant_check_is_in_progress__'):
- setattr(instance, '__dbc_invariant_check_is_in_progress__', True)
+ if id(instance) not in _THREAD_LOCAL.in_progress:
+ _THREAD_LOCAL.in_progress.add(id(instance))
else:
# Do not check any invariants to avoid endless recursion.
return func(*args, **kwargs)
- def remove_in_progress_dunder() -> None:
- """Remove the dunder which signals that an invariant is already being checked down the call stack."""
- delattr(instance, '__dbc_invariant_check_is_in_progress__')
+ def remove_from_in_progress() -> None:
+ """Remove the flag which signals that an invariant is already being checked down the call stack."""
+ _THREAD_LOCAL.in_progress.discard(id(instance))
with contextlib.ExitStack() as exit_stack:
- exit_stack.callback(remove_in_progress_dunder) # pylint: disable=no-member
+ exit_stack.callback(remove_from_in_progress) # pylint: disable=no-member
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
| Parquery/icontract | 0bf4b66fd1590fc18a73208b69a0a17b228dd79b | diff --git a/tests/test_invariant.py b/tests/test_invariant.py
index 583cd96..d45172c 100644
--- a/tests/test_invariant.py
+++ b/tests/test_invariant.py
@@ -146,6 +146,17 @@ class TestOK(unittest.TestCase):
_ = A()
+ def test_no_dict_pollution(self) -> None:
+ testSelf = self
+
+ @icontract.invariant(lambda self: self.mustHold())
+ class A:
+ def mustHold(self) -> bool:
+ testSelf.assertDictEqual({}, self.__dict__)
+ return True
+
+ _ = A()
+
class TestViolation(unittest.TestCase):
def test_init(self) -> None:
@@ -664,3 +675,7 @@ class TestInvalid(unittest.TestCase):
self.assertIsNotNone(value_error)
self.assertEqual('Failed to negate the evaluation of the condition.',
tests.error.wo_mandatory_location(str(value_error)))
+
+
+if __name__ == '__main__':
+ unittest.main()
| Class invariant changes __dict__ unexpectedly
Usually `__dict__` can be used to compare whether two instances have equal attributes. Adding a class invariant breaks an `__eq__` method that is based on this assumption:
```Python
from icontract import invariant
@invariant(lambda self: all(" " not in n for n in self.parts))
class ID:
def __init__(self, identifier: str) -> None:
self.parts = identifier.split(".")
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
print(self.__dict__)
print(other.__dict__)
return self.__dict__ == other.__dict__
return NotImplemented
print(ID("A") == ID("A"))
```
```Console
{'parts': ['A'], '__dbc_invariant_check_is_in_progress__': True}
{'parts': ['A']}
False
```
In the example shown it is of course quite simple to fix the issue. In my use case, multiple classes inherit from a base class containing such an `__eq__` method. Each derived class contains different additional attributes. Adding a separate `__eq__` method for each class with an invariant would be cumbersome. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_invariant.py::TestOK::test_no_dict_pollution"
] | [
"tests/test_invariant.py::TestOK::test_class_method",
"tests/test_invariant.py::TestOK::test_init",
"tests/test_invariant.py::TestOK::test_instance_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_private_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_protected_method",
"tests/test_invariant.py::TestOK::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestOK::test_magic_method",
"tests/test_invariant.py::TestOK::test_private_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_protected_method_may_violate_inv",
"tests/test_invariant.py::TestViolation::test_condition_as_function",
"tests/test_invariant.py::TestViolation::test_condition_as_function_with_default_argument_value",
"tests/test_invariant.py::TestViolation::test_init",
"tests/test_invariant.py::TestViolation::test_inv_as_precondition",
"tests/test_invariant.py::TestViolation::test_inv_ok_but_post_violated",
"tests/test_invariant.py::TestViolation::test_inv_violated_after_pre",
"tests/test_invariant.py::TestViolation::test_inv_violated_but_post_ok",
"tests/test_invariant.py::TestViolation::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestViolation::test_magic_method",
"tests/test_invariant.py::TestViolation::test_method",
"tests/test_invariant.py::TestViolation::test_multiple_invs_first_violated",
"tests/test_invariant.py::TestViolation::test_multiple_invs_last_violated",
"tests/test_invariant.py::TestProperty::test_property_deleter",
"tests/test_invariant.py::TestProperty::test_property_getter",
"tests/test_invariant.py::TestProperty::test_property_setter",
"tests/test_invariant.py::TestError::test_as_function",
"tests/test_invariant.py::TestError::test_as_function_with_empty_args",
"tests/test_invariant.py::TestError::test_as_type",
"tests/test_invariant.py::TestToggling::test_disabled",
"tests/test_invariant.py::TestInvalid::test_no_boolyness",
"tests/test_invariant.py::TestInvalid::test_with_invalid_arguments"
] | {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-05-06T16:14:40Z" | mit |
|
Parquery__icontract-151 | diff --git a/README.rst b/README.rst
index 12c3b36..62fdfc3 100644
--- a/README.rst
+++ b/README.rst
@@ -711,6 +711,19 @@ in progress and removed once the invariants checking finished. As long as the du
``__dbc_invariant_check_is_in_progress__`` is present, the wrappers that check invariants simply return the result of
the function.
+Invariant checks also need to be disabled during the construction since calling member functions would trigger invariant
+checks which, on their hand, might check on yet-to-be-defined instance attributes. See the following snippet:
+
+.. code-block:: python
+
+ @icontract.invariant(lambda self: self.some_attribute > 0)
+ class SomeClass(icontract.DBC):
+ def __init__(self) -> None:
+ self.some_attribute = self.some_func()
+
+ def some_func(self) -> int:
+ return 1984
+
Linter
------
We provide a linter that statically verifies the arguments of the contracts (*i.e.* that they are
@@ -797,9 +810,9 @@ Benchmarking invariant at __init__:
========================= ============ ============== =======================
Case Total time Time per run Relative time per run
========================= ============ ============== =======================
-`ClassWithIcontract` 1.37 s 1.37 ΞΌs 296%
-`ClassWithDpcontracts` 0.46 s 0.46 ΞΌs 100%
-`ClassWithInlineContract` 0.27 s 0.27 ΞΌs 59%
+`ClassWithIcontract` 1.43 s 1.43 ΞΌs 306%
+`ClassWithDpcontracts` 0.47 s 0.47 ΞΌs 100%
+`ClassWithInlineContract` 0.27 s 0.27 ΞΌs 57%
========================= ============ ============== =======================
Benchmarking invariant at a function:
@@ -807,9 +820,9 @@ Benchmarking invariant at a function:
========================= ============ ============== =======================
Case Total time Time per run Relative time per run
========================= ============ ============== =======================
-`ClassWithIcontract` 2.14 s 2.14 ΞΌs 452%
-`ClassWithDpcontracts` 0.47 s 0.47 ΞΌs 100%
-`ClassWithInlineContract` 0.25 s 0.25 ΞΌs 53%
+`ClassWithIcontract` 2.00 s 2.00 ΞΌs 445%
+`ClassWithDpcontracts` 0.45 s 0.45 ΞΌs 100%
+`ClassWithInlineContract` 0.23 s 0.23 ΞΌs 52%
========================= ============ ============== =======================
Benchmarking precondition:
@@ -817,9 +830,9 @@ Benchmarking precondition:
=============================== ============ ============== =======================
Case Total time Time per run Relative time per run
=============================== ============ ============== =======================
-`function_with_icontract` 0.02 s 2.41 ΞΌs 5%
-`function_with_dpcontracts` 0.53 s 53.20 ΞΌs 100%
-`function_with_inline_contract` 0.00 s 0.16 ΞΌs 0%
+`function_with_icontract` 0.02 s 2.38 ΞΌs 5%
+`function_with_dpcontracts` 0.51 s 50.89 ΞΌs 100%
+`function_with_inline_contract` 0.00 s 0.15 ΞΌs 0%
=============================== ============ ============== =======================
Benchmarking postcondition:
@@ -827,9 +840,9 @@ Benchmarking postcondition:
=============================== ============ ============== =======================
Case Total time Time per run Relative time per run
=============================== ============ ============== =======================
-`function_with_icontract` 0.03 s 2.51 ΞΌs 5%
-`function_with_dpcontracts` 0.52 s 52.42 ΞΌs 100%
-`function_with_inline_contract` 0.00 s 0.17 ΞΌs 0%
+`function_with_icontract` 0.02 s 2.48 ΞΌs 5%
+`function_with_dpcontracts` 0.51 s 50.93 ΞΌs 100%
+`function_with_inline_contract` 0.00 s 0.15 ΞΌs 0%
=============================== ============ ============== =======================
diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index d499064..7882875 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -458,15 +458,17 @@ def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
def wrapper(*args, **kwargs): # type: ignore
"""Wrap __init__ method of a class by checking the invariants *after* the invocation."""
- result = func(*args, **kwargs)
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
assert instance is not None, "Expected to find `self` in the parameters, but found none."
+ # We need to disable the invariants check during the constructor.
id_instance = str(id(instance))
setattr(_IN_PROGRESS, id_instance, True)
# ExitStack is not used here due to performance.
try:
+ result = func(*args, **kwargs)
+
for contract in instance.__class__.__invariants__:
_assert_invariant(contract=contract, instance=instance)
| Parquery/icontract | f99d7436e3ef1fee8b83f7dc6b2ea8500ebdfa68 | diff --git a/tests/test_recursion.py b/tests/test_recursion.py
index 3ca0eb5..db46ebf 100644
--- a/tests/test_recursion.py
+++ b/tests/test_recursion.py
@@ -227,6 +227,23 @@ class TestInvariant(unittest.TestCase):
some_instance.another_func()
self.assertListEqual(['some_func', 'another_func', 'some_func'], order)
+ def test_member_function_call_in_constructor(self) -> None:
+ order = [] # type: List[str]
+
+ @icontract.invariant(lambda self: self.some_attribute > 0) # pylint: disable=no-member
+ class SomeClass(icontract.DBC):
+ def __init__(self) -> None:
+ order.append('__init__ enters')
+ self.some_attribute = self.some_func()
+ order.append('__init__ exits')
+
+ def some_func(self) -> int:
+ order.append('some_func')
+ return 3
+
+ _ = SomeClass()
+ self.assertListEqual(['__init__ enters', 'some_func', '__init__ exits'], order)
+
if __name__ == '__main__':
unittest.main()
| AttributeError in contract caused by method call in constructor
When a method is called in the constructor, an `AttributeError` is raised in the contract. Here is a reproducer:
```Python
from icontract import invariant


@invariant(lambda self: all(" " not in part for part in self.parts))
class ID:
    def __init__(self, identifier: str) -> None:
        self.parts = identifier.split(self.separator())

    def separator(self) -> str:
        return "."


ID("A")
```
```Console
$ python test.py
Traceback (most recent call last):
File "test.py", line 14, in <module>
ID("A")
File "/home/tr/.local/lib/python3.8/site-packages/icontract/_checkers.py", line 461, in wrapper
result = func(*args, **kwargs)
File "test.py", line 7, in __init__
self.parts = identifier.split(self.separator)
File "/home/tr/.local/lib/python3.8/site-packages/icontract/_checkers.py", line 498, in wrapper
_assert_invariant(contract=contract, instance=instance)
File "/home/tr/.local/lib/python3.8/site-packages/icontract/_checkers.py", line 162, in _assert_invariant
check = contract.condition(self=instance)
File "test.py", line 4, in <lambda>
@invariant(lambda self: all(" " not in part for part in self.parts))
AttributeError: 'ID' object has no attribute 'parts'
```
The contract works correctly when `self.separator()` is replaced by a string literal.
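For illustration (an editor's sketch, not part of the original report), the working variant would look like this:
```Python
from icontract import invariant


@invariant(lambda self: all(" " not in part for part in self.parts))
class ID:
    def __init__(self, identifier: str) -> None:
        # A literal separator avoids calling a member function before
        # self.parts (used by the invariant) has been assigned.
        self.parts = identifier.split(".")


ID("A")
```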
### Versions
icontract 2.3.4
asttokens 2.0.4
Python 3.8.5 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_recursion.py::TestInvariant::test_member_function_call_in_constructor"
] | [
"tests/test_recursion.py::TestPrecondition::test_ok",
"tests/test_recursion.py::TestPrecondition::test_recover_after_exception",
"tests/test_recursion.py::TestPostcondition::test_ok",
"tests/test_recursion.py::TestPostcondition::test_recover_after_exception",
"tests/test_recursion.py::TestInvariant::test_ok",
"tests/test_recursion.py::TestInvariant::test_recover_after_exception"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-09-14T21:35:50Z" | mit |
|
Parquery__icontract-166 | diff --git a/.travis.yml b/.travis.yml
index 32624d8..7155fe1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,7 +3,7 @@ python:
- "3.5"
- "3.6"
- "3.7"
- - "3.8"
+ - "3.8.5"
install:
- pip3 install -e .[dev]
- pip3 install coveralls
diff --git a/icontract/_recompute.py b/icontract/_recompute.py
index cbe7709..00faac1 100644
--- a/icontract/_recompute.py
+++ b/icontract/_recompute.py
@@ -5,7 +5,8 @@ import builtins
import functools
import platform
import sys
-from typing import Any, Mapping, Dict, List, Optional, Union, Tuple, Set, Callable # pylint: disable=unused-import
+from typing import Any, Mapping, Dict, List, Optional, Union, Tuple, Set, Callable, \
+ cast # pylint: disable=unused-import
class Placeholder:
@@ -37,8 +38,11 @@ class Visitor(ast.NodeVisitor):
:param variable_lookup: list of lookup tables to look-up the values of the variables, sorted by precedence
"""
- # Resolve precedence of variable lookup
+ # _name_to_value maps the variable names to variable values.
+ # This is important for Load contexts as well as Store contexts in, e.g., named expressions.
self._name_to_value = dict() # type: Dict[str, Any]
+
+ # Resolve precedence of variable lookups
for lookup in variable_lookup:
for name, value in lookup.items():
if name not in self._name_to_value:
@@ -314,6 +318,24 @@ class Visitor(ast.NodeVisitor):
self.recomputed_values[node] = result
return result
+ if sys.version_info >= (3, 8):
+ # pylint: disable=no-member
+ def visit_NamedExpr(self, node: ast.NamedExpr) -> Any:
+ """Visit the node's ``value`` and assign it to both this node and the target."""
+ value = self.visit(node=node.value)
+ self.recomputed_values[node] = value
+
+ # This assignment is needed to make mypy happy.
+ target = cast(ast.Name, node.target)
+
+ if not isinstance(target.ctx, ast.Store):
+ raise NotImplementedError(
+ "Expected Store context in the target of a named expression, but got: {}".format(target.ctx))
+
+ self._name_to_value[target.id] = value
+
+ return value
+
def visit_Index(self, node: ast.Index) -> Any:
"""Visit the node's ``value``."""
result = self.visit(node=node.value)
diff --git a/icontract/_represent.py b/icontract/_represent.py
index 81a2082..934b052 100644
--- a/icontract/_represent.py
+++ b/icontract/_represent.py
@@ -3,9 +3,10 @@ import ast
import inspect
import re
import reprlib
+import sys
import textwrap
import uuid
-from typing import Any, Mapping, MutableMapping, Callable, List, Dict, Iterable # pylint: disable=unused-import
+from typing import Any, Mapping, MutableMapping, Callable, List, Dict, Iterable, cast # pylint: disable=unused-import
from typing import Optional, Tuple # pylint: disable=unused-import
import asttokens
@@ -88,6 +89,21 @@ class Visitor(ast.NodeVisitor):
self.generic_visit(node=node)
+ if sys.version_info >= (3, 8):
+ # pylint: disable=no-member
+ def visit_NamedExpr(self, node: ast.NamedExpr) -> Any:
+ """Represent the target with the value of the node."""
+ if node in self._recomputed_values:
+ value = self._recomputed_values[node]
+
+ # This is necessary in order to make mypy happy.
+ target = cast(ast.Name, node.target)
+
+ if _representable(value=value):
+ self.reprs[target.id] = value
+
+ self.generic_visit(node=node)
+
def visit_Call(self, node: ast.Call) -> None:
"""Represent the call by dumping its source code."""
if node in self._recomputed_values:
diff --git a/precommit.py b/precommit.py
index 1592666..926809e 100755
--- a/precommit.py
+++ b/precommit.py
@@ -24,6 +24,10 @@ def main() -> int:
print("YAPF'ing...")
yapf_targets = ["tests", "icontract", "setup.py", "precommit.py", "benchmark.py", "benchmarks", "tests_with_others"]
+
+ if sys.version_info >= (3, 8, 5):
+ yapf_targets.append('tests_3_8')
+
if overwrite:
subprocess.check_call(
["yapf", "--in-place", "--style=style.yapf", "--recursive"] + yapf_targets, cwd=str(repo_root))
@@ -32,10 +36,18 @@ def main() -> int:
["yapf", "--diff", "--style=style.yapf", "--recursive"] + yapf_targets, cwd=str(repo_root))
print("Mypy'ing...")
- subprocess.check_call(["mypy", "--strict", "icontract", "tests"], cwd=str(repo_root))
+ mypy_targets = ["icontract", "tests"]
+ if sys.version_info >= (3, 8):
+ mypy_targets.append('tests_3_8')
+
+ subprocess.check_call(["mypy", "--strict"] + mypy_targets, cwd=str(repo_root))
print("Pylint'ing...")
- subprocess.check_call(["pylint", "--rcfile=pylint.rc", "tests", "icontract"], cwd=str(repo_root))
+ pylint_targets = ['icontract', 'tests']
+
+ if sys.version_info >= (3, 8):
+ pylint_targets.append('tests_3_8')
+ subprocess.check_call(["pylint", "--rcfile=pylint.rc"] + pylint_targets, cwd=str(repo_root))
print("Pydocstyle'ing...")
subprocess.check_call(["pydocstyle", "icontract"], cwd=str(repo_root))
@@ -45,10 +57,14 @@ def main() -> int:
env['ICONTRACT_SLOW'] = 'true'
# yapf: disable
+ unittest_targets = ['tests']
+ if sys.version_info > (3, 8):
+ unittest_targets.append('tests_3_8')
+
subprocess.check_call(
["coverage", "run",
"--source", "icontract",
- "-m", "unittest", "discover", "tests"],
+ "-m", "unittest", "discover"] + unittest_targets,
cwd=str(repo_root),
env=env)
# yapf: enable
| Parquery/icontract | 9e8451b28b9cb5bdd02c1ae0b194af58ea80854d | diff --git a/tests_3_8/__init__.py b/tests_3_8/__init__.py
new file mode 100644
index 0000000..774c18e
--- /dev/null
+++ b/tests_3_8/__init__.py
@@ -0,0 +1,7 @@
+"""
+Test Python 3.8-specific features.
+
+For example, one such feature is walrus operator used in named expressions.
+We have to exclude these tests running on prior versions of Python since the syntax would be considered
+invalid.
+"""
diff --git a/tests_3_8/test_represent.py b/tests_3_8/test_represent.py
new file mode 100644
index 0000000..2aa9d99
--- /dev/null
+++ b/tests_3_8/test_represent.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# pylint: disable=missing-docstring,invalid-name,too-many-public-methods,no-self-use
+# pylint: disable=unused-argument
+
+import textwrap
+import unittest
+from typing import Optional # pylint: disable=unused-import
+
+import icontract._represent
+import tests.error
+import tests.mock
+
+
+class TestReprValues(unittest.TestCase):
+ def test_named_expression(self) -> None:
+ @icontract.require(lambda x: (t := x + 1) and t > 1) # pylint: disable=undefined-variable
+ def func(x: int) -> int:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=0)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ textwrap.dedent('''\
+ (t := x + 1) and t > 1:
+ t was 1
+ x was 0'''), tests.error.wo_mandatory_location(str(violation_err)))
+
+
+if __name__ == '__main__':
+ unittest.main()
| Violating contract with assignment expression produces NotImplementedError
An assignment expression in a lambda expression in a contract produces a `NotImplementedError` from `icontract._recompute` when a call violates the contract. A call that conforms to the contract does not produce the error. Using Python 3.8.5 and icontract 2.3.7.
Example file `test_foo.py`:
``` python
from icontract import ViolationError, require
from pytest import raises


@require(lambda x: (t := x + 1) and t > 1)
def include(x: int) -> int:
    return x


def test_include_a():
    with raises(ViolationError):
        include(0)


def test_include_b():
    assert include(1) == 1
```
``` bash
$ pytest test_foo.py
============================= test session starts ==============================
platform linux -- Python 3.8.5, pytest-6.1.2, py-1.9.0, pluggy-0.13.1
rootdir: /tmp/foo
collected 2 items
test_foo.py F. [100%]
=================================== FAILURES ===================================
________________________________ test_include_a ________________________________
def test_include_a():
with raises(ViolationError):
> include(0)
test_foo.py:10:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test-env/lib/python3.8/site-packages/icontract/_checkers.py:373: in wrapper
_assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
test-env/lib/python3.8/site-packages/icontract/_checkers.py:152: in _assert_precondition
msg = icontract._represent.generate_message(contract=contract, condition_kwargs=condition_kwargs)
test-env/lib/python3.8/site-packages/icontract/_represent.py:387: in generate_message
repr_vals = repr_values(
test-env/lib/python3.8/site-packages/icontract/_represent.py:334: in repr_values
recompute_visitor.visit(node=lambda_inspection.node.body)
/nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit
return visitor(node)
test-env/lib/python3.8/site-packages/icontract/_recompute.py:213: in visit_BoolOp
values = [self.visit(value_node) for value_node in node.values]
test-env/lib/python3.8/site-packages/icontract/_recompute.py:213: in <listcomp>
values = [self.visit(value_node) for value_node in node.values]
/nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit
return visitor(node)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <icontract._recompute.Visitor object at 0x7fb534f9ea00>
node = <_ast.NamedExpr object at 0x7fb534f9e250>
def generic_visit(self, node: ast.AST) -> None:
"""Raise an exception that this node has not been handled."""
> raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
E NotImplementedError: Unhandled recomputation of the node: <class '_ast.NamedExpr'> <_ast.NamedExpr object at 0x7fb534f9e250>
test-env/lib/python3.8/site-packages/icontract/_recompute.py:449: NotImplementedError
=========================== short test summary info ============================
FAILED test_foo.py::test_include_a - NotImplementedError: Unhandled recomputa...
========================= 1 failed, 1 passed in 0.31s ==========================
```
Using a named function instead of a lambda in the contract avoids the error.
Example file `test_bar.py`:
``` python
from icontract import ViolationError, require
from pytest import raises


def want(x: int):
    t = x + 1
    return t > 1


@require(want)
def include2(x: int) -> int:
    return x


def test_include2_a():
    with raises(ViolationError):
        include2(0)


def test_include2_b():
    assert include2(1) == 1
```
``` bash
$ pytest test_bar.py
============================= test session starts ==============================
platform linux -- Python 3.8.5, pytest-6.1.2, py-1.9.0, pluggy-0.13.1
rootdir: /tmp/foo
collected 2 items
test_bar.py .. [100%]
============================== 2 passed in 0.04s ===============================
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests_3_8/test_represent.py::TestReprValues::test_named_expression"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-09T20:27:33Z" | mit |
|
Parquery__icontract-168 | diff --git a/README.rst b/README.rst
index 80615a0..927040a 100644
--- a/README.rst
+++ b/README.rst
@@ -164,7 +164,8 @@ We consider the following methods to be "public":
* All methods not prefixed with ``_``
* All magic methods (prefix ``__`` and suffix ``__``)
-Class methods can not observe the invariant since they are not associated with an instance of the class.
+Class methods (marked with ``@classmethod`` or special dunders such as ``__new__``) can not observe the invariant
+since they are not associated with an instance of the class.
We exempt ``__getattribute__``, ``__setattr__`` and ``__delattr__`` methods from observing the invariant since
these functions alter the state of the instance and thus can not be considered "public".
diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index 7882875..83fa17b 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -429,13 +429,16 @@ def decorate_with_checker(func: CallableT) -> CallableT:
def _find_self(param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
"""Find the instance of ``self`` in the arguments."""
- instance_i = param_names.index("self")
- if instance_i < len(args):
- instance = args[instance_i]
- else:
- instance = kwargs["self"]
+ instance_i = None
+ try:
+ instance_i = param_names.index("self")
+ except ValueError:
+ pass
- return instance
+ if instance_i is not None:
+ return args[instance_i]
+
+ return kwargs["self"]
def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
@@ -458,8 +461,12 @@ def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
def wrapper(*args, **kwargs): # type: ignore
"""Wrap __init__ method of a class by checking the invariants *after* the invocation."""
- instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
- assert instance is not None, "Expected to find `self` in the parameters, but found none."
+ try:
+ instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
+ except KeyError as err:
+ raise KeyError(("The parameter 'self' could not be found in the call to function {!r}: "
+ "the param names were {!r}, the args were {!r} and kwargs were {!r}").format(
+ func, param_names, args, kwargs)) from err
# We need to disable the invariants check during the constructor.
id_instance = str(id(instance))
@@ -481,12 +488,15 @@ def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
def wrapper(*args, **kwargs): # type: ignore
"""Wrap a function of a class by checking the invariants *before* and *after* the invocation."""
- #
+ try:
+ instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
+ except KeyError as err:
+ raise KeyError(("The parameter 'self' could not be found in the call to function {!r}: "
+ "the param names were {!r}, the args were {!r} and kwargs were {!r}").format(
+ func, param_names, args, kwargs)) from err
+
# The following dunder indicates whether another invariant is currently being checked. If so,
# we need to suspend any further invariant check to avoid endless recursion.
- instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
- assert instance is not None, "Expected to find `self` in the parameters, but found none."
-
id_instance = str(id(instance))
if not hasattr(_IN_PROGRESS, id_instance):
setattr(_IN_PROGRESS, id_instance, True)
@@ -543,10 +553,11 @@ def add_invariant_checks(cls: type) -> None:
# Filter out entries in the directory which are certainly not candidates for decoration.
for name, value in [(name, getattr(cls, name)) for name in dir(cls)]:
+ # __new__ is a special class method (though not marked properly with @classmethod!).
# We need to ignore __repr__ to prevent endless loops when generating error messages.
# __getattribute__, __setattr__ and __delattr__ are too invasive and alter the state of the instance.
# Hence we don't consider them "public".
- if name in ["__repr__", "__getattribute__", "__setattr__", "__delattr__"]:
+ if name in ["__new__", "__repr__", "__getattribute__", "__setattr__", "__delattr__"]:
continue
if name == "__init__":
| Parquery/icontract | cb64cef40abccd3b212ca14987244e71782ee146 | diff --git a/tests/test_invariant.py b/tests/test_invariant.py
index d45172c..e8e7a67 100644
--- a/tests/test_invariant.py
+++ b/tests/test_invariant.py
@@ -5,7 +5,7 @@
import time
import unittest
-from typing import Optional # pylint: disable=unused-import
+from typing import Dict, Iterator, Mapping, Optional, Any # pylint: disable=unused-import
import icontract
import tests.error
@@ -157,6 +157,63 @@ class TestOK(unittest.TestCase):
_ = A()
+ def test_new_exempted(self) -> None:
+ # This test is related to the issue #167.
+ new_call_counter = 0
+ init_call_counter = 0
+
+ @icontract.invariant(lambda self: True)
+ class Foo:
+ def __new__(cls, *args, **kwargs) -> 'Foo': # type: ignore
+ nonlocal new_call_counter
+ new_call_counter += 1
+ return super(Foo, cls).__new__(cls) # type: ignore
+
+ def __init__(self) -> None:
+ nonlocal init_call_counter
+ init_call_counter += 1
+
+ _ = Foo()
+ self.assertEqual(1, new_call_counter)
+ self.assertEqual(1, init_call_counter)
+
+ def test_subclass_of_generic_mapping(self) -> None:
+ # This test is related to the issue #167.
+ counter = 0
+
+ def increase_counter(self: Any) -> bool:
+ nonlocal counter
+ counter += 1
+ return True
+
+ @icontract.invariant(increase_counter)
+ class Foo(Mapping[str, int]):
+ def __init__(self, table: Dict[str, int]) -> None:
+ self._table = table
+
+ def __getitem__(self, key: str) -> int:
+ return self._table[key]
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self._table)
+
+ def __len__(self) -> int:
+ return len(self._table)
+
+ def __str__(self) -> str:
+ return '{}({})'.format(self.__class__.__name__, self._table)
+
+ f = Foo({'a': 1}) # test the constructor
+ _ = f['a'] # test __getitem__
+ _ = iter(f) # test __iter__
+ _ = len(f) # test __len__
+ _ = str(f) # test __str__
+
+ # 1 invariant check after the constructor +
+ # 4 checks before the methods +
+ # 4 checks after the methods.
+ self.assertEqual(9, counter)
+
class TestViolation(unittest.TestCase):
def test_init(self) -> None:
| Invariant on subclass of generic Mapping produces ValueError
An `invariant` decorator on a subclass of a generic `typing.Mapping` class produces a `ValueError`. Using Python 3.8.5 and icontract 2.3.7.
Example file `foo.py`:
``` python
from icontract import invariant
from typing import Dict, Iterator, Mapping


@invariant(lambda self: True)
class Foo(Mapping[str, int]):
    def __init__(self, table: Dict[str, int]) -> None:
        self._table = table

    def __getitem__(self, key: str) -> int:
        return self._table[key]

    def __iter__(self) -> Iterator[str]:
        return iter(self._table)

    def __len__(self) -> int:
        return len(self._table)

    def __str__(self) -> str:
        return f'{self.__class__.__name__}({self._table})'


print(Foo({'a': 1}))
```
```
$ python foo.py
Traceback (most recent call last):
File "foo.py", line 21, in <module>
print(Foo({'a': 1}))
File "/tmp/foo/test-env/lib/python3.8/site-packages/icontract/_checkers.py", line 487, in wrapper
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
File "/tmp/foo/test-env/lib/python3.8/site-packages/icontract/_checkers.py", line 432, in _find_self
instance_i = param_names.index("self")
ValueError: 'self' is not in list
```
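For context (an editor's note based on the accompanying patch, which exempts `__new__` from invariant checks and makes the `self` lookup defensive; not part of the original report): the lookup fails for any wrapped callable whose signature does not name `self`, such as `__new__`, which takes `cls`. A minimal sketch of the failing lookup:
``` python
import inspect


class Example:
    def __new__(cls, *args, **kwargs):  # hypothetical class, for illustration only
        return super().__new__(cls)


param_names = list(inspect.signature(Example.__new__).parameters)
print(param_names)  # ['cls', 'args', 'kwargs'] -- there is no "self" to find

try:
    param_names.index("self")
except ValueError as error:
    print(error)  # 'self' is not in list
```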
Disabling assertions removes the error:
```
$ python -O foo.py
Foo({'a': 1})
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_invariant.py::TestOK::test_new_exempted"
] | [
"tests/test_invariant.py::TestOK::test_class_method",
"tests/test_invariant.py::TestOK::test_init",
"tests/test_invariant.py::TestOK::test_instance_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_private_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_protected_method",
"tests/test_invariant.py::TestOK::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestOK::test_magic_method",
"tests/test_invariant.py::TestOK::test_no_dict_pollution",
"tests/test_invariant.py::TestOK::test_private_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_protected_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_subclass_of_generic_mapping",
"tests/test_invariant.py::TestViolation::test_condition_as_function",
"tests/test_invariant.py::TestViolation::test_condition_as_function_with_default_argument_value",
"tests/test_invariant.py::TestViolation::test_init",
"tests/test_invariant.py::TestViolation::test_inv_as_precondition",
"tests/test_invariant.py::TestViolation::test_inv_ok_but_post_violated",
"tests/test_invariant.py::TestViolation::test_inv_violated_after_pre",
"tests/test_invariant.py::TestViolation::test_inv_violated_but_post_ok",
"tests/test_invariant.py::TestViolation::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestViolation::test_magic_method",
"tests/test_invariant.py::TestViolation::test_method",
"tests/test_invariant.py::TestViolation::test_multiple_invs_first_violated",
"tests/test_invariant.py::TestViolation::test_multiple_invs_last_violated",
"tests/test_invariant.py::TestProperty::test_property_deleter",
"tests/test_invariant.py::TestProperty::test_property_getter",
"tests/test_invariant.py::TestProperty::test_property_setter",
"tests/test_invariant.py::TestError::test_as_function",
"tests/test_invariant.py::TestError::test_as_function_with_empty_args",
"tests/test_invariant.py::TestError::test_as_type",
"tests/test_invariant.py::TestToggling::test_disabled",
"tests/test_invariant.py::TestInvalid::test_no_boolyness",
"tests/test_invariant.py::TestInvalid::test_with_invalid_arguments"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-16T15:45:25Z" | mit |
|
Parquery__icontract-170 | diff --git a/icontract/_recompute.py b/icontract/_recompute.py
index 00faac1..0427994 100644
--- a/icontract/_recompute.py
+++ b/icontract/_recompute.py
@@ -88,6 +88,44 @@ class Visitor(ast.NodeVisitor):
self.recomputed_values[node] = node.value
return node.value
+ if sys.version_info >= (3, 6):
+
+ def visit_FormattedValue(self, node: ast.FormattedValue) -> Any:
+ """Format the node value."""
+ fmt = ['{']
+ # See https://docs.python.org/3/library/ast.html#ast.FormattedValue for these
+ # constants
+ if node.conversion == -1:
+ pass
+ elif node.conversion == 115:
+ fmt.append('!s')
+ elif node.conversion == 114:
+ fmt.append('!r')
+ elif node.conversion == 97:
+ fmt.append('!a')
+ else:
+ raise NotImplementedError("Unhandled conversion of a formatted value node {!r}: {}".format(
+ node, node.conversion))
+
+ if node.format_spec is not None:
+ fmt.append(":")
+
+ # The following assert serves only documentation purposes so that the code is easier to follow.
+ assert isinstance(node.format_spec, ast.JoinedStr)
+ fmt.append(self.visit(node.format_spec))
+
+ fmt.append('}')
+
+ recomputed_value = self.visit(node.value)
+ return ''.join(fmt).format(recomputed_value)
+
+ def visit_JoinedStr(self, node: ast.JoinedStr) -> Any:
+ """Visit the values and concatenate them."""
+ joined_str = ''.join(self.visit(value_node) for value_node in node.values)
+
+ self.recomputed_values[node] = joined_str
+ return joined_str
+
# pylint: enable=no-member
def visit_List(self, node: ast.List) -> List[Any]:
diff --git a/icontract/_represent.py b/icontract/_represent.py
index 934b052..f3fb8ee 100644
--- a/icontract/_represent.py
+++ b/icontract/_represent.py
@@ -55,6 +55,17 @@ class Visitor(ast.NodeVisitor):
self.reprs = dict() # type: MutableMapping[str, str]
self._atok = atok
+ if sys.version_info >= (3, 6):
+ # pylint: disable=no-member
+ def visit_JoinedStr(self, node: ast.JoinedStr) -> None:
+ """Show the whole joined strings without descending into the values."""
+ if node in self._recomputed_values:
+ value = self._recomputed_values[node]
+
+ if _representable(value=value):
+ text = self._atok.get_text(node)
+ self.reprs[text] = value
+
def visit_Name(self, node: ast.Name) -> None:
"""
Resolve the name from the variable look-up and the built-ins.
diff --git a/precommit.py b/precommit.py
index 926809e..2e9fc99 100755
--- a/precommit.py
+++ b/precommit.py
@@ -57,14 +57,10 @@ def main() -> int:
env['ICONTRACT_SLOW'] = 'true'
# yapf: disable
- unittest_targets = ['tests']
- if sys.version_info > (3, 8):
- unittest_targets.append('tests_3_8')
-
subprocess.check_call(
["coverage", "run",
"--source", "icontract",
- "-m", "unittest", "discover"] + unittest_targets,
+ "-m", "unittest", "discover"],
cwd=str(repo_root),
env=env)
# yapf: enable
| Parquery/icontract | 38968d2d4b79f44522417f40e9d8cfd6b5762870 | diff --git a/tests_3_6/__init__.py b/tests_3_6/__init__.py
new file mode 100644
index 0000000..c706c20
--- /dev/null
+++ b/tests_3_6/__init__.py
@@ -0,0 +1,12 @@
+"""
+Test Python 3.6-specific features.
+
+For example, one such feature is literal string interpolation.
+"""
+
+import sys
+
+if sys.version_info < (3, 6):
+ def load_tests(loader, suite, pattern): # pylint: disable=unused-argument
+ """Ignore all the tests for lower Python versions."""
+ return suite
diff --git a/tests_3_6/test_represent.py b/tests_3_6/test_represent.py
new file mode 100644
index 0000000..65e5c4c
--- /dev/null
+++ b/tests_3_6/test_represent.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+# pylint: disable=missing-docstring,invalid-name,too-many-public-methods,no-self-use
+# pylint: disable=unused-argument
+
+import textwrap
+import unittest
+import math
+from typing import Optional # pylint: disable=unused-import
+
+import icontract._represent
+import tests.error
+import tests.mock
+
+
+class TestLiteralStringInterpolation(unittest.TestCase):
+ def test_plain_string(self) -> None:
+ @icontract.require(lambda x: f"something" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=0)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"something" == \'\': f"something" was \'something\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_simple_interpolation(self) -> None:
+ @icontract.require(lambda x: f"{x}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=0)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x}" == \'\': f"{x}" was \'0\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_string_formatting(self) -> None:
+ @icontract.require(lambda x: f"{x!s}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=1.984)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x!s}" == \'\': f"{x!s}" was \'1.984\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_repr_formatting(self) -> None:
+ @icontract.require(lambda x: f"{x!r}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=1.984)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x!r}" == \'\': f"{x!r}" was \'1.984\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_ascii_formatting(self) -> None:
+ @icontract.require(lambda x: f"{x!a}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=1.984)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x!a}" == \'\': f"{x!a}" was \'1.984\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_format_spec(self) -> None:
+ @icontract.require(lambda x: f"{x:.3}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=1.984)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x:.3}" == \'\': f"{x:.3}" was \'1.98\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+ def test_conversion_and_format_spec(self) -> None:
+ @icontract.require(lambda x: f"{x!r:.3}" == '')
+ def func(x: float) -> float:
+ return x
+
+ violation_err = None # type: Optional[icontract.ViolationError]
+ try:
+ func(x=1.984)
+ except icontract.ViolationError as err:
+ violation_err = err
+
+ self.assertIsNotNone(violation_err)
+ self.assertEqual(
+ 'f"{x!r:.3}" == \'\': f"{x!r:.3}" was \'1.9\'',
+ tests.error.wo_mandatory_location(str(violation_err)))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests_3_8/__init__.py b/tests_3_8/__init__.py
index 774c18e..040310e 100644
--- a/tests_3_8/__init__.py
+++ b/tests_3_8/__init__.py
@@ -5,3 +5,11 @@ For example, one such feature is walrus operator used in named expressions.
We have to exclude these tests running on prior versions of Python since the syntax would be considered
invalid.
"""
+
+import sys
+
+if sys.version_info < (3, 8):
+
+ def load_tests(loader, suite, pattern): # pylint: disable=unused-argument
+ """Ignore all the tests for lower Python versions."""
+ return suite
| Violating contract with f-string produces NotImplementedError
An f-string in a lambda expression in a contract produces a `NotImplementedError` from `icontract._recompute` when a call violates the contract. A call that conforms to the contract does not produce the error. Using Python 3.8.5 and icontract 2.3.7.
Example file `test_foo.py`:
``` python
from icontract import ViolationError, require
from pytest import raises


@require(lambda text: text != f'{text}{text}')
def include(text: str) -> str:
    return text


def test_include_a() -> None:
    with raises(ViolationError):
        include('')


def test_include_b() -> None:
    assert include('foo') == 'foo'
```
```
$ pytest test_foo.py
============================= test session starts ==============================
platform linux -- Python 3.8.5, pytest-6.1.2, py-1.9.0, pluggy-0.13.1
rootdir: /tmp/foo
collected 2 items
test_foo.py F. [100%]
=================================== FAILURES ===================================
________________________________ test_include_a ________________________________
def test_include_a() -> None:
with raises(ViolationError):
> include('')
test_foo.py:10:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test-env/lib/python3.8/site-packages/icontract/_checkers.py:373: in wrapper
_assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
test-env/lib/python3.8/site-packages/icontract/_checkers.py:152: in _assert_precondition
msg = icontract._represent.generate_message(contract=contract, condition_kwargs=condition_kwargs)
test-env/lib/python3.8/site-packages/icontract/_represent.py:403: in generate_message
repr_vals = repr_values(
test-env/lib/python3.8/site-packages/icontract/_represent.py:350: in repr_values
recompute_visitor.visit(node=lambda_inspection.node.body)
/nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit
return visitor(node)
test-env/lib/python3.8/site-packages/icontract/_recompute.py:234: in visit_Compare
comparators = [self.visit(node=comparator) for comparator in node.comparators]
test-env/lib/python3.8/site-packages/icontract/_recompute.py:234: in <listcomp>
comparators = [self.visit(node=comparator) for comparator in node.comparators]
/nix/store/z65l1jqvxa58zzwwa3bvglb6asj4y8cv-python3-3.8.5/lib/python3.8/ast.py:363: in visit
return visitor(node)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <icontract._recompute.Visitor object at 0x7f1794bf39a0>
node = <_ast.JoinedStr object at 0x7f1794bf3280>
def generic_visit(self, node: ast.AST) -> None:
"""Raise an exception that this node has not been handled."""
> raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
E NotImplementedError: Unhandled recomputation of the node: <class '_ast.JoinedStr'> <_ast.JoinedStr object at 0x7f1794bf3280>
test-env/lib/python3.8/site-packages/icontract/_recompute.py:471: NotImplementedError
=========================== short test summary info ============================
FAILED test_foo.py::test_include_a - NotImplementedError: Unhandled recomputa...
========================= 1 failed, 1 passed in 0.31s ==========================
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_ascii_formatting",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_conversion_and_format_spec",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_format_spec",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_plain_string",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_repr_formatting",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_simple_interpolation",
"tests_3_6/test_represent.py::TestLiteralStringInterpolation::test_string_formatting"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-19T11:22:22Z" | mit |
|
Parquery__icontract-172 | diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index 83fa17b..90d4a44 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -441,6 +441,32 @@ def _find_self(param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str,
return kwargs["self"]
+def _decorate_new_with_invariants(new_func: CallableT) -> CallableT:
+ """
+ Decorate the ``__new__`` of a class s.t. the invariants are checked on the result.
+
+ This is necessary for optimized classes such as ``namedtuple`` which use ``object.__init__``
+ as constructor and do not expect a wrapping around the constructor.
+ """
+ if _already_decorated_with_invariants(func=new_func):
+ return new_func
+
+ def wrapper(*args, **kwargs): # type: ignore
+ """Pass the arguments to __new__ and check invariants on the result."""
+ instance = new_func(*args, **kwargs)
+
+ for contract in instance.__class__.__invariants__:
+ _assert_invariant(contract=contract, instance=instance)
+
+ return instance
+
+ functools.update_wrapper(wrapper=wrapper, wrapped=new_func)
+
+ setattr(wrapper, "__is_invariant_check__", True)
+
+ return wrapper # type: ignore
+
+
def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
"""
Decorate the function ``func`` of the class ``cls`` with invariant checks.
@@ -546,6 +572,8 @@ def _already_decorated_with_invariants(func: CallableT) -> bool:
def add_invariant_checks(cls: type) -> None:
"""Decorate each of the class functions with invariant checks if not already decorated."""
+ # pylint: disable=too-many-branches
+
# Candidates for the decoration as list of (name, dir() value)
init_name_func = None # type: Optional[Tuple[str, Callable[..., None]]]
names_funcs = [] # type: List[Tuple[str, Callable[..., None]]]
@@ -590,8 +618,16 @@ def add_invariant_checks(cls: type) -> None:
if init_name_func:
name, func = init_name_func
- wrapper = _decorate_with_invariants(func=func, is_init=True)
- setattr(cls, name, wrapper)
+
+ # We have to distinguish this special case which is used by named
+ # tuples and possibly other optimized data structures.
+ # In those cases, we have to wrap __new__ instead of __init__.
+ if func == object.__init__ and hasattr(cls, "__new__"):
+ new_func = getattr(cls, "__new__")
+ setattr(cls, "__new__", _decorate_new_with_invariants(new_func))
+ else:
+ wrapper = _decorate_with_invariants(func=func, is_init=True)
+ setattr(cls, name, wrapper)
for name, func in names_funcs:
wrapper = _decorate_with_invariants(func=func, is_init=False)
| Parquery/icontract | e23b0df674b18842f38a1da4e1835c9217c7c84a | diff --git a/tests/test_invariant.py b/tests/test_invariant.py
index e8e7a67..d47cf44 100644
--- a/tests/test_invariant.py
+++ b/tests/test_invariant.py
@@ -5,7 +5,7 @@
import time
import unittest
-from typing import Dict, Iterator, Mapping, Optional, Any # pylint: disable=unused-import
+from typing import Dict, Iterator, Mapping, Optional, Any, NamedTuple # pylint: disable=unused-import
import icontract
import tests.error
diff --git a/tests_3_8/test_invariant.py b/tests_3_8/test_invariant.py
new file mode 100644
index 0000000..ff2929a
--- /dev/null
+++ b/tests_3_8/test_invariant.py
@@ -0,0 +1,57 @@
+# pylint: disable=missing-docstring
+# pylint: disable=invalid-name
+# pylint: disable=unused-argument
+# pylint: disable=no-member
+import textwrap
+import unittest
+from typing import NamedTuple, Optional # pylint: disable=unused-import
+
+import icontract
+import tests.error
+
+
+class TestOK(unittest.TestCase):
+ def test_on_named_tuple(self) -> None:
+ # This test is related to the issue #171.
+ #
+ # The test could not be executed under Python 3.6 as the ``inspect`` module
+ # could not figure out the type of getters.
+ @icontract.invariant(lambda self: self.first > 0)
+ class RightHalfPlanePoint(NamedTuple):
+ first: int
+ second: int
+
+ _ = RightHalfPlanePoint(1, 0)
+
+ self.assertEqual('Create new instance of RightHalfPlanePoint(first, second)',
+ RightHalfPlanePoint.__new__.__doc__)
+
+
+class TestViolation(unittest.TestCase):
+ def test_on_named_tuple(self) -> None:
+ # This test is related to the issue #171.
+ #
+ # The test could not be executed under Python 3.6 as the ``inspect`` module
+ # could not figure out the type of getters.
+ @icontract.invariant(lambda self: self.second > 0)
+ @icontract.invariant(lambda self: self.first > 0)
+ class RightHalfPlanePoint(NamedTuple):
+ first: int
+ second: int
+
+ violation_error = None # type: Optional[icontract.ViolationError]
+ try:
+ _ = RightHalfPlanePoint(1, -1)
+ except icontract.ViolationError as err:
+ violation_error = err
+
+ self.assertIsNotNone(violation_error)
+ self.assertEqual(
+ textwrap.dedent('''\
+ self.second > 0:
+ self was RightHalfPlanePoint(first=1, second=-1)
+ self.second was -1'''), tests.error.wo_mandatory_location(str(violation_error)))
+
+
+if __name__ == '__main__':
+ unittest.main()
| Invariant on subclass of NamedTuple produces TypeError
An `invariant` decorator on a subclass of `typing.NamedTuple` produces a `TypeError`. Using Python 3.8.5 and icontract 2.3.7.
Example file `foo.py`:
``` python
from typing import NamedTuple
from icontract import invariant


@invariant(lambda self: self.first > 0)
class RightHalfPlanePoint(NamedTuple):
    first: int
    second: int


print(RightHalfPlanePoint(1, 0))
```
```
$ python foo.py
Traceback (most recent call last):
File "foo.py", line 9, in <module>
print(RightHalfPlanePoint(1, 0))
File "/tmp/foo/test-env/lib/python3.8/site-packages/icontract/_checkers.py", line 477, in wrapper
result = func(*args, **kwargs)
TypeError: object.__init__() takes exactly one argument (the instance to initialize)
```
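For context (an editor's sketch based on the accompanying patch, not part of the original report): named tuples create their instances in `__new__` and inherit `object.__init__`, so a wrapper that replaces `__init__` and forwards the call arguments to it ends up passing extra arguments to `object.__init__`. A simplified simulation of that mechanism:
``` python
from typing import NamedTuple


class Point(NamedTuple):
    first: int
    second: int


# NamedTuple classes build their instances in __new__ and inherit object.__init__.
assert Point.__new__ is not object.__new__
assert Point.__init__ is object.__init__

# Hypothetical wrapper, for illustration only: forward all call arguments to the
# original __init__, as a naive contract checker around __init__ would do.
original_init = Point.__init__


def wrapped_init(self, *args, **kwargs):
    original_init(self, *args, **kwargs)


Point.__init__ = wrapped_init

try:
    Point(1, 2)
except TypeError as error:
    print(error)  # object.__init__() takes exactly one argument (the instance to initialize)
```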
Disabling assertions removes the error:
```
$ python -O foo.py
RightHalfPlanePoint(first=1, second=0)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests_3_8/test_invariant.py::TestOK::test_on_named_tuple",
"tests_3_8/test_invariant.py::TestViolation::test_on_named_tuple"
] | [
"tests/test_invariant.py::TestOK::test_class_method",
"tests/test_invariant.py::TestOK::test_init",
"tests/test_invariant.py::TestOK::test_instance_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_private_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_protected_method",
"tests/test_invariant.py::TestOK::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestOK::test_magic_method",
"tests/test_invariant.py::TestOK::test_new_exempted",
"tests/test_invariant.py::TestOK::test_no_dict_pollution",
"tests/test_invariant.py::TestOK::test_private_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_protected_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_subclass_of_generic_mapping",
"tests/test_invariant.py::TestViolation::test_condition_as_function",
"tests/test_invariant.py::TestViolation::test_condition_as_function_with_default_argument_value",
"tests/test_invariant.py::TestViolation::test_init",
"tests/test_invariant.py::TestViolation::test_inv_as_precondition",
"tests/test_invariant.py::TestViolation::test_inv_ok_but_post_violated",
"tests/test_invariant.py::TestViolation::test_inv_violated_after_pre",
"tests/test_invariant.py::TestViolation::test_inv_violated_but_post_ok",
"tests/test_invariant.py::TestViolation::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestViolation::test_magic_method",
"tests/test_invariant.py::TestViolation::test_method",
"tests/test_invariant.py::TestViolation::test_multiple_invs_first_violated",
"tests/test_invariant.py::TestViolation::test_multiple_invs_last_violated",
"tests/test_invariant.py::TestProperty::test_property_deleter",
"tests/test_invariant.py::TestProperty::test_property_getter",
"tests/test_invariant.py::TestProperty::test_property_setter",
"tests/test_invariant.py::TestError::test_as_function",
"tests/test_invariant.py::TestError::test_as_function_with_empty_args",
"tests/test_invariant.py::TestError::test_as_type",
"tests/test_invariant.py::TestToggling::test_disabled",
"tests/test_invariant.py::TestInvalid::test_no_boolyness",
"tests/test_invariant.py::TestInvalid::test_with_invalid_arguments"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2020-11-21T07:26:32Z" | mit |
|
Parquery__icontract-176 | diff --git a/README.rst b/README.rst
index 9fb69cb..db5af98 100644
--- a/README.rst
+++ b/README.rst
@@ -639,6 +639,78 @@ Here is an example of the error given as a callable:
If you left ``enabled`` argument to its default ``__debug__``, the contract will *not* be verified in
``-O`` mode.)
+Variable Positional and Keyword Arguments
+-----------------------------------------
+Certain functions do not name their arguments explicitly, but operate on variable positional and/or
+keyword arguments supplied at the function call (*e.g.*, ``def some_func(*args, **kwargs): ...``).
+Contract conditions thus need a mechanism to refer to these variable arguments.
+To that end, we introduced two special condition arguments, ``_ARGS`` and ``_KWARGS``, that
+icontract will populate before evaluating the condition to capture the positional and keyword
+arguments, respectively, of the function call.
+
+To avoid intricacies of Python's argument resolution at runtime, icontract simply captures *all*
+positional and keyword arguments in these two variables, regardless of whether the function defines
+them or not. However, we would recommend you to explicitly name arguments in your conditions and
+use ``_ARGS`` and ``_KWARGS`` only for the variable arguments for readability.
+
+We present in the following a couple of valid contracts to demonstrate how to use these special
+arguments:
+
+.. code-block:: python
+
+ # The contract refers to the positional arguments of the *call*,
+ # though the decorated function does not handle
+ # variable positional arguments.
+ >>> @icontract.require(lambda _ARGS: _ARGS[0] > 0)
+ ... def function_a(x: int) -> int:
+ ... return 123
+ >>> function_a(1)
+ 123
+
+ # The contract refers to the keyword arguments of the *call*,
+ # though the decorated function does not handle variable keyword arguments.
+ >>> @icontract.require(lambda _KWARGS: _KWARGS["x"] > 0)
+ ... def function_b(x: int) -> int:
+ ... return 123
+ >>> function_b(x=1)
+ 123
+
+ # The contract refers both to the named argument and keyword arguments.
+ # The decorated function specifies an argument and handles
+ # variable keyword arguments at the same time.
+ >>> @icontract.require(lambda x, _KWARGS: x < _KWARGS["y"])
+ ... def function_c(x: int, **kwargs) -> int:
+ ... return 123
+ >>> function_c(1, y=3)
+ 123
+
+ # The decorated functions accepts only variable keyboard arguments.
+ >>> @icontract.require(lambda _KWARGS: _KWARGS["x"] > 0)
+ ... def function_d(**kwargs) -> int:
+ ... return 123
+ >>> function_d(x=1)
+ 123
+
+ # The decorated functions accepts only variable keyboard arguments.
+ # The keyword arguments are given an uncommon name (``parameters`` instead
+ # of ``kwargs``).
+ >>> @icontract.require(lambda _KWARGS: _KWARGS["x"] > 0)
+ ... def function_e(**parameters) -> int:
+ ... return 123
+ >>> function_e(x=1)
+ 123
+
+As a side note, we agree that the names picked for the placeholders are indeed a bit ugly.
+We decided against more aesthetic or ergonomic identifiers (such as ``_`` and ``__`` or
+``A`` and ``KW``) to avoid potential naming conflicts.
+
+The underscore in front of the placeholders is meant to motivate a bit deeper understanding
+of the condition.
+For example, the reader needs to be aware that the logic for resolving the keyword arguments
+passed to the function is *different* in condition and that ``_KWARGS`` *does not* refer to
+arbitrary keyword arguments *passed to the condition*. Though this might be obvious for some
+readers, we are almost certain that ``_ARGS`` and ``_KWARGS`` will cause some confusion.
+We hope that a small hint like an underscore will eventually help the reading.
Implementation Details
----------------------
@@ -802,7 +874,7 @@ Here is a short code snippet to demonstrate where the current implementation fai
>>> some_func(x=0)
Traceback (most recent call last):
...
- SyntaxError: Decorator corresponding to the line 1 could not be found in file <doctest README.rst[64]>: 'require_x_positive = icontract.require(lambda x: x > 0)\n'
+ SyntaxError: Decorator corresponding to the line 1 could not be found in file <doctest README.rst[74]>: 'require_x_positive = icontract.require(lambda x: x > 0)\n'
However, we haven't faced a situation in the code base where we would do something like the above, so we are unsure
whether this is a big issue. As long as decorators are directly applied to functions and classes, everything
diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index 90d4a44..8b40d21 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -52,7 +52,15 @@ def _kwargs_from_call(param_names: List[str], kwdefaults: Dict[str, Any], args:
:return: resolved arguments as they would be passed to the function
"""
# pylint: disable=too-many-arguments
- resolved_kwargs = dict() # type: MutableMapping[str, Any]
+
+ # (Marko Ristin, 2020-12-01)
+ # Insert _ARGS and _KWARGS preemptively even if they are not needed by any contract.
+ # This makes the code logic much simpler since we do not explicitly check if a contract would
+ # need them, though it might incur a subtle computational overhead
+ # (*e.g.*, when the contracts do not need them or don't use any argument at all).
+ # We need to have a concrete issue where profiling helps us determine if this is a real
+ # bottleneck or not and not optimize for no real benefit.
+ resolved_kwargs = {'_ARGS': args, '_KWARGS': kwargs}
# Set the default argument values as condition parameters.
for param_name, param_value in kwdefaults.items():
@@ -224,7 +232,9 @@ def _assert_postcondition(contract: Contract, resolved_kwargs: Mapping[str, Any]
both argument values captured in snapshots and actual argument values and the result of a function.
:param contract: contract to be verified
- :param resolved_kwargs: resolved keyword arguments (including the default values, ``result`` and ``OLD``)
+ :param resolved_kwargs:
+ resolved keyword arguments (including the default values, ``result``,``OLD``
+ ``_ARGS`` and ``_KWARGS``)
:return:
"""
assert 'result' in resolved_kwargs, \
@@ -324,6 +334,16 @@ def decorate_with_checker(func: CallableT) -> CallableT:
"per function)."
sign = inspect.signature(func)
+ if '_ARGS' in sign.parameters:
+ raise ValueError(
+ 'The arguments of the function to be decorated with a contract checker include "_ARGS" which is '
+ 'a reserved placeholder for positional arguments in the condition.')
+
+ if '_KWARGS' in sign.parameters:
+ raise ValueError(
+ 'The arguments of the function to be decorated with a contract checker include "_KWARGS" which is '
+ 'a reserved placeholder for keyword arguments in the condition.')
+
param_names = list(sign.parameters.keys())
# Determine the default argument values.
@@ -340,6 +360,14 @@ def decorate_with_checker(func: CallableT) -> CallableT:
"""Wrap func by checking the preconditions and postconditions."""
# pylint: disable=too-many-branches
+ if '_ARGS' in kwargs:
+ raise TypeError('The arguments of the function call include "_ARGS" which is '
+ 'a placeholder for positional arguments in a condition.')
+
+ if '_KWARGS' in kwargs:
+ raise TypeError('The arguments of the function call include "_KWARGS" which is '
+ 'a placeholder for keyword arguments in a condition.')
+
# Use try-finally instead of ExitStack for performance.
try:
# If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop
| Parquery/icontract | 021f478672e480372d277d45384e3e65d1639d1a | diff --git a/tests/test_args_and_kwargs_in_contract.py b/tests/test_args_and_kwargs_in_contract.py
new file mode 100644
index 0000000..e94343c
--- /dev/null
+++ b/tests/test_args_and_kwargs_in_contract.py
@@ -0,0 +1,239 @@
+# pylint: disable=missing-docstring
+# pylint: disable=no-self-use
+# pylint: disable=invalid-name
+# pylint: disable=unused-argument
+# pylint: disable=no-member
+# pylint: disable=unnecessary-lambda
+import copy
+import textwrap
+import unittest
+from typing import Optional, Any, Tuple, Dict
+
+import icontract
+import tests.error
+
+
+class TestArgs(unittest.TestCase):
+ def test_args_without_variable_positional_arguments(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+
+ def set_args(args: Tuple[Any, ...]) -> bool:
+ nonlocal recorded_args
+ recorded_args = copy.copy(args)
+ return True
+
+ @icontract.require(lambda _ARGS: set_args(_ARGS))
+ def some_func(x: int) -> None:
+ pass
+
+ some_func(3)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((3, ), recorded_args)
+
+ def test_args_with_named_and_variable_positional_arguments(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+
+ def set_args(args: Tuple[Any, ...]) -> bool:
+ nonlocal recorded_args
+ recorded_args = copy.copy(args)
+ return True
+
+ @icontract.require(lambda _ARGS: set_args(_ARGS))
+ def some_func(x: int, *args: Any) -> None:
+ pass
+
+ some_func(3, 2)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((3, 2), recorded_args)
+
+ def test_args_with_only_variable_positional_arguments(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+
+ def set_args(args: Tuple[Any, ...]) -> bool:
+ nonlocal recorded_args
+ recorded_args = copy.copy(args)
+ return True
+
+ @icontract.require(lambda _ARGS: set_args(_ARGS))
+ def some_func(*args: Any) -> None:
+ pass
+
+ some_func(3, 2, 1)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((3, 2, 1), recorded_args)
+
+ def test_args_with_uncommon_variable_positional_arguments(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+
+ def set_args(args: Tuple[Any, ...]) -> bool:
+ nonlocal recorded_args
+ recorded_args = copy.copy(args)
+ return True
+
+ @icontract.require(lambda _ARGS: set_args(_ARGS))
+ def some_func(*arguments: Any) -> None:
+ pass
+
+ some_func(3, 2, 1, 0)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((3, 2, 1, 0), recorded_args)
+
+ def test_fail(self) -> None:
+ @icontract.require(lambda _ARGS: len(_ARGS) > 2)
+ def some_func(*args: Any) -> None:
+ pass
+
+ violation_error = None # type: Optional[icontract.ViolationError]
+ try:
+ some_func(3)
+ except icontract.ViolationError as err:
+ violation_error = err
+
+ assert violation_error is not None
+ self.assertEqual(
+ textwrap.dedent('''\
+ len(_ARGS) > 2:
+ _ARGS was (3,)
+ len(_ARGS) was 1'''), tests.error.wo_mandatory_location(str(violation_error)))
+
+
+class TestKwargs(unittest.TestCase):
+ def test_kwargs_without_variable_keyword_arguments(self) -> None:
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_kwargs(kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_kwargs
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _KWARGS: set_kwargs(_KWARGS))
+ def some_func(x: int) -> None:
+ pass
+
+ some_func(x=3)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"x": 3}, recorded_kwargs)
+
+ def test_kwargs_with_named_and_variable_keyword_arguments(self) -> None:
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_kwargs(kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_kwargs
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _KWARGS: set_kwargs(_KWARGS))
+ def some_func(x: int, **kwargs: Any) -> None:
+ pass
+
+ some_func(x=3, y=2)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"x": 3, "y": 2}, recorded_kwargs)
+
+ def test_kwargs_with_only_variable_keyword_arguments(self) -> None:
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_kwargs(kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_kwargs
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _KWARGS: set_kwargs(_KWARGS))
+ def some_func(**kwargs: Any) -> None:
+ pass
+
+ some_func(x=3, y=2, z=1)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"x": 3, "y": 2, "z": 1}, recorded_kwargs)
+
+ def test_kwargs_with_uncommon_argument_name_for_variable_keyword_arguments(self) -> None:
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_kwargs(kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_kwargs
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _KWARGS: set_kwargs(_KWARGS))
+ def some_func(**parameters: Any) -> None:
+ pass
+
+ some_func(x=3, y=2, z=1, a=0)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"x": 3, "y": 2, "z": 1, "a": 0}, recorded_kwargs)
+
+ def test_fail(self) -> None:
+ @icontract.require(lambda _KWARGS: 'x' in _KWARGS)
+ def some_func(**kwargs: Any) -> None:
+ pass
+
+ violation_error = None # type: Optional[icontract.ViolationError]
+ try:
+ some_func(y=3)
+ except icontract.ViolationError as err:
+ violation_error = err
+
+ assert violation_error is not None
+ self.assertEqual(
+ textwrap.dedent("'x' in _KWARGS: _KWARGS was {'y': 3}"),
+ tests.error.wo_mandatory_location(str(violation_error)))
+
+
+class TestArgsAndKwargs(unittest.TestCase):
+ def test_that_args_and_kwargs_are_both_passed_as_placeholders(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_args_and_kwargs(args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_args
+ nonlocal recorded_kwargs
+ recorded_args = copy.copy(args)
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _ARGS, _KWARGS: set_args_and_kwargs(_ARGS, _KWARGS))
+ def some_func(*args: Any, **kwargs: Any) -> None:
+ pass
+
+ some_func(5, x=10, y=20, z=30)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((5, ), recorded_args)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"x": 10, "y": 20, "z": 30}, recorded_kwargs)
+
+ def test_a_very_mixed_case(self) -> None:
+ recorded_args = None # type: Optional[Tuple[Any, ...]]
+ recorded_kwargs = None # type: Optional[Dict[str, Any]]
+
+ def set_args_and_kwargs(args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> bool:
+ nonlocal recorded_args
+ nonlocal recorded_kwargs
+ recorded_args = copy.copy(args)
+ recorded_kwargs = copy.copy(kwargs)
+ return True
+
+ @icontract.require(lambda _ARGS, _KWARGS: set_args_and_kwargs(_ARGS, _KWARGS))
+ def some_func(x: int, y: int, *args: Any, **kwargs: Any) -> None:
+ pass
+
+ some_func(5, 10, 20, z=30)
+
+ assert recorded_args is not None
+ self.assertTupleEqual((5, 10, 20), recorded_args)
+
+ assert recorded_kwargs is not None
+ self.assertDictEqual({"z": 30}, recorded_kwargs)
+
+
+if __name__ == '__main__':
+ unittest.main()
| *args and **kwargs not supported
Given the following code snippet:
```py
from icontract import require
def test_args():
@require(lambda *args: args)
def args(*args, **kwargs):
pass
args(0, x=0)
def test_kwargs():
@require(lambda **kwargs: kwargs)
def args(*args, **kwargs):
pass
args(0, x=0)
def test_both():
@require(lambda *args, **kwargs: args and kwargs)
def args(*args, **kwargs):
pass
args(0, x=0)
```
I get errors saying that precondition arguments could not be set (from `_checkers.py:114`).
Unless I've missed it, the README didn't explicitly state that * and ** aren't supported. My use case is a rather generic decorator applied to some endpoints. It shouldn't matter how these endpoints are called, but they must contain a request (for the decorator to do its magic), for which I want to create a contract.
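*(For reference, the checker patch at the top of this entry addresses exactly this case by exposing reserved `_ARGS` and `_KWARGS` placeholders to the conditions. A minimal sketch of a contract over variadic arguments, assuming a version of icontract that includes that change — the endpoint and argument names are only illustrative:)*
```py
from icontract import require

# _KWARGS is a dict of the keyword arguments of the call, _ARGS a tuple of the
# positional ones; both names are reserved and filled in by the checker.
@require(lambda _KWARGS: "request" in _KWARGS)
def endpoint(*args, **kwargs):
    pass

endpoint(0, request="GET /health")  # precondition holds
# endpoint(0, x=0)                  # would raise icontract.ViolationError
```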
Some observations:
- `@require(lambda args: ...)` without the asterisk only passes the first argument supplied to the function to the check (i.e. 0, not [0])
- `@require(lambda kwargs: ...)` without the asterisks raises the same error
I would gladly help with fixing/implementing this since I believe every language today should provide tools for DBC, and your library does a good job. Any thoughts on the feasibility of this? :) | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_args_and_kwargs_in_contract.py::TestArgs::test_args_with_named_and_variable_positional_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestArgs::test_args_with_only_variable_positional_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestArgs::test_args_with_uncommon_variable_positional_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestArgs::test_args_without_variable_positional_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestArgs::test_fail",
"tests/test_args_and_kwargs_in_contract.py::TestKwargs::test_fail",
"tests/test_args_and_kwargs_in_contract.py::TestKwargs::test_kwargs_with_named_and_variable_keyword_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestKwargs::test_kwargs_with_only_variable_keyword_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestKwargs::test_kwargs_with_uncommon_argument_name_for_variable_keyword_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestKwargs::test_kwargs_without_variable_keyword_arguments",
"tests/test_args_and_kwargs_in_contract.py::TestArgsAndKwargs::test_a_very_mixed_case",
"tests/test_args_and_kwargs_in_contract.py::TestArgsAndKwargs::test_that_args_and_kwargs_are_both_passed_as_placeholders"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-12-02T08:05:42Z" | mit |
|
Parquery__icontract-187 | diff --git a/README.rst b/README.rst
index d5660c1..08b5f1f 100644
--- a/README.rst
+++ b/README.rst
@@ -29,8 +29,9 @@ It also gives a base for a flourishing of a wider ecosystem:
`Hypothesis <https://hypothesis.readthedocs.io/en/latest/>`_ strategies based on the contracts,
* together with IDE integrations such as
- `icontract-hypothesis-vim <https://github.com/mristin/icontract-hypothesis-vim>`_ and
- `icontract-hypothesis-pycharm <https://github.com/mristin/icontract-hypothesis-pycharm>`_,
+ `icontract-hypothesis-vim <https://github.com/mristin/icontract-hypothesis-vim>`_,
+ `icontract-hypothesis-pycharm <https://github.com/mristin/icontract-hypothesis-pycharm>`_, and
+ `icontract-hypothesis-vscode <https://github.com/mristin/icontract-hypothesis-vscode>`_,
* An ongoing integration with `CrossHair <https://github.com/pschanely/CrossHair>`_, and
* An ongoing integration with `FastAPI <https://github.com/tiangolo/fastapi/issues/1996>`_.
@@ -925,7 +926,7 @@ The following scripts were run:
* `benchmarks/against_others/compare_postcondition.py <https://github.com/Parquery/icontract/tree/master/benchmarks/against_others/compare_postcondition.py>`_
The benchmarks were executed on Intel(R) Xeon(R) E-2276M CPU @ 2.80GHz.
-We used Python 3.8.5, icontract 2.3.5, deal 4.2.0 and dpcontracts 0.6.0.
+We used Python 3.8.5, icontract 2.4.1, deal 4.1.0 and dpcontracts 0.6.0.
The following tables summarize the results.
@@ -934,10 +935,10 @@ Benchmarking invariant at __init__:
========================= ============ ============== =======================
Case Total time Time per run Relative time per run
========================= ============ ============== =======================
-`ClassWithIcontract` 1.74 s 1.74 ΞΌs 100%
-`ClassWithDpcontracts` 0.55 s 0.55 ΞΌs 32%
-`ClassWithDeal` 3.26 s 3.26 ΞΌs 187%
-`ClassWithInlineContract` 0.33 s 0.33 ΞΌs 19%
+`ClassWithIcontract` 1.36 s 1.36 ΞΌs 100%
+`ClassWithDpcontracts` 0.46 s 0.46 ΞΌs 34%
+`ClassWithDeal` 2.65 s 2.65 ΞΌs 195%
+`ClassWithInlineContract` 0.27 s 0.27 ΞΌs 20%
========================= ============ ============== =======================
Benchmarking invariant at a function:
@@ -945,10 +946,10 @@ Benchmarking invariant at a function:
========================= ============ ============== =======================
Case Total time Time per run Relative time per run
========================= ============ ============== =======================
-`ClassWithIcontract` 2.48 s 2.48 ΞΌs 100%
-`ClassWithDpcontracts` 0.56 s 0.56 ΞΌs 22%
-`ClassWithDeal` 9.76 s 9.76 ΞΌs 393%
-`ClassWithInlineContract` 0.28 s 0.28 ΞΌs 11%
+`ClassWithIcontract` 1.94 s 1.94 ΞΌs 100%
+`ClassWithDpcontracts` 0.46 s 0.46 ΞΌs 24%
+`ClassWithDeal` 7.14 s 7.14 ΞΌs 368%
+`ClassWithInlineContract` 0.23 s 0.23 ΞΌs 12%
========================= ============ ============== =======================
Benchmarking precondition:
@@ -956,10 +957,10 @@ Benchmarking precondition:
=============================== ============ ============== =======================
Case Total time Time per run Relative time per run
=============================== ============ ============== =======================
-`function_with_icontract` 0.03 s 3.17 ΞΌs 100%
-`function_with_dpcontracts` 0.65 s 64.62 ΞΌs 2037%
-`function_with_deal` 0.16 s 16.04 ΞΌs 506%
-`function_with_inline_contract` 0.00 s 0.17 ΞΌs 6%
+`function_with_icontract` 0.03 s 2.61 ΞΌs 100%
+`function_with_dpcontracts` 0.51 s 50.52 ΞΌs 1939%
+`function_with_deal` 0.13 s 12.59 ΞΌs 483%
+`function_with_inline_contract` 0.00 s 0.15 ΞΌs 6%
=============================== ============ ============== =======================
Benchmarking postcondition:
@@ -967,11 +968,11 @@ Benchmarking postcondition:
=============================== ============ ============== =======================
Case Total time Time per run Relative time per run
=============================== ============ ============== =======================
-`function_with_icontract` 0.03 s 3.01 ΞΌs 100%
-`function_with_dpcontracts` 0.66 s 65.78 ΞΌs 2187%
-`function_with_deal_post` 0.01 s 1.12 ΞΌs 37%
-`function_with_deal_ensure` 0.02 s 1.62 ΞΌs 54%
-`function_with_inline_contract` 0.00 s 0.18 ΞΌs 6%
+`function_with_icontract` 0.03 s 2.63 ΞΌs 100%
+`function_with_dpcontracts` 0.51 s 50.59 ΞΌs 1921%
+`function_with_deal_post` 0.01 s 0.89 ΞΌs 34%
+`function_with_deal_ensure` 0.01 s 1.23 ΞΌs 47%
+`function_with_inline_contract` 0.00 s 0.14 ΞΌs 5%
=============================== ============ ============== =======================
diff --git a/icontract/_checkers.py b/icontract/_checkers.py
index aaa5b97..7fde7be 100644
--- a/icontract/_checkers.py
+++ b/icontract/_checkers.py
@@ -638,7 +638,9 @@ def add_invariant_checks(cls: type) -> None:
names_properties = [] # type: List[Tuple[str, property]]
# Filter out entries in the directory which are certainly not candidates for decoration.
- for name, value in [(name, getattr(cls, name)) for name in dir(cls)]:
+ for name in dir(cls):
+ value = getattr(cls, name)
+
# __new__ is a special class method (though not marked properly with @classmethod!).
# We need to ignore __repr__ to prevent endless loops when generating error messages.
# __getattribute__, __setattr__ and __delattr__ are too invasive and alter the state of the instance.
@@ -658,15 +660,21 @@ def add_invariant_checks(cls: type) -> None:
not isinstance(value, property):
continue
- # Ignore class methods
- if getattr(value, "__self__", None) is cls:
- continue
-
# Ignore "protected"/"private" methods
if name.startswith("_") and not (name.startswith("__") and name.endswith("__")):
continue
if inspect.isfunction(value) or isinstance(value, _SLOT_WRAPPER_TYPE):
+ # Ignore class methods
+ if getattr(value, "__self__", None) is cls:
+ continue
+
+ # Ignore static methods
+ # See https://stackoverflow.com/questions/14187973/python3-check-if-method-is-static
+ bound_value = inspect.getattr_static(cls, name, None)
+ if isinstance(bound_value, staticmethod):
+ continue
+
names_funcs.append((name, value))
elif isinstance(value, property):
| Parquery/icontract | 8d7639edb5fe2bb305e214203dc2fb8b3207fa9d | diff --git a/tests/test_invariant.py b/tests/test_invariant.py
index d47cf44..6a0e1fd 100644
--- a/tests/test_invariant.py
+++ b/tests/test_invariant.py
@@ -62,6 +62,46 @@ class TestOK(unittest.TestCase):
inst = SomeClass()
self.assertEqual(100, inst.x)
+ def test_static_method(self) -> None:
+ # Adapted from https://github.com/Parquery/icontract/issues/186
+ @icontract.invariant(lambda self: A.some_static_method(self.x))
+ @icontract.invariant(lambda self: self.some_instance_method())
+ class A:
+ def __init__(self) -> None:
+ self.x = 10
+
+ def some_instance_method(self) -> bool:
+ # We need this instance method for easier debugging.
+ return self.x < 100
+
+ @staticmethod
+ def some_static_method(x: int) -> bool:
+ return x > 0
+
+ _ = A()
+
+ def test_inherited_static_method(self) -> None:
+ @icontract.invariant(lambda self: A.some_static_method(self.x))
+ @icontract.invariant(lambda self: self.some_instance_method())
+ class A:
+ def __init__(self) -> None:
+ self.x = 10
+
+ def some_instance_method(self) -> bool:
+ # We need this instance method for easier debugging.
+ return self.x < 100
+
+ @staticmethod
+ def some_static_method(x: int) -> bool:
+ return x > 0
+
+ # We need to test for inheritance.
+ # See https://stackoverflow.com/questions/14187973/#comment74562120_37147128
+ class B(A):
+ pass
+
+ _ = B()
+
def test_protected_method_may_violate_inv(self) -> None:
@icontract.invariant(lambda self: self.x > 0)
class SomeClass:
| KeyError in contract caused by staticmethod and class invariant
```Python
from icontract import invariant
@invariant(lambda self: all(" " not in part for part in self.parts))
class ID:
def __init__(self, identifier: str) -> None:
self.parts = identifier.split(self.separator())
@staticmethod
def separator() -> str:
return "."
ID("A")
```
```Console
$ python test.py
Traceback (most recent call last):
File "/tmp/venv/lib/python3.9/site-packages/icontract/_checkers.py", line 576, in wrapper
instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)
File "/tmp/venv/lib/python3.9/site-packages/icontract/_checkers.py", line 499, in _find_self
return kwargs["self"]
KeyError: 'self'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/tmp/test.py", line 14, in <module>
ID("A")
File "/tmp/venv/lib/python3.9/site-packages/icontract/_checkers.py", line 561, in wrapper
result = func(*args, **kwargs)
File "/tmp/test.py", line 7, in __init__
self.parts = identifier.split(self.separator())
File "/tmp/venv/lib/python3.9/site-packages/icontract/_checkers.py", line 578, in wrapper
raise KeyError(("The parameter 'self' could not be found in the call to function {!r}: "
KeyError: "The parameter 'self' could not be found in the call to function <function ID.separator at 0x7f26061a1550>: the param names were [], the args were (<__main__.ID object at 0x7f260636beb0>,) and kwargs were {}"
```
### Versions
```
icontract 2.4.1
asttokens 2.0.4
Python 3.9.1
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_invariant.py::TestOK::test_inherited_static_method",
"tests/test_invariant.py::TestOK::test_static_method"
] | [
"tests/test_invariant.py::TestOK::test_class_method",
"tests/test_invariant.py::TestOK::test_init",
"tests/test_invariant.py::TestOK::test_instance_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_private_method",
"tests/test_invariant.py::TestOK::test_inv_broken_before_protected_method",
"tests/test_invariant.py::TestOK::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestOK::test_magic_method",
"tests/test_invariant.py::TestOK::test_new_exempted",
"tests/test_invariant.py::TestOK::test_no_dict_pollution",
"tests/test_invariant.py::TestOK::test_private_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_protected_method_may_violate_inv",
"tests/test_invariant.py::TestOK::test_subclass_of_generic_mapping",
"tests/test_invariant.py::TestViolation::test_condition_as_function",
"tests/test_invariant.py::TestViolation::test_condition_as_function_with_default_argument_value",
"tests/test_invariant.py::TestViolation::test_init",
"tests/test_invariant.py::TestViolation::test_inv_as_precondition",
"tests/test_invariant.py::TestViolation::test_inv_ok_but_post_violated",
"tests/test_invariant.py::TestViolation::test_inv_violated_after_pre",
"tests/test_invariant.py::TestViolation::test_inv_violated_but_post_ok",
"tests/test_invariant.py::TestViolation::test_inv_with_empty_arguments",
"tests/test_invariant.py::TestViolation::test_magic_method",
"tests/test_invariant.py::TestViolation::test_method",
"tests/test_invariant.py::TestViolation::test_multiple_invs_first_violated",
"tests/test_invariant.py::TestViolation::test_multiple_invs_last_violated",
"tests/test_invariant.py::TestProperty::test_property_deleter",
"tests/test_invariant.py::TestProperty::test_property_getter",
"tests/test_invariant.py::TestProperty::test_property_setter",
"tests/test_invariant.py::TestError::test_as_function",
"tests/test_invariant.py::TestError::test_as_function_with_empty_args",
"tests/test_invariant.py::TestError::test_as_type",
"tests/test_invariant.py::TestToggling::test_disabled",
"tests/test_invariant.py::TestInvalid::test_no_boolyness",
"tests/test_invariant.py::TestInvalid::test_with_invalid_arguments"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-23T13:47:23Z" | mit |
|
Parquery__pyicontract-lint-25 | diff --git a/icontract_lint/__init__.py b/icontract_lint/__init__.py
index b35a405..8639113 100644
--- a/icontract_lint/__init__.py
+++ b/icontract_lint/__init__.py
@@ -2,6 +2,7 @@
import collections
import enum
import json
+import os
import pathlib
import re
import sys
@@ -589,9 +590,10 @@ def output_verbose(errors: List[Error], stream: TextIO) -> None:
"""
for err in errors:
if err.lineno is not None:
- stream.write("{}:{}: {} ({})\n".format(err.filename, err.lineno, err.description, err.identifier.value))
+ stream.write("{}:{}: {} ({}){}".format(err.filename, err.lineno, err.description, err.identifier.value,
+ os.linesep))
else:
- stream.write("{}: {} ({})\n".format(err.filename, err.description, err.identifier.value))
+ stream.write("{}: {} ({}){}".format(err.filename, err.description, err.identifier.value, os.linesep))
def output_json(errors: List[Error], stream: TextIO) -> None:
diff --git a/icontract_lint/main.py b/icontract_lint/main.py
index aeca12b..d5028f7 100644
--- a/icontract_lint/main.py
+++ b/icontract_lint/main.py
@@ -4,6 +4,7 @@
# This file is necessary so that we can specify the entry point for pex.
import argparse
+import os
import pathlib
import sys
from typing import List, Any, TextIO
@@ -42,13 +43,16 @@ def parse_args(sys_argv: List[str]) -> Args:
def _main(args: Args, stream: TextIO) -> int:
"""Execute the main routine."""
if args.version:
- stream.write("{}\n".format(pyicontract_lint_meta.__version__))
+ stream.write("{}{}".format(pyicontract_lint_meta.__version__, os.linesep))
return 0
errors = icontract_lint.check_paths(paths=args.paths)
if args.format == 'verbose':
- icontract_lint.output_verbose(errors=errors, stream=stream)
+ if not errors:
+ stream.write("No errors detected.{}".format(os.linesep))
+ else:
+ icontract_lint.output_verbose(errors=errors, stream=stream)
elif args.format == 'json':
icontract_lint.output_json(errors=errors, stream=stream)
else:
| Parquery/pyicontract-lint | 388bc70a37d046379f0496f6d3eed7b33d5dcdc0 | diff --git a/tests/test_icontract_lint.py b/tests/test_icontract_lint.py
index f66931e..be91d62 100644
--- a/tests/test_icontract_lint.py
+++ b/tests/test_icontract_lint.py
@@ -2,6 +2,7 @@
# pylint: disable=missing-docstring
import io
+import os
import pathlib
import sys
import tempfile
@@ -725,8 +726,9 @@ class TestOutputVerbose(unittest.TestCase):
icontract_lint.output_verbose(errors=errs, stream=stream)
- self.assertEqual('/path/to/some/file.py:123: The contract decorator lacks the condition. (no-condition)\n',
- buf.getvalue())
+ self.assertEqual(
+ '/path/to/some/file.py:123: The contract decorator lacks the condition. (no-condition){}'.format(
+ os.linesep), buf.getvalue())
class TestOutputJson(unittest.TestCase):
diff --git a/tests/test_main.py b/tests/test_main.py
index ae4f23a..fca1281 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
"""Test the main routine."""
import io
+import os
import pathlib
import sys
import tempfile
@@ -93,6 +94,20 @@ class TestMain(unittest.TestCase):
]""".format(pth=str(pth).replace("\\", "\\\\"))),
buf.getvalue())
+ def test_verbose_no_errors(self):
+ with tempfile.TemporaryDirectory() as tmp:
+ tmp_path = pathlib.Path(tmp)
+ pth = tmp_path / "some-executable.py"
+ pth.write_text('"""all ok"""')
+
+ buf = io.StringIO()
+ stream = cast(TextIO, buf)
+ args = icontract_lint.main.parse_args(sys_argv=[str(pth)])
+ retcode = icontract_lint.main._main(args=args, stream=stream)
+
+ self.assertEqual(0, retcode)
+ self.assertEqual(("No errors detected.{}").format(os.linesep), buf.getvalue())
+
def test_verbose(self):
with tempfile.TemporaryDirectory() as tmp:
tmp_path = pathlib.Path(tmp)
@@ -107,10 +122,9 @@ class TestMain(unittest.TestCase):
retcode = icontract_lint.main._main(args=args, stream=stream)
self.assertEqual(1, retcode)
- self.assertEqual(
- ("{pth}:3: Precondition argument(s) are missing in "
- "the function signature: x (pre-invalid-arg)\n").format(pth=str(pth)),
- buf.getvalue())
+ self.assertEqual(("{}:3: Precondition argument(s) are missing in "
+ "the function signature: x (pre-invalid-arg){}").format(str(pth), os.linesep),
+ buf.getvalue())
def test_dont_panic(self):
with tempfile.TemporaryDirectory() as tmp:
@@ -134,7 +148,7 @@ class TestMain(unittest.TestCase):
retcode = icontract_lint.main._main(args=args, stream=stream)
self.assertEqual(0, retcode)
- self.assertEqual('{}\n'.format(pyicontract_lint_meta.__version__), buf.getvalue())
+ self.assertEqual('{}{}'.format(pyicontract_lint_meta.__version__, os.linesep), buf.getvalue())
if __name__ == '__main__':
| Running pyicontract-lint without linting errors should notify success
Hi @mristin, it's me again :)
Just an enhancement proposal: when I first ran the package I had no linting errors, and therefore it produced no output at all, even in `--verbose` mode. I had to run it with `--format json` to get the following output:
```bash
> pyicontract-lint --format verbose telereddit # no output
> pyicontract-lint --format json telereddit
[]
```
It would be great if we could get some sort of feedback on a successful run (at least in `--verbose` mode). I'm thinking of something along the lines of:
```bash
> pyicontract-lint --format verbose telereddit
--------------------------------------------------------------------
Checked xx files. OK
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_main.py::TestMain::test_verbose_no_errors"
] | [
"tests/test_icontract_lint.py::TestCheckUnreadableFile::test_parse_failure",
"tests/test_icontract_lint.py::TestCheckUnreadableFile::test_read_failure",
"tests/test_icontract_lint.py::TestCheckFile::test_disabled",
"tests/test_icontract_lint.py::TestCheckFile::test_inv_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_inv_ok",
"tests/test_icontract_lint.py::TestCheckFile::test_missing_condition",
"tests/test_icontract_lint.py::TestCheckFile::test_no_condition_in_inv",
"tests/test_icontract_lint.py::TestCheckFile::test_post_invalid_args",
"tests/test_icontract_lint.py::TestCheckFile::test_post_old_conflict",
"tests/test_icontract_lint.py::TestCheckFile::test_post_result_conflict",
"tests/test_icontract_lint.py::TestCheckFile::test_post_result_none",
"tests/test_icontract_lint.py::TestCheckFile::test_post_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_post_valid_without_returns",
"tests/test_icontract_lint.py::TestCheckFile::test_pre_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_pre_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_wo_post",
"tests/test_icontract_lint.py::TestCheckFile::test_syntax_error",
"tests/test_icontract_lint.py::TestCheckFile::test_uninferrable_decorator",
"tests/test_icontract_lint.py::TestCheckFile::test_uninferrable_returns",
"tests/test_icontract_lint.py::TestCheckFile::test_wo_contracts",
"tests/test_icontract_lint.py::TestCheckPaths::test_directory",
"tests/test_icontract_lint.py::TestCheckPaths::test_empty",
"tests/test_icontract_lint.py::TestCheckPaths::test_file",
"tests/test_icontract_lint.py::TestOutputVerbose::test_empty",
"tests/test_icontract_lint.py::TestOutputVerbose::test_errors",
"tests/test_icontract_lint.py::TestOutputJson::test_empty",
"tests/test_icontract_lint.py::TestOutputJson::test_errors",
"tests/test_main.py::TestParseArgs::test_dont_panic",
"tests/test_main.py::TestParseArgs::test_format",
"tests/test_main.py::TestParseArgs::test_multiple_paths",
"tests/test_main.py::TestParseArgs::test_panic",
"tests/test_main.py::TestParseArgs::test_single_path",
"tests/test_main.py::TestMain::test_dont_panic",
"tests/test_main.py::TestMain::test_json",
"tests/test_main.py::TestMain::test_verbose",
"tests/test_main.py::TestMain::test_version"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-08-25T19:10:38Z" | mit |
|
Parquery__pyicontract-lint-28 | diff --git a/icontract_lint/__init__.py b/icontract_lint/__init__.py
index 8639113..9dced7a 100644
--- a/icontract_lint/__init__.py
+++ b/icontract_lint/__init__.py
@@ -351,25 +351,6 @@ class _LintVisitor(_AstroidVisitor):
else:
raise NotImplementedError("Unhandled pytype: {}".format(pytype))
- def _infer_decorator(self, node: astroid.nodes.Call) -> Optional[astroid.bases.Instance]:
- """
- Try to infer the decorator as instance of a class.
-
- :param node: decorator AST node
- :return: instance of the decorator or None if decorator instance could not be inferred
- """
- # While this function does not use ``self``, keep it close to the usage to improve the readability.
- # pylint: disable=no-self-use
- try:
- decorator = next(node.infer())
- except astroid.exceptions.NameInferenceError:
- return None
-
- if decorator is astroid.Uninferable:
- return None
-
- return decorator
-
def visit_FunctionDef(self, node: astroid.nodes.FunctionDef) -> None: # pylint: disable=invalid-name
"""Lint the function definition."""
if node.decorators is None:
@@ -394,7 +375,25 @@ class _LintVisitor(_AstroidVisitor):
pass
# Infer the decorator instances
- decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes]
+
+ def infer_decorator(a_node: astroid.nodes.Call) -> Optional[astroid.bases.Instance]:
+ """
+ Try to infer the decorator as instance of a class.
+
+ :param a_node: decorator AST node
+ :return: instance of the decorator or None if decorator instance could not be inferred
+ """
+ try:
+ decorator = next(a_node.infer())
+ except (astroid.exceptions.NameInferenceError, astroid.exceptions.InferenceError):
+ return None
+
+ if decorator is astroid.Uninferable:
+ return None
+
+ return decorator
+
+ decorators = [infer_decorator(a_node=decorator_node) for decorator_node in node.decorators.nodes]
# Check the decorators individually
for decorator, decorator_node in zip(decorators, node.decorators.nodes):
| Parquery/pyicontract-lint | 76ece692dac02dd926a5c1c30fd5933899ffeefc | diff --git a/tests/test_icontract_lint.py b/tests/test_icontract_lint.py
index be91d62..d09c398 100644
--- a/tests/test_icontract_lint.py
+++ b/tests/test_icontract_lint.py
@@ -82,22 +82,33 @@ class TestCheckUnreadableFile(unittest.TestCase):
self.assertEqual(str(pth), errors[0].filename)
-class TestCheckFile(unittest.TestCase):
- def test_wo_contracts(self):
+class TestUninferrableDecorator(unittest.TestCase):
+ def test_astroid_name_inference_error(self):
text = textwrap.dedent("""\
+ @some_uninferrable_decorator
def some_func(x: int) -> int:
pass
-
- class SomeClass:
- def some_method(self, x: int) -> int:
- pass
-
- @classmethod
- def some_class_method(self, x: int) -> int:
+ """)
+
+ with tempfile.TemporaryDirectory() as tmp:
+ tmp_path = pathlib.Path(tmp)
+ pth = tmp_path / "some_module.py"
+ pth.write_text(text)
+
+ with sys_path_with(tmp_path):
+ errors = icontract_lint.check_file(path=pth)
+ self.assertListEqual([], errors)
+
+ def test_astroid_inferrence_error(self):
+ # This example was adapted from the issue https://github.com/Parquery/pyicontract-lint/issues/27.
+ text = textwrap.dedent("""\
+ class RuleTable:
+ @classinstancemethod
+ def insert_rule(cls, index, rule_):
pass
-
- @staticmethod
- def some_static_method(self, x: int) -> int:
+
+ @insert_rule.instancemethod
+ def insert_rule(self, index, rule_):
pass
""")
@@ -108,14 +119,26 @@ class TestCheckFile(unittest.TestCase):
with sys_path_with(tmp_path):
errors = icontract_lint.check_file(path=pth)
-
self.assertListEqual([], errors)
- def test_uninferrable_decorator(self):
+
+class TestCheckFile(unittest.TestCase):
+ def test_wo_contracts(self):
text = textwrap.dedent("""\
- @some_uninferrable_decorator
def some_func(x: int) -> int:
pass
+
+ class SomeClass:
+ def some_method(self, x: int) -> int:
+ pass
+
+ @classmethod
+ def some_class_method(self, x: int) -> int:
+ pass
+
+ @staticmethod
+ def some_static_method(self, x: int) -> int:
+ pass
""")
with tempfile.TemporaryDirectory() as tmp:
| Error linting file w/o icontract
I'm trying out icontract and this linter. I get a fatal error in [this file](https://github.com/pymor/pymor/blob/contracts_type-hints/src/pymor/algorithms/rules.py)
[pip freeze output](https://github.com/Parquery/pyicontract-lint/files/5429910/freeze.txt)
```
pyicontract-lint ./src/pymor/algorithms/rules.py
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 132, in raise_if_nothing_inferred
yield next(generator)
StopIteration: {'node': <Attribute.instancemethod l.209 at 0x7f2f85040350>, 'context': <astroid.context.InferenceContext object at 0x7f2f850a8470>}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/pyicontract-lint", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/icontract_lint/main.py", line 70, in main
return _main(args=args, stream=sys.stdout)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/main.py", line 49, in _main
errors = icontract_lint.check_paths(paths=args.paths)
File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper
result = func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 576, in check_paths
errs.extend(check_recursively(path=pth))
File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper
result = func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 556, in check_recursively
errs.extend(check_file(pth))
File "/usr/local/lib/python3.7/site-packages/icontract/_checkers.py", line 396, in wrapper
result = func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 541, in check_file
lint_visitor.visit(node=tree)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit
return func(node)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 114, in visit_generic
self.visit(child)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit
return func(node)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 483, in visit_ClassDef
self.visit(child)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 109, in visit
return func(node)
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 397, in visit_FunctionDef
decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes]
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 397, in <listcomp>
decorators = [self._infer_decorator(node=decorator_node) for decorator_node in node.decorators.nodes]
File "/usr/local/lib/python3.7/site-packages/icontract_lint/__init__.py", line 364, in _infer_decorator
decorator = next(node.infer())
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 137, in raise_if_nothing_inferred
raise exceptions.InferenceError(**error.args[0])
astroid.exceptions.InferenceError: Inference failed for <Attribute.instancemethod l.209 at 0x7f2f85040350>.
```
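*(For context, the fix above treats a decorator whose inference raises `astroid.exceptions.InferenceError` as uninferrable and skips it instead of letting the exception escape. A rough sketch of the pattern, with `node` standing for a decorator node:)*
```python
import astroid
import astroid.exceptions

def infer_decorator_or_none(node):
    """Return the inferred decorator instance, or None if it cannot be inferred."""
    try:
        decorator = next(node.infer())
    except (astroid.exceptions.NameInferenceError, astroid.exceptions.InferenceError):
        return None

    if decorator is astroid.Uninferable:
        return None

    return decorator
```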
It's reproducible on mybinder if you click `new -> terminal` and run `pyicontract-lint /pymor/src/pymor/algorithms/rules.py`
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pymor/pymor/contracts_type-hints) | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_icontract_lint.py::TestUninferrableDecorator::test_astroid_inferrence_error"
] | [
"tests/test_icontract_lint.py::TestCheckUnreadableFile::test_parse_failure",
"tests/test_icontract_lint.py::TestCheckUnreadableFile::test_read_failure",
"tests/test_icontract_lint.py::TestUninferrableDecorator::test_astroid_name_inference_error",
"tests/test_icontract_lint.py::TestCheckFile::test_disabled",
"tests/test_icontract_lint.py::TestCheckFile::test_inv_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_inv_ok",
"tests/test_icontract_lint.py::TestCheckFile::test_missing_condition",
"tests/test_icontract_lint.py::TestCheckFile::test_no_condition_in_inv",
"tests/test_icontract_lint.py::TestCheckFile::test_post_invalid_args",
"tests/test_icontract_lint.py::TestCheckFile::test_post_old_conflict",
"tests/test_icontract_lint.py::TestCheckFile::test_post_result_conflict",
"tests/test_icontract_lint.py::TestCheckFile::test_post_result_none",
"tests/test_icontract_lint.py::TestCheckFile::test_post_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_post_valid_without_returns",
"tests/test_icontract_lint.py::TestCheckFile::test_pre_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_pre_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_invalid_arg",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_valid",
"tests/test_icontract_lint.py::TestCheckFile::test_snapshot_wo_post",
"tests/test_icontract_lint.py::TestCheckFile::test_syntax_error",
"tests/test_icontract_lint.py::TestCheckFile::test_uninferrable_returns",
"tests/test_icontract_lint.py::TestCheckFile::test_wo_contracts",
"tests/test_icontract_lint.py::TestCheckPaths::test_directory",
"tests/test_icontract_lint.py::TestCheckPaths::test_empty",
"tests/test_icontract_lint.py::TestCheckPaths::test_file",
"tests/test_icontract_lint.py::TestOutputVerbose::test_empty",
"tests/test_icontract_lint.py::TestOutputVerbose::test_errors",
"tests/test_icontract_lint.py::TestOutputJson::test_empty",
"tests/test_icontract_lint.py::TestOutputJson::test_errors"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-23T18:36:04Z" | mit |
|
Parquery__pyicontract-lint-35 | diff --git a/icontract_lint/__init__.py b/icontract_lint/__init__.py
index 0b9a53d..d4f2312 100644
--- a/icontract_lint/__init__.py
+++ b/icontract_lint/__init__.py
@@ -35,6 +35,7 @@ class ErrorID(enum.Enum):
SNAPSHOT_INVALID_ARG = "snapshot-invalid-arg"
SNAPSHOT_WO_CAPTURE = "snapshot-wo-capture"
SNAPSHOT_WO_POST = "snapshot-wo-post"
+ SNAPSHOT_WO_NAME = "snapshot-wo-name"
POST_INVALID_ARG = "post-invalid-arg"
POST_RESULT_NONE = "post-result-none"
POST_RESULT_CONFLICT = "post-result-conflict"
@@ -275,15 +276,23 @@ class _LintVisitor(_AstroidVisitor):
"""
# Find the ``capture=...`` node
capture_node = None # type: Optional[astroid.node_classes.NodeNG]
+ name_node = None # type: Optional[astroid.node_classes.NodeNG]
- if node.args and len(node.args) >= 1:
- capture_node = node.args[0]
+ if node.args:
+ if len(node.args) >= 1:
+ capture_node = node.args[0]
+
+ if len(node.args) >= 2:
+ name_node = node.args[1]
- if capture_node is None and node.keywords:
+ if node.keywords:
for keyword_node in node.keywords:
if keyword_node.arg == "capture":
capture_node = keyword_node.value
+ if keyword_node.arg == "name":
+ name_node = keyword_node.value
+
if capture_node is None:
self.errors.append(
Error(
@@ -305,26 +314,26 @@ class _LintVisitor(_AstroidVisitor):
"Expected the inferred capture to be either a lambda or a function definition, but got: {}".format(
capture)
- capture_args = capture.argnames()
+ capture_arg_set = set(capture.argnames())
- if len(capture_args) > 1:
+ diff = capture_arg_set.difference(func_arg_set)
+
+ if diff:
self.errors.append(
Error(
identifier=ErrorID.SNAPSHOT_INVALID_ARG,
- description="Snapshot capture function expects at most one argument, but got: {}".format(
- capture_args),
+ description="Snapshot argument(s) are missing in the function signature: {}".format(
+ ", ".join(sorted(diff))),
filename=self._filename,
lineno=node.lineno))
- return
- if len(capture_args) == 1 and capture_args[0] not in func_arg_set:
+ if len(capture_arg_set) > 1 and name_node is None:
self.errors.append(
Error(
- identifier=ErrorID.SNAPSHOT_INVALID_ARG,
- description="Snapshot argument is missing in the function signature: {}".format(capture_args[0]),
+ identifier=ErrorID.SNAPSHOT_WO_NAME,
+ description="Snapshot involves multiple arguments, but its name has not been specified.",
filename=self._filename,
lineno=node.lineno))
- return
def _check_func_decorator(self, node: astroid.nodes.Call, decorator: astroid.bases.Instance, func_arg_set: Set[str],
func_has_result: bool) -> None:
| Parquery/pyicontract-lint | ceacf96c6a59173b0f9b0611403c7f6315377963 | diff --git a/tests/test_snapshot.py b/tests/test_snapshot.py
index f55eed3..ba3120c 100644
--- a/tests/test_snapshot.py
+++ b/tests/test_snapshot.py
@@ -34,6 +34,29 @@ class TestSnapshot(unittest.TestCase):
errors = icontract_lint.check_file(path=pth)
self.assertListEqual([], errors)
+ def test_multiple_args_are_ok(self):
+ # This is a regression test related to the issue #32.
+ text = textwrap.dedent("""\
+ from typing import List
+ from icontract import ensure, snapshot
+
+ @snapshot(lambda lst, another_lst: lst + another_lst, name="combined")
+ @ensure(lambda OLD: len(OLD.combined) > 0)
+ def some_func(lst: List[int], another_lst: List[int]) -> None:
+ pass
+ """)
+
+ with tempfile.TemporaryDirectory() as tmp:
+ tmp_path = pathlib.Path(tmp)
+
+ pth = tmp_path / "some_module.py"
+ pth.write_text(text)
+
+ with tests.common.sys_path_with(tmp_path):
+ errors = icontract_lint.check_file(path=pth)
+
+ self.assertListEqual([], errors)
+
def test_invalid_arg(self):
text = textwrap.dedent("""\
from typing import List
@@ -63,11 +86,39 @@ class TestSnapshot(unittest.TestCase):
self.assertDictEqual(
{
'identifier': 'snapshot-invalid-arg',
- 'description': 'Snapshot argument is missing in the function signature: another_lst',
+ 'description': 'Snapshot argument(s) are missing in the function signature: another_lst',
'filename': str(pth),
'lineno': lineno
}, err.as_mapping())
+ def test_multiple_args_and_no_name(self) -> None:
+ text = textwrap.dedent("""\
+ from icontract import snapshot, ensure
+
+ @snapshot(lambda lst, another_lst: lst + another_lst) # No name is specified here.
+ @ensure(lambda OLD, lst: OLD.lst + OLD.another_lst == lst)
+ def some_func(lst: List[int], another_lst: List[int]) -> None:
+ lst.extend(another_lst)
+ """)
+
+ with tempfile.TemporaryDirectory() as tmp:
+ tmp_path = pathlib.Path(tmp)
+
+ pth = tmp_path / "some_module.py"
+ pth.write_text(text)
+
+ with tests.common.sys_path_with(tmp_path):
+ errors = icontract_lint.check_file(path=pth)
+ self.assertEqual(1, len(errors))
+
+ self.assertDictEqual(
+ {
+ 'identifier': 'snapshot-wo-name',
+ 'description': 'Snapshot involves multiple arguments, but its name has not been specified.',
+ 'filename': str(pth),
+ 'lineno': 3
+ }, errors[0].as_mapping()) # type: ignore
+
def test_without_post(self):
text = textwrap.dedent("""\
from typing import List
| Snapshot capture function expects at most one argument
The docs show how you can use snapshots to capture multiple arguments and combine them into one set. However, when I try the same idea, the code runs without error but the linter throws this error:
```Snapshot capture function expects at most one argument, but got: ['account_id', 'account2_id'] (snapshot-invalid-arg)```
Does this linter error imply that the snapshot function should only ever take one argument? And if so, why do the docs say otherwise?
Docs reference: https://icontract.readthedocs.io/en/latest/usage.html#snapshots-a-k-a-old-argument-values
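*(For reference, the fix above accepts a multi-argument snapshot as long as it is given a `name`, mirroring the test case in the patch. A minimal sketch, assuming a recent icontract:)*
```python
from typing import List

from icontract import ensure, snapshot

# A snapshot over several arguments needs an explicit name; it is then
# available on OLD under that name in the postcondition.
@snapshot(lambda lst, another_lst: lst + another_lst, name="combined")
@ensure(lambda OLD, lst: lst == OLD.combined)
def merge(lst: List[int], another_lst: List[int]) -> None:
    lst.extend(another_lst)

merge([1, 2], [3])  # postcondition holds: lst ends up equal to the snapshot
```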
Edit:
In fact, the linter fails on the documentation example:
```
Snapshot capture function expects at most one argument, but got: ['lst_a', 'lst_b'] (snapshot-invalid-arg)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_snapshot.py::TestSnapshot::test_invalid_arg",
"tests/test_snapshot.py::TestSnapshot::test_multiple_args_and_no_name",
"tests/test_snapshot.py::TestSnapshot::test_multiple_args_are_ok"
] | [
"tests/test_snapshot.py::TestSnapshot::test_valid",
"tests/test_snapshot.py::TestSnapshot::test_without_capture",
"tests/test_snapshot.py::TestSnapshot::test_without_post"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-12T08:08:44Z" | mit |
|
Parquery__pylddwrap-13 | diff --git a/lddwrap/__init__.py b/lddwrap/__init__.py
index d1a2d87..520bc07 100644
--- a/lddwrap/__init__.py
+++ b/lddwrap/__init__.py
@@ -252,10 +252,18 @@ def _cmd_output_parser(cmd_out: str) -> List[Dependency]:
"""
dependencies = [] # type: List[Dependency]
- for line in [
- line.strip() for line in cmd_out.split('\n') if line.strip() != ''
- ]:
+ lines = [line.strip() for line in cmd_out.split('\n') if line.strip() != '']
+
+ if len(lines) == 0:
+ return []
+
+ # This is a special case of a static library. The first line refers
+ # to the library and the second line indicates that the library
+ # was statically linked.
+ if len(lines) == 2 and lines[1] == 'statically linked':
+ return []
+ for line in lines:
dep = _parse_line(line=line)
if dep is not None:
dependencies.append(dep)
| Parquery/pylddwrap | b13a44564ec9cee8f623c5a26d888dfd8fa26ca8 | diff --git a/tests/test_ldd.py b/tests/test_ldd.py
index 45e6c8d..2a47981 100644
--- a/tests/test_ldd.py
+++ b/tests/test_ldd.py
@@ -2,6 +2,7 @@
"""Test lddwrap."""
# pylint: disable=missing-docstring,too-many-public-methods
import pathlib
+import tempfile
import textwrap
import unittest
from typing import Any, List, Optional
@@ -140,6 +141,17 @@ class TestParseOutputWithoutUnused(unittest.TestCase):
'Expected 2 parts in the line but found {}: {}'.format(
line.count(' ') + 1, line), str(run_err))
+ def test_parse_static(self) -> None:
+ """Test parsing of the output when we ldd a static library."""
+ # pylint: disable=protected-access
+ deps = lddwrap._cmd_output_parser(
+ textwrap.dedent('''\
+ my_static_lib.so:
+ statically linked
+ '''))
+
+ self.assertListEqual([], deps)
+
class TestAgainstMockLdd(unittest.TestCase):
def test_pwd(self):
@@ -308,6 +320,24 @@ class TestAgainstMockLdd(unittest.TestCase):
[], diff_dependencies(ours=dep, theirs=exp_dep),
"Mismatch at the unused dependency {}".format(i))
+ def test_with_static_library(self) -> None:
+ """Test against a fantasy static library."""
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ lib_pth = pathlib.Path(tmp_dir) / "my_static_lib.so"
+ lib_pth.write_text("totally static!")
+
+ with tests.MockLdd(
+ out=textwrap.dedent('''\
+ my_static_lib.so:
+ \tstatically linked\n'''),
+ out_unused=''):
+ # pylint: enable=line-too-long
+ deps = lddwrap.list_dependencies(path=lib_pth, unused=True)
+
+ # The dependencies are empty since the library is
+ # statically linked.
+ self.assertListEqual([], deps)
+
class TestSorting(unittest.TestCase):
def test_sorting_by_all_attributes(self) -> None:
| pylddwrap 1.1.0 fails to handle statically linked libraries
I have noticed that with the latest release of 1.1.0, the `lddwrap.list_dependencies` fails when using it on a statically linked library:
```
my_static_lib.so:
statically linked
```
with the error message:
> RuntimeError: Unexpected mem address. Expected to match ^\s*\(([^)]*)\)\s*$, but got: 'linked'
This makes sense because normally, for a shared library, this is what you would get:
```
my_share_lib.so:
linux-lib.so.1 (0x00007fff4a9fe000)
linux-lib2.so => path
```
so `statically linked` is parsed such that `statically` is taken to be the file and `linked` is taken to be a memory address, which of course fails the regex match.
Do you think that it would make more sense to avoid proceeding to the parsing step if `ldd` tells us that it's a statically linked library?
For the previous version, 1.0.1, it's happily reporting the following:
```
from pathlib import Path
import lddwrap
deps = lddwrap.list_dependencies(Path('my_static_lib.so'))
for dep in deps:
print(dep.path)
print(dep.found)
print(dep.soname)
statically
True
None
```
The `statically` entry is definitely not a correct file path, but raising an exception does not feel right either, as `ldd` itself doesn't fail.
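*(With the fix above, such input is recognized and the dependency list simply comes back empty. A small usage sketch — the file name is made up and the call assumes `ldd` reports it as statically linked:)*
```python
import pathlib

import lddwrap

# For a statically linked library, ldd prints "statically linked"; with this
# change applied, pylddwrap returns an empty list instead of a bogus entry.
deps = lddwrap.list_dependencies(path=pathlib.Path("my_static_lib.so"))
assert deps == []
```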
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_ldd.py::TestParseOutputWithoutUnused::test_parse_static",
"tests/test_ldd.py::TestAgainstMockLdd::test_with_static_library"
] | [
"tests/test_ldd.py::TestParseOutputWithoutUnused::test_parse_line",
"tests/test_ldd.py::TestParseOutputWithoutUnused::test_parse_wrong_line",
"tests/test_ldd.py::TestAgainstMockLdd::test_bin_dir",
"tests/test_ldd.py::TestAgainstMockLdd::test_bin_dir_with_empty_unused",
"tests/test_ldd.py::TestAgainstMockLdd::test_pwd",
"tests/test_ldd.py::TestAgainstMockLdd::test_with_fantasy_unused",
"tests/test_ldd.py::TestSorting::test_sorting_by_all_attributes"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2020-12-02T20:39:00Z" | mit |
|
Parquery__swagger-to-141 | diff --git a/swagger_to/intermediate.py b/swagger_to/intermediate.py
index c4dc496..8a26b91 100644
--- a/swagger_to/intermediate.py
+++ b/swagger_to/intermediate.py
@@ -432,12 +432,7 @@ def _recursively_strip_descriptions(schema_dict: MutableMapping[str, Any]) -> Mu
new_schema_dict = collections.OrderedDict() # type: MutableMapping[str, Any]
for key, value in schema_dict.items():
- if key.lower() == 'description':
- if not isinstance(value, str):
- raise ValueError("Expected the value in a schema to be a string, but got: {}".format(type(value)))
-
- new_schema_dict[key] = value.strip()
- elif isinstance(value, list):
+ if isinstance(value, list):
lst = [] # type: List[Any]
for item in value:
if isinstance(item, (dict, collections.OrderedDict)):
@@ -451,6 +446,11 @@ def _recursively_strip_descriptions(schema_dict: MutableMapping[str, Any]) -> Mu
new_schema_dict[key] = _recursively_strip_descriptions(schema_dict=value)
elif isinstance(value, swagger_to.swagger.RawDict):
new_schema_dict[key] = _recursively_strip_descriptions(schema_dict=value)
+ elif key.lower() == 'description':
+ if not isinstance(value, str):
+ raise ValueError("Expected the value in a schema to be a string, but got: {}".format(type(value)))
+
+ new_schema_dict[key] = value.strip()
else:
new_schema_dict[key] = value
| Parquery/swagger-to | d0427c9f56e0a30a04e0777eb874954c99f1c90a | diff --git a/tests/cases/intermediate/definitions_item_description_as_object/endpoints.json b/tests/cases/intermediate/definitions_item_description_as_object/endpoints.json
new file mode 100644
index 0000000..1298ed4
--- /dev/null
+++ b/tests/cases/intermediate/definitions_item_description_as_object/endpoints.json
@@ -0,0 +1,44 @@
+[
+ {
+ "consumes": [],
+ "description": "",
+ "line": 14,
+ "method": "get",
+ "operation_id": "get_foo",
+ "parameters": [
+ {
+ "description": "The foo id",
+ "in_what": "path",
+ "json_schema": {
+ "identifier": "",
+ "text": ""
+ },
+ "line": 21,
+ "name": "foo_id",
+ "required": true,
+ "typedef": {
+ "description": "",
+ "format": "",
+ "identifier": "",
+ "json_schema": {
+ "identifier": "",
+ "text": ""
+ },
+ "line": 0,
+ "pattern": "",
+ "type": "string"
+ }
+ }
+ ],
+ "path": "/api/v1/foo",
+ "produces": [],
+ "responses": {
+ "200": {
+ "code": "200",
+ "description": "Success",
+ "line": 17,
+ "typedef": null
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/tests/cases/intermediate/definitions_item_description_as_object/intermediate_params.json b/tests/cases/intermediate/definitions_item_description_as_object/intermediate_params.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/tests/cases/intermediate/definitions_item_description_as_object/intermediate_params.json
@@ -0,0 +1,1 @@
+{}
\ No newline at end of file
diff --git a/tests/cases/intermediate/definitions_item_description_as_object/intermediate_typedefs.json b/tests/cases/intermediate/definitions_item_description_as_object/intermediate_typedefs.json
new file mode 100644
index 0000000..0228d9e
--- /dev/null
+++ b/tests/cases/intermediate/definitions_item_description_as_object/intermediate_typedefs.json
@@ -0,0 +1,65 @@
+{
+ "foo": {
+ "description": "",
+ "identifier": "foo",
+ "json_schema": {
+ "identifier": "foo",
+ "text": "{\n \"title\": \"foo\",\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"id\": {\n \"description\": \"Identifier\"\n },\n \"name\": {\n \"type\": \"string\",\n \"example\": \"TestJob\",\n \"description\": \"Name of the test.\"\n },\n \"description\": {\n \"type\": \"string\",\n \"example\": \"TestJob\",\n \"description\": \"Test description.\"\n }\n }\n}"
+ },
+ "line": 28,
+ "properties": {
+ "id": {
+ "description": "Identifier",
+ "line": 0,
+ "name": "id",
+ "required": false,
+ "typedef": {
+ "description": "",
+ "identifier": "",
+ "json_schema": {
+ "identifier": "",
+ "text": ""
+ },
+ "line": 31
+ }
+ },
+ "name": {
+ "description": "Name of the test.",
+ "line": 0,
+ "name": "name",
+ "required": false,
+ "typedef": {
+ "description": "",
+ "format": null,
+ "identifier": "",
+ "json_schema": {
+ "identifier": "",
+ "text": ""
+ },
+ "line": 33,
+ "pattern": "",
+ "type": "string"
+ }
+ },
+ "description": {
+ "description": "Test description.",
+ "line": 0,
+ "name": "description",
+ "required": false,
+ "typedef": {
+ "description": "",
+ "format": null,
+ "identifier": "",
+ "json_schema": {
+ "identifier": "",
+ "text": ""
+ },
+ "line": 37,
+ "pattern": "",
+ "type": "string"
+ }
+ }
+ },
+ "required": []
+ }
+}
\ No newline at end of file
diff --git a/tests/cases/intermediate/definitions_item_description_as_object/swagger.yaml b/tests/cases/intermediate/definitions_item_description_as_object/swagger.yaml
new file mode 100644
index 0000000..0d77584
--- /dev/null
+++ b/tests/cases/intermediate/definitions_item_description_as_object/swagger.yaml
@@ -0,0 +1,40 @@
+# This is a valid schema, but the definition...->description object broke swagger-to.
+# Test relates to: https://github.com/Parquery/swagger-to/issues/132
+swagger: '2.0'
+info:
+ description: description
+ version: '1.0'
+ title: An API
+basePath: /api/v1
+tags:
+- name: foo
+ description: description
+paths:
+ /foo:
+ get:
+ operationId: get_foo
+ responses:
+ '200':
+ description: Success
+ tags:
+ - foo
+ parameters:
+ - in: path
+ description: The foo id
+ name: foo_id
+ required: true
+ type: string
+definitions:
+ foo:
+ type: object
+ properties:
+ id:
+ description: Identifier
+ name:
+ type: string
+ example: TestJob
+ description: Name of the test.
+ description:
+ type: string
+ example: TestJob
+ description: Test description.
\ No newline at end of file
diff --git a/tests/cases/parsing/info_description_not_string/errors.txt b/tests/cases/parsing/info_description_not_string/errors.txt
new file mode 100644
index 0000000..e5878bd
--- /dev/null
+++ b/tests/cases/parsing/info_description_not_string/errors.txt
@@ -0,0 +1,11 @@
+"info"/"description":RawDict([('name', 'This is expected to fail!')]) is not of type 'string'
+
+Failed validating 'type' in schema['properties']['info']['properties']['description']:
+ {'description': 'A longer description of the API. Should be different '
+ 'from the title. GitHub Flavored Markdown is allowed.',
+ 'type': 'string'}
+
+On instance['info']['description']:
+ RawDict([('name', 'This is expected to fail!')])
+
+We used the JSON schema of OpenAPI 2 from: https://raw.githubusercontent.com/OAI/OpenAPI-Specification/88cd94419e117b154b67b834fa8e471bb98bd346/schemas/v2.0/schema.json
\ No newline at end of file
diff --git a/tests/cases/parsing/info_description_not_string/swagger.yaml b/tests/cases/parsing/info_description_not_string/swagger.yaml
new file mode 100644
index 0000000..20ba39f
--- /dev/null
+++ b/tests/cases/parsing/info_description_not_string/swagger.yaml
@@ -0,0 +1,28 @@
+# This is invalid schema...
+# It confirms the validator fails at a "description" element that MUST be a string.
+# Test relates to: https://github.com/Parquery/swagger-to/issues/132
+swagger: '2.0'
+info:
+ description:
+ name: This is expected to fail!
+ version: '1.0'
+ title: An API
+basePath: /api/v1
+tags:
+- name: foo
+ description: description
+paths:
+ /foo:
+ get:
+ operationId: get_foo
+ responses:
+ '200':
+ description: Success
+ tags:
+ - foo
+ parameters:
+ - in: path
+ description: The foo id
+ name: foo_id
+ required: true
+ type: string
\ No newline at end of file
| ValueError: Expected the value in a schema to be a string, but got: <class 'swagger_to.swagger.RawDict'>
If a definition's `properties` block contains a property named `description`, this issue occurs (the value of that key is then a mapping rather than a plain string):
```
DeviceDefineInfoRes:
type: object
discriminator: ''
properties:
createPerson:
type: string
createTime:
type: string
description:
type: string
id:
type: string
manufacturer:
type: string
name:
type: string
number:
type: string
productTypeCode:
type: integer
format: int32
updatePerson:
type: string
updateTime:
type: string
description: ''
```
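
A hypothetical sketch (not swagger-to's actual code) of why such a property name causes the error above: code that treats every `description` key as documentation text fails when the value is a nested mapping.

```python
# Hypothetical illustration only, not swagger-to's real implementation.
properties = {
    "createPerson": {"type": "string"},
    "description": {"type": "string"},  # a property literally named "description"
}

def read_description(mapping):
    # Naive assumption: any "description" key holds plain documentation text.
    value = mapping.get("description", "")
    if not isinstance(value, str):
        raise ValueError(
            "Expected the value in a schema to be a string, "
            "but got: {}".format(type(value)))
    return value

try:
    read_description(properties)
except ValueError as exc:
    print(exc)  # ... but got: <class 'dict'>
```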
<img width="729" alt="20211027104911" src="https://user-images.githubusercontent.com/17000812/138991549-b512b07d-f789-4df9-8436-92e87c698d4c.png">
*(@mristin, 2021-10-27: added markers for multi-line code)* | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_intermediate.py::TestIntermediate::test_that_it_does_not_break"
] | [
"tests/test_elm_client.py::TestElmClient::test_that_it_works",
"tests/test_elm_client.py::TestEscapeElmString::test_that_it_works",
"tests/test_go_server.py::TestEscapeStr::test_empty",
"tests/test_go_server.py::TestEscapeStr::test_that_it_works",
"tests/test_go_server.py::TestGoServer::test_that_it_works",
"tests/test_parsing.py::TestParsing::test_that_it_does_not_break",
"tests/test_py_client.py::TestPyClient::test_that_it_works",
"tests/test_py_client.py::TestDocstring::test_backslash_and_triple_quote",
"tests/test_py_client.py::TestDocstring::test_backslash_handled",
"tests/test_py_client.py::TestDocstring::test_multiline",
"tests/test_py_client.py::TestDocstring::test_single_line",
"tests/test_py_client.py::TestDocstring::test_special_chars",
"tests/test_py_client.py::TestDocstring::test_triple_quote_handled",
"tests/test_style.py::TestStyleCheck::test_that_it_works",
"tests/test_style.py::TestDescription::test_that_it_works",
"tests/test_swagger_to.py::TestSwaggerTo::test_camel_case",
"tests/test_swagger_to.py::TestSwaggerTo::test_camel_case_split",
"tests/test_swagger_to.py::TestSwaggerTo::test_capial_camel_case",
"tests/test_swagger_to.py::TestSwaggerTo::test_path_tokenization",
"tests/test_swagger_to.py::TestSwaggerTo::test_snake_case",
"tests/test_ts_angular5_client.py::TestTypescriptClient::test_that_it_works"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
} | "2023-01-26T04:04:36Z" | mit |
|
Parquery__temppathlib-12 | diff --git a/pylint.rc b/pylint.rc
index a70a9ba..9ecb8f0 100644
--- a/pylint.rc
+++ b/pylint.rc
@@ -7,5 +7,5 @@ generated-members=bottle\.request\.forms\.decode,bottle\.request\.query\.decode
max-line-length=120
[MESSAGES CONTROL]
-disable=too-few-public-methods,abstract-class-little-used,len-as-condition,bad-continuation,bad-whitespace
+disable=too-few-public-methods,abstract-class-little-used,len-as-condition,bad-continuation,bad-whitespace,too-many-arguments
diff --git a/temppathlib/__init__.py b/temppathlib/__init__.py
index 442e321..e23b513 100644
--- a/temppathlib/__init__.py
+++ b/temppathlib/__init__.py
@@ -41,18 +41,32 @@ class TmpDirIfNecessary:
def __init__(self,
path: Union[None, str, pathlib.Path],
base_tmp_dir: Union[None, str, pathlib.Path] = None,
- dont_delete_tmp_dir: bool = False) -> None:
+ dont_delete_tmp_dir: bool = False,
+ prefix: Optional[str] = None,
+ suffix: Optional[str] = None) -> None:
"""
Initialize with the given values.
- :param path: provided path to the directory; if specified, no temporary directory is created.
- :param base_tmp_dir: parent directory of the temporary directories; if not set,
- the default is used (usually '/tmp'). This path is only used if a temporary directory needs to be created
- and has no effect if 'path' was provided.
+ :param path:
+ provided path to the directory; if specified, no temporary directory is created.
+
+ :param base_tmp_dir:
+ parent directory of the temporary directories; if not set,
+ the default is used (usually '/tmp'). This path is only used if a temporary directory needs to be created
+ and has no effect if 'path' was provided.
+
+ :param dont_delete_tmp_dir:
+ if set, the temporary directory is not deleted upon close.
- :param dont_delete_tmp_dir: if set, the temporary directory is not deleted upon close.
+ If the 'path' was provided, this argument has no effect.
- If the 'path' was provided, this argument has no effect.
+ :param prefix:
+ If 'prefix' is not None, the name will begin with that prefix,
+ otherwise a default prefix is used.
+
+ :param suffix:
+ If 'suffix' is not None, the name will end with that suffix,
+ otherwise a default suffix is used.
"""
if base_tmp_dir is None:
self.base_tmp_dir = base_tmp_dir
@@ -76,6 +90,9 @@ class TmpDirIfNecessary:
self.dont_delete = dont_delete_tmp_dir
+ self._prefix = prefix
+ self._suffix = suffix
+
self.__use_tmp_dir = path is None
self.exited = False
@@ -96,9 +113,10 @@ class TmpDirIfNecessary:
if self._path is None:
if self.base_tmp_dir is None:
- self._path = pathlib.Path(tempfile.mkdtemp())
+ self._path = pathlib.Path(tempfile.mkdtemp(prefix=self._prefix, suffix=self._suffix))
else:
- self._path = pathlib.Path(tempfile.mkdtemp(dir=str(self.base_tmp_dir)))
+ self._path = pathlib.Path(
+ tempfile.mkdtemp(dir=str(self.base_tmp_dir), prefix=self._prefix, suffix=self._suffix))
else:
self._path.mkdir(exist_ok=True, parents=True)
@@ -213,7 +231,6 @@ class NamedTemporaryFile:
:param delete: whether the file is deleted on close (default True).
"""
- # pylint: disable=too-many-arguments
self.__tmpfile = tempfile.NamedTemporaryFile(
mode=mode,
buffering=buffering,
| Parquery/temppathlib | 21fd2b96fb2fb13c4e64d5bf2783327b11bafe4e | diff --git a/tests/test_temppathlib.py b/tests/test_temppathlib.py
index 6cd1e7c..cdf6988 100644
--- a/tests/test_temppathlib.py
+++ b/tests/test_temppathlib.py
@@ -94,6 +94,14 @@ class TestTmpDirIfNecessary(unittest.TestCase):
finally:
shutil.rmtree(str(basedir))
+ def test_prefix(self) -> None:
+ with temppathlib.TmpDirIfNecessary(path=None, prefix="some_prefix") as tmp_dir:
+ self.assertTrue(tmp_dir.path.name.startswith("some_prefix"))
+
+ def test_suffix(self) -> None:
+ with temppathlib.TmpDirIfNecessary(path=None, suffix="some_suffix") as tmp_dir:
+ self.assertTrue(tmp_dir.path.name.endswith("some_suffix"))
+
class TestTemporaryDirectory(unittest.TestCase):
def test_that_it_works(self) -> None:
| Support `prefix` option also with `TmpDirIfNecessary`?
Maybe `TmpDirIfNecessary` should also support the `prefix` option, to give a temporary directory a certain prefix (if no directory is provided already)? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_temppathlib.py::TestTmpDirIfNecessary::test_prefix",
"tests/test_temppathlib.py::TestTmpDirIfNecessary::test_suffix"
] | [
"tests/test_temppathlib.py::TestRemovingTree::test_no_enter",
"tests/test_temppathlib.py::TestRemovingTree::test_that_it_works",
"tests/test_temppathlib.py::TestTmpDirIfNecessary::test_with_base_tmp_dir",
"tests/test_temppathlib.py::TestTmpDirIfNecessary::test_with_path_str",
"tests/test_temppathlib.py::TestTemporaryDirectory::test_that_it_works",
"tests/test_temppathlib.py::TestTemporaryDirectory::test_with_prefix",
"tests/test_temppathlib.py::TestNamedTemporaryFile::test_that_it_works",
"tests/test_temppathlib.py::TestNamedTemporaryFile::test_with_dir",
"tests/test_temppathlib.py::TestGettempdir::test_that_it_works"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-03-15T19:13:03Z" | mit |
|
PaulSchweizer__flowpipe-127 | diff --git a/flowpipe/node.py b/flowpipe/node.py
index 7fc825d..a9671e9 100644
--- a/flowpipe/node.py
+++ b/flowpipe/node.py
@@ -436,8 +436,13 @@ class FunctionNode(INode):
metadata = copy.deepcopy(self.metadata)
metadata.update(kwargs.pop("metadata", {}))
graph = kwargs.pop('graph', 'default')
+ outputs = []
+ for o in self.outputs.values():
+ outputs.append(o.name)
+ for key in o._sub_plugs.keys():
+ outputs.append("{0}.{1}".format(o.name, key))
return self.__class__(func=self.func,
- outputs=[o for o in self.outputs],
+ outputs=outputs,
metadata=metadata,
graph=graph,
**kwargs)
@@ -511,7 +516,15 @@ class FunctionNode(INode):
if outputs is not None:
for output in outputs:
- OutputPlug(output, self)
+ if "." in output:
+ parent, subplug = output.split(".")
+ parent_plug = self.outputs.get(parent)
+ if parent_plug is None:
+ parent_plug = OutputPlug(parent, self)
+ SubOutputPlug(subplug, self, parent_plug)
+ else:
+ if self.outputs.get(output) is None:
+ OutputPlug(output, self)
def to_pickle(self): # pragma: no cover
"""Pickle the node. -- DOES NOT WORK FOR FunctionNode."""
diff --git a/flowpipe/plug.py b/flowpipe/plug.py
index b58e4c6..85e6f65 100644
--- a/flowpipe/plug.py
+++ b/flowpipe/plug.py
@@ -303,6 +303,7 @@ class SubInputPlug(IPlug):
(OutputPlug, SubOutputPlug))
self.key = key
self.parent_plug = parent_plug
+ self.parent_plug._sub_plugs[key] = self
self.value = value
self.is_dirty = True
@@ -362,6 +363,7 @@ class SubOutputPlug(IPlug):
(InputPlug, SubInputPlug))
self.key = key
self.parent_plug = parent_plug
+ self.parent_plug._sub_plugs[key] = self
self.value = value
self.is_dirty = True
| PaulSchweizer/flowpipe | 06cade8408791fde0ab422c19d5d8e73facbc234 | diff --git a/tests/test_convert_function_to_node.py b/tests/test_convert_function_to_node.py
index 2d84466..e99a8dc 100644
--- a/tests/test_convert_function_to_node.py
+++ b/tests/test_convert_function_to_node.py
@@ -182,3 +182,20 @@ def test_node_reserved_names():
def function(func, name, identifier, inputs,
outputs, metadata, omit, graph):
pass
+
+
+def test_create_node_with_sub_output_plugs():
+
+ @Node(outputs=["out.a", "out", "out.b"])
+ def function1(in_):
+ pass
+
+ node = function1(name="contains_all_plugs")
+ assert len(node.outputs["out"]._sub_plugs) == 2
+
+ @Node(outputs=["out.a", "out.b"])
+ def function2(in_):
+ pass
+
+ node = function2(name="contains_only_subplugs")
+ assert len(node.outputs["out"]._sub_plugs) == 2
| Add sub output plugs through Node decorator
This should be made possible:
```python
@Node(outputs=["out.a", "out.b"])
def MyNode():
pass
```
Resulting in a node like this:
```
+-----------+
| MyNode |
+-----------+
| out %
| out.a o
| out.b o
+-----------+
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_convert_function_to_node.py::test_create_node_with_sub_output_plugs"
] | [
"tests/test_convert_function_to_node.py::test_input_plugs_are_taken_from_func_inputs",
"tests/test_convert_function_to_node.py::test_name_is_taken_from_func_name_if_not_provided",
"tests/test_convert_function_to_node.py::test_name_can_be_provided_as_kwarg",
"tests/test_convert_function_to_node.py::test_doc_is_taken_from_func",
"tests/test_convert_function_to_node.py::test_define_outputs",
"tests/test_convert_function_to_node.py::test_decorator_returns_node_instances",
"tests/test_convert_function_to_node.py::test_serialize_function_node",
"tests/test_convert_function_to_node.py::test_use_self_as_first_arg_if_present",
"tests/test_convert_function_to_node.py::test_assign_input_args_to_function_input_plugs",
"tests/test_convert_function_to_node.py::test_provide_custom_node_class",
"tests/test_convert_function_to_node.py::test_passing_metadata_updates_exisiting_metadata",
"tests/test_convert_function_to_node.py::test_default_args_are_assigned_to_input_plugs",
"tests/test_convert_function_to_node.py::test_metadata_is_unique_for_each_node_created",
"tests/test_convert_function_to_node.py::test_class_name_restored_after_deserialization",
"tests/test_convert_function_to_node.py::test_node_reserved_names"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-02-23T16:26:46Z" | mit |
|
PaulSchweizer__flowpipe-147 | diff --git a/flowpipe/__init__.py b/flowpipe/__init__.py
index a14309f..a48b2ba 100644
--- a/flowpipe/__init__.py
+++ b/flowpipe/__init__.py
@@ -1,4 +1,5 @@
"""Flow-based programming with python."""
from .graph import Graph
from .node import INode, Node
-from .plug import InputPlug, OutputPlug, SubInputPlug, SubOutputPlug
+from .plug import (InputPlug, InputPlugGroup, OutputPlug, SubInputPlug,
+ SubOutputPlug)
diff --git a/flowpipe/graph.py b/flowpipe/graph.py
index 6a27fbc..fb3f3ca 100644
--- a/flowpipe/graph.py
+++ b/flowpipe/graph.py
@@ -19,7 +19,6 @@ try:
except ImportError:
from ordereddict import OrderedDict
-
log = logging.getLogger(__name__)
@@ -32,6 +31,7 @@ class Graph(object):
self.nodes = nodes or []
self.inputs = {}
self.outputs = {}
+ self.input_groups = {}
def __unicode__(self):
"""Display the Graph."""
diff --git a/flowpipe/plug.py b/flowpipe/plug.py
index 0912195..fc3ef4a 100644
--- a/flowpipe/plug.py
+++ b/flowpipe/plug.py
@@ -1,9 +1,12 @@
"""Plugs are ins and outs for Nodes through which they exchange data."""
from __future__ import print_function
-from abc import abstractmethod
+
import sys
import warnings
+from abc import abstractmethod
+
from .utilities import get_hash
+
__all__ = ['OutputPlug', 'InputPlug']
try:
@@ -106,6 +109,10 @@ class IPlug(object):
def disconnect(self, plug):
"""Break the connection to the given Plug."""
+ if isinstance(plug, InputPlugGroup):
+ for plug_ in plug:
+ self.disconnect(plug_)
+ return
if plug in self.connections:
self.connections.pop(self.connections.index(plug))
self.is_dirty = True
@@ -133,7 +140,7 @@ class OutputPlug(IPlug):
name (str): The name of the Plug.
node (INode): The Node holding the Plug.
"""
- self.accepted_plugs = (InputPlug,)
+ self.accepted_plugs = (InputPlug, InputPlugGroup)
super(OutputPlug, self).__init__(name, node)
if not isinstance(self, SubPlug):
self.node.outputs[self.name] = self
@@ -160,6 +167,11 @@ class OutputPlug(IPlug):
if not isinstance(plug, self.accepted_plugs):
raise TypeError("Cannot connect {0} to {1}".format(
type(self), type(plug)))
+ if isinstance(plug, InputPlugGroup):
+ for plug_ in plug:
+ self.connect(plug_)
+ return
+
if self.node.graph.accepts_connection(self, plug):
for connection in plug.connections:
plug.disconnect(connection)
@@ -390,3 +402,44 @@ class SubOutputPlug(SubPlug, OutputPlug):
'value': self.value,
'connections': connections
}
+
+
+class InputPlugGroup(object):
+ """Group plugs inside a group into one entry point on the graph."""
+
+ def __init__(self, name, graph, plugs=None):
+ """Initialize the group and assigning it to the `Graph.input_groups`.
+
+ Can be connected to an OutputPlug.
+ Args:
+ name (str): The name of the InputPlugGroup.
+ graph (Graph): The Graph holding the PlugGroup.
+ plugs (list of InputPlug): The plugs in this group.
+ """
+ self.name = name
+ self.graph = graph
+ self.plugs = plugs or []
+ self.graph.input_groups[self.name] = self
+
+ def connect(self, plug):
+ """Connect all plugs in this group to the given plug."""
+ for input_plug in self.plugs:
+ plug.connect(input_plug)
+
+ def disconnect(self, plug):
+ """Disconnect all plugs in this group from the given plug."""
+ for input_plug in self.plugs:
+ plug.disconnect(input_plug)
+
+ def __iter__(self):
+ """Convenience to iterate over the plugs in this group."""
+ for plug in self.plugs:
+ yield plug
+
+ def __rshift__(self, other):
+ """Syntactic sugar for the connect() method."""
+ self.connect(other)
+
+ def __lshift__(self, other):
+ """Syntactic sugar for the disconnect() method."""
+ self.disconnect(other)
| PaulSchweizer/flowpipe | d2a17a3d9abeef0a7c48bee11bc6ff4630e5bd7c | diff --git a/tests/test_inputpluggroup.py b/tests/test_inputpluggroup.py
new file mode 100644
index 0000000..f5011bc
--- /dev/null
+++ b/tests/test_inputpluggroup.py
@@ -0,0 +1,126 @@
+import pytest
+from flowpipe import Graph, InputPlugGroup, Node
+
+
+@Node(outputs=["out"])
+def DemoNode(in_):
+ """
+ +-----------+
+ | DemoNode |
+ |-----------|
+ o in_<> |
+ | out<> o
+ +-----------+
+ """
+ return {"out": in_}
+
+
[email protected]
+def demo_graph_fixture():
+ """
+ +---main----+ +---sub----+
+ | A | | C1 |
+ |-----------| |----------|
+ o in_<> | +--->o in_<> |
+ | out<> o-----+ | out<> o
+ +-----------+ | +----------+
+ | +---sub----+
+ | | C2 |
+ | |----------|
+ +--->o in_<> |
+ | out<> o
+ +----------+
+ """
+ # Sub graph
+ sub = Graph("sub")
+ c1 = DemoNode(graph=sub, name="C1")
+ c2 = DemoNode(graph=sub, name="C2")
+
+ # Main graph
+ main = Graph("main")
+ DemoNode(graph=main, name="A")
+
+ # Group inputs in the sub graph
+ InputPlugGroup("graph_in", sub, [
+ c1.inputs["in_"],
+ c2.inputs["in_"],
+ ])
+ return sub, main
+
+
+def test_connect_groupinput_to_output(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ sub.input_groups["graph_in"].connect(main["A"].outputs["out"])
+
+ assert main["A"].outputs["out"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] in sub["C2"].inputs["in_"].connections
+
+ sub.input_groups["graph_in"].disconnect(main["A"].outputs["out"])
+
+ assert main["A"].outputs["out"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] not in sub["C2"].inputs["in_"].connections
+
+
+def test_connect_output_to_groupinput(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ main["A"].outputs["out"].connect(sub.input_groups["graph_in"])
+
+ assert main["A"].outputs["out"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] in sub["C2"].inputs["in_"].connections
+
+ main["A"].outputs["out"].disconnect(sub.input_groups["graph_in"])
+
+ assert main["A"].outputs["out"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] not in sub["C2"].inputs["in_"].connections
+
+
+def test_rshift_connect_groupinput_to_output(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ sub.input_groups["graph_in"] >> main["A"].outputs["out"]
+
+ assert main["A"].outputs["out"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] in sub["C2"].inputs["in_"].connections
+
+ sub.input_groups["graph_in"] << main["A"].outputs["out"]
+
+ assert main["A"].outputs["out"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] not in sub["C2"].inputs["in_"].connections
+
+
+def test_rshift_connect_output_to_groupinput(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ main["A"].outputs["out"] >> sub.input_groups["graph_in"]
+
+ assert main["A"].outputs["out"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] in sub["C2"].inputs["in_"].connections
+
+ main["A"].outputs["out"] << sub.input_groups["graph_in"]
+
+ assert main["A"].outputs["out"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"] not in sub["C2"].inputs["in_"].connections
+
+
+def test_connect_groupinput_to_suboutput(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ sub.input_groups["graph_in"].connect(main["A"].outputs["out"]["1"])
+
+ assert main["A"].outputs["out"]["1"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"]["1"] in sub["C2"].inputs["in_"].connections
+
+ sub.input_groups["graph_in"].disconnect(main["A"].outputs["out"]["1"])
+
+ assert main["A"].outputs["out"]["1"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"]["1"] not in sub["C2"].inputs["in_"].connections
+
+
+def test_connect_suboutput_to_groupinput(demo_graph_fixture):
+ sub, main = demo_graph_fixture
+ main["A"].outputs["out"]["1"].connect(sub.input_groups["graph_in"])
+
+ assert main["A"].outputs["out"]["1"] in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"]["1"] in sub["C2"].inputs["in_"].connections
+
+ main["A"].outputs["out"]["1"].disconnect(sub.input_groups["graph_in"])
+
+ assert main["A"].outputs["out"]["1"] not in sub["C1"].inputs["in_"].connections
+ assert main["A"].outputs["out"]["1"] not in sub["C2"].inputs["in_"].connections
| Utility to distribute data into subgraphs
**Is your feature request related to a problem? Please describe.**
I often have subgraphs with several nodes that will take the same input from outside the subgraph (e.g. several nodes taking the same file from a file-reading node). This is not well supported as of now; all options I came up with have drawbacks.
**Describe the solution you'd like**
I think a way to group inputs would be useful. Such a feature could be used to clearly set up several inputs to take the same information. Connecting all these inputs to the same output can then be done transparently in a single connection operation.
I think that such a grouping might be implemented as a `GroupingPlug`, which inherits from `InputPlug`, providing methods to
* be connected up to an `OutputPlug`, thereby connecting all grouped Inputs with that output
* Add a new `InputPlug` to the group, connecting it with the `OutputPlug` if a connection already exists.
* Disconnect form the output, thereby disconnecting all inputs in the group
**Describe alternatives you've considered**
Right now, there are three options to connecting that up:
1. Connect the outside nodes directly to the inputs of the nodes within the subgraph. This is not great, since I define the subgraphs to abstract away the internal structure as much as possible.
2. Promote all the input plugs to the subgraph. This is awkward, because the subgraph then has several inputs for the same data.
3. Introduce an extra node that trivially pipes through the data. This way, we can have a single input to the subgraph, however, the extra node makes the graph unnecessarily complicated and, when caching outputs, increases the memory footprint.
**Additional context**
I think such a feature would be useful in all graphs that fan out widely. Right after instantiating a node, one could add the inputs this node shares with other nodes to the group, keeping that behavior neatly in one place. Still, the connection code would be much less boilerplate, since I could just connect `my_file_reader_node.outputs["file"] >> file_consumers`.
By being a plug itself, my suggested implementation above could provide the functionality I'm after: Group all inputs for the same outside data inside a subgraph and expose the bundle as a single unit.
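As a concrete usage sketch (following the `InputPlugGroup` API exercised in the patch and test patch above; node and plug names are illustrative):

```python
from flowpipe import Graph, InputPlugGroup, Node

@Node(outputs=["out"])
def PassThrough(in_):
    return {"out": in_}

sub = Graph("sub")
c1 = PassThrough(graph=sub, name="C1")
c2 = PassThrough(graph=sub, name="C2")

main = Graph("main")
reader = PassThrough(graph=main, name="FileReader")

# Group the inputs inside the subgraph that all consume the same outside data.
consumers = InputPlugGroup("graph_in", sub, [c1.inputs["in_"], c2.inputs["in_"]])

# A single connection fans the reader's output out to every grouped input.
reader.outputs["out"] >> consumers
```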
Some illustrations:
Options 1 or 2 of the alternatives yield such a behavior:
![flowpipe_1](https://user-images.githubusercontent.com/23141072/97970271-8e67e800-1dc1-11eb-901d-e250d14e04e9.png)
Option 3 adds a node to the graph that doesn't do very much and caches the data redundantly if output caching is not disabled:
![flowpipe_2](https://user-images.githubusercontent.com/23141072/97970276-9162d880-1dc1-11eb-83ad-0ef46c15e7e9.png)
The requested feature would provide some utility for the circle in this diagram:
![flowpipe_3](https://user-images.githubusercontent.com/23141072/97970280-932c9c00-1dc1-11eb-8d9d-f8ff6a65475d.png)
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_inputpluggroup.py::test_connect_groupinput_to_output",
"tests/test_inputpluggroup.py::test_connect_output_to_groupinput",
"tests/test_inputpluggroup.py::test_rshift_connect_groupinput_to_output",
"tests/test_inputpluggroup.py::test_rshift_connect_output_to_groupinput",
"tests/test_inputpluggroup.py::test_connect_groupinput_to_suboutput",
"tests/test_inputpluggroup.py::test_connect_suboutput_to_groupinput"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-12-25T13:31:27Z" | mit |
|
PennChopMicrobiomeProgram__illqc-17 | diff --git a/illqclib/main.py b/illqclib/main.py
index e8503bf..396d282 100644
--- a/illqclib/main.py
+++ b/illqclib/main.py
@@ -67,7 +67,7 @@ class Trimmomatic(object):
"ILLUMINACLIP:%s:2:30:10:8:true" % self._adapter_fp,
"LEADING:%d" % self.config["leading"],
"TRAILING:%d" % self.config["trailing"],
- "SLIDINGWINDOW:%d:%d" % self.config["slidingwindow"],
+ "SLIDINGWINDOW:%d:%d" % tuple(self.config["slidingwindow"]),
"MINLEN:%d" % self.config["minlen"],
]
| PennChopMicrobiomeProgram/illqc | bc504d4c93300db446ab7b70cb0660f682d07687 | diff --git a/test/test_main.py b/test/test_main.py
index 7a75113..803ed7b 100644
--- a/test/test_main.py
+++ b/test/test_main.py
@@ -26,17 +26,19 @@ class ConfigTests(unittest.TestCase):
class TrimmomaticTests(unittest.TestCase):
+ config_vals = {
+ "trimmomatic_jar_fp": "trimmomatic-0.30.jar",
+ "adapter_dir": "adapters",
+ "adapter": "NexteraPE-PE",
+ "leading": 3,
+ "trailing": 3,
+ "slidingwindow": (4, 15),
+ "minlen": 36,
+ "java_heapsize":"200M"
+ }
+
def test_make_command(self):
- app = Trimmomatic({
- "trimmomatic_jar_fp": "trimmomatic-0.30.jar",
- "adapter_dir": "adapters",
- "adapter": "NexteraPE-PE",
- "leading": 3,
- "trailing": 3,
- "slidingwindow": (4, 15),
- "minlen": 36,
- "java_heapsize":"200M"
- })
+ app = Trimmomatic(self.config_vals)
observed = app.make_command("a.fastq", "b.fastq", "mydir")
expected = [
'java', '-Xmx200M', '-jar', 'trimmomatic-0.30.jar', 'PE', '-phred33',
@@ -47,3 +49,18 @@ class TrimmomaticTests(unittest.TestCase):
'LEADING:3', 'TRAILING:3', 'SLIDINGWINDOW:4:15', 'MINLEN:36',
]
self.assertEqual(observed, expected)
+
+ def test_make_command_sliding_window_as_list(self):
+ config_vals = self.config_vals.copy()
+ config_vals["slidingwindow"] = [6, 32]
+ app = Trimmomatic(config_vals)
+ observed = app.make_command("a.fastq", "b.fastq", "mydir")
+ expected = [
+ 'java', '-Xmx200M', '-jar', 'trimmomatic-0.30.jar', 'PE', '-phred33',
+ 'a.fastq', 'b.fastq',
+ 'mydir/a.fastq', 'mydir/a_unpaired.fastq',
+ 'mydir/b.fastq', 'mydir/b_unpaired.fastq',
+ 'ILLUMINACLIP:adapters/NexteraPE-PE.fa:2:30:10:8:true',
+ 'LEADING:3', 'TRAILING:3', 'SLIDINGWINDOW:6:32', 'MINLEN:36',
+ ]
+ self.assertEqual(observed, expected)
| Cannot configure setting for sliding window
Python needs a tuple value for this setting, but JSON does not support tuple types, so the configured value arrives as a list. Suggest converting the value to a tuple (e.g. `tuple(self.config["slidingwindow"])`) before the string formatting at line 70:
https://github.com/PennChopMicrobiomeProgram/illqc/blob/master/illqclib/main.py#L70 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_main.py::TrimmomaticTests::test_make_command_sliding_window_as_list"
] | [
"test/test_main.py::TrimmomaticTests::test_make_command"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2017-03-04T12:40:47Z" | mit |
|
PennyLaneAI__pennylane-3624 | diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index fc59a61f5..6adf859bb 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -160,13 +160,22 @@
from the queue instead of updating their metadata to have an `"owner"`.
[(#3282)](https://github.com/PennyLaneAI/pennylane/pull/3282)
+* `qchem.scf`, `RandomLayers.compute_decomposition`, and `Wires.select_random` all use
+ local random number generators now instead of global random number generators. This may lead to slighlty
+ different random numbers, and an independence of the results from the global random number generation state.
+ Please provide a seed to each individual function instead if you want controllable results.
+ [(#3624)](https://github.com/PennyLaneAI/pennylane/pull/3624)
+
<h3>Deprecations</h3>
<h3>Documentation</h3>
<h3>Bug fixes</h3>
-* Handles breaking networkx version change by selectively skipping a qcut tensorflow-jit test,
+* Uses a local random number generator where possible to avoid mutating the global random state.
+ [(#3624)](https://github.com/PennyLaneAI/pennylane/pull/3624)
+
+* Handles breaking networkx version change by selectively skipping a qcut tensorflow-jit test.
[(#3609)](https://github.com/PennyLaneAI/pennylane/pull/3609)
[(#3619)](https://github.com/PennyLaneAI/pennylane/pull/3619)
diff --git a/pennylane/math/is_independent.py b/pennylane/math/is_independent.py
index 78fcbbc01..d90b32784 100644
--- a/pennylane/math/is_independent.py
+++ b/pennylane/math/is_independent.py
@@ -196,10 +196,9 @@ def _get_random_args(args, interface, num, seed, bounds):
tuple(torch.rand(np.shape(arg)) * width + bounds[0] for arg in args) for _ in range(num)
]
else:
- np.random.seed(seed)
+ rng = np.random.default_rng(seed)
rnd_args = [
- tuple(np.random.random(np.shape(arg)) * width + bounds[0] for arg in args)
- for _ in range(num)
+ tuple(rng.random(np.shape(arg)) * width + bounds[0] for arg in args) for _ in range(num)
]
if interface == "autograd":
diff --git a/pennylane/qchem/hartree_fock.py b/pennylane/qchem/hartree_fock.py
index 8634e2dcf..fb3762b5f 100644
--- a/pennylane/qchem/hartree_fock.py
+++ b/pennylane/qchem/hartree_fock.py
@@ -132,8 +132,8 @@ def scf(mol, n_steps=50, tol=1e-8):
s = overlap_matrix(basis_functions)(*args)
h_core = core_matrix(basis_functions, charges, r)(*args)
- qml.math.random.seed(2030)
- s = s + qml.math.diag(qml.math.random.rand(len(s)) * 1.0e-12)
+ rng = qml.math.random.default_rng(2030)
+ s = s + qml.math.diag(rng.random(len(s)) * 1.0e-12)
w, v = qml.math.linalg.eigh(s)
x = v @ qml.math.diag(1.0 / qml.math.sqrt(w)) @ v.T
diff --git a/pennylane/templates/layers/random.py b/pennylane/templates/layers/random.py
index 8e07418dc..20a3d90dd 100644
--- a/pennylane/templates/layers/random.py
+++ b/pennylane/templates/layers/random.py
@@ -233,8 +233,7 @@ class RandomLayers(Operation):
RX(tensor(1.4000), wires=['a'])]
"""
wires = qml.wires.Wires(wires)
- if seed is not None:
- np.random.seed(seed)
+ rng = np.random.default_rng(seed)
shape = qml.math.shape(weights)
n_layers = qml.math.shape(weights)[0]
@@ -244,17 +243,17 @@ class RandomLayers(Operation):
i = 0
while i < shape[1]:
- if np.random.random() > ratio_imprimitive:
+ if rng.random() > ratio_imprimitive:
# apply a random rotation gate to a random wire
- gate = np.random.choice(rotations)
- rnd_wire = wires.select_random(1)
+ gate = rng.choice(rotations)
+ rnd_wire = wires.select_random(1, seed=rng)
op_list.append(gate(weights[l][i], wires=rnd_wire))
i += 1
else:
# apply the entangler to two random wires
if len(wires) > 1:
- rnd_wires = wires.select_random(2)
+ rnd_wires = wires.select_random(2, seed=rng)
op_list.append(imprimitive(wires=rnd_wires))
return op_list
diff --git a/pennylane/wires.py b/pennylane/wires.py
index 61a54129f..21d556e51 100644
--- a/pennylane/wires.py
+++ b/pennylane/wires.py
@@ -348,10 +348,9 @@ class Wires(Sequence):
if n_samples > len(self._labels):
raise WireError(f"Cannot sample {n_samples} wires from {len(self._labels)} wires.")
- if seed is not None:
- np.random.seed(seed)
+ rng = np.random.default_rng(seed)
- indices = np.random.choice(len(self._labels), size=n_samples, replace=False)
+ indices = rng.choice(len(self._labels), size=n_samples, replace=False)
subset = tuple(self[i] for i in indices)
return Wires(subset, _override=True)
| PennyLaneAI/pennylane | efe3c077efdd366121fd8eee322250cf498a5653 | diff --git a/tests/math/test_is_independent.py b/tests/math/test_is_independent.py
index cdeaf3062..5e933eaa9 100644
--- a/tests/math/test_is_independent.py
+++ b/tests/math/test_is_independent.py
@@ -115,11 +115,10 @@ class TestIsIndependentAutograd:
seed = 921
rnd_args = _get_random_args(args, self.interface, num, seed, bounds)
assert len(rnd_args) == num
- np.random.seed(seed)
+ rng = np.random.default_rng(seed)
for _rnd_args in rnd_args:
expected = tuple(
- np.random.random(np.shape(arg)) * (bounds[1] - bounds[0]) + bounds[0]
- for arg in args
+ rng.random(np.shape(arg)) * (bounds[1] - bounds[0]) + bounds[0] for arg in args
)
assert all(np.allclose(_exp, _rnd) for _exp, _rnd in zip(expected, _rnd_args))
@@ -244,11 +243,10 @@ class TestIsIndependentJax:
seed = 921
rnd_args = _get_random_args(args, self.interface, num, seed, bounds)
assert len(rnd_args) == num
- np.random.seed(seed)
+ rng = np.random.default_rng(seed)
for _rnd_args in rnd_args:
expected = tuple(
- np.random.random(np.shape(arg)) * (bounds[1] - bounds[0]) + bounds[0]
- for arg in args
+ rng.random(np.shape(arg)) * (bounds[1] - bounds[0]) + bounds[0] for arg in args
)
assert all(np.allclose(_exp, _rnd) for _exp, _rnd in zip(expected, _rnd_args))
diff --git a/tests/qchem/test_hartree_fock.py b/tests/qchem/test_hartree_fock.py
index 5ce5809ac..3db579f62 100644
--- a/tests/qchem/test_hartree_fock.py
+++ b/tests/qchem/test_hartree_fock.py
@@ -21,6 +21,26 @@ from pennylane import numpy as np
from pennylane import qchem
+def test_scf_leaves_random_seed_unchanged():
+ """Tests that the scf function leaves the global numpy sampling state unchanged."""
+
+ symbols = ["H", "H"]
+ geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad=False)
+ alpha = np.array(
+ [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]],
+ requires_grad=True,
+ )
+ mol = qchem.Molecule(symbols, geometry, alpha=alpha)
+ args = [alpha]
+
+ initial_numpy_state = np.random.get_state()
+ v_fock, coeffs, fock_matrix, h_core, rep_tensor = qchem.scf(mol)(*args)
+ final_numpy_state = np.random.get_state()
+
+ assert initial_numpy_state[0] == final_numpy_state[0]
+ assert np.all(initial_numpy_state[1] == final_numpy_state[1])
+
+
@pytest.mark.parametrize(
("symbols", "geometry", "v_fock", "coeffs", "fock_matrix", "h_core", "repulsion_tensor"),
[
diff --git a/tests/templates/test_layers/test_random.py b/tests/templates/test_layers/test_random.py
index 94537749a..50aeea78b 100644
--- a/tests/templates/test_layers/test_random.py
+++ b/tests/templates/test_layers/test_random.py
@@ -150,10 +150,10 @@ def circuit_template(weights):
def circuit_decomposed(weights):
# this structure is only true for a seed of 42 and 3 wires
- qml.RX(weights[0, 0], wires=1)
- qml.RX(weights[0][1], wires=0)
- qml.CNOT(wires=[1, 0])
- qml.RZ(weights[0, 2], wires=2)
+ qml.RY(weights[0, 0], wires=1)
+ qml.RX(weights[0][1], wires=2)
+ qml.CNOT(wires=[1, 2])
+ qml.RZ(weights[0, 2], wires=1)
return qml.expval(qml.PauliZ(0))
@@ -169,16 +169,14 @@ class TestInterfaces:
decomp = op.decomposition()
expected = [
- qml.RX(weights[0][0], wires=1),
- qml.RX(weights[0][1], wires=0),
- qml.CNOT(wires=[1, 0]),
- qml.RZ(weights[0][2], wires=2),
+ qml.RY(weights[0][0], wires=1),
+ qml.RX(weights[0][1], wires=2),
+ qml.CNOT(wires=[1, 2]),
+ qml.RZ(weights[0][2], wires=1),
]
for op1, op2 in zip(decomp, expected):
- assert op1.name == op2.name
- assert op1.data == op2.data
- assert op1.wires == op2.wires
+ assert qml.equal(op1, op2)
def test_autograd(self, tol):
"""Tests the autograd interface."""
| [BUG] qml.chem.molecular_hamiltonian sets random number generator seed
### Expected behavior
When using the argument `shots=100` to a device one expects variation in the output. This variation depends on the random number generator that is used internally in the device.
Setting or controlling the seed of the RNG is not possible at the moment, see https://github.com/PennyLaneAI/pennylane/issues/1327
In an application with a for loop over some system parameters, the results of a circuit turned out to be always the same (instead of varying randomly due to the finite number of shots). The problem turns out to be a call to `qml.qchem.molecular_hamiltonian`, which seems to reset the RNG. A minimal example:
```
import pennylane
import pennylane as qml
from pennylane import numpy as np
symbols = ["H", "H"]
coordinates = np.array([0.0, 0.0, -0.6614, 0.0, 0.0, 0.6614])
H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
dev = qml.device("default.qubit", wires=qubits, shots=100)
@qml.qnode(dev)
def F():
qml.BasisState(np.array([0,0,0,0]), wires=[0,1,2,3])
qml.Hadamard(0)
qml.Hadamard(1)
return qml.expval(H)
#%% Circuit has variation in the result, as expected for a finite number of shots
[F() for ii in range(10)]
#%% Redefining the device does not lead to reproducible results. How can we set the seed?
for ii in range(10):
dev = qml.device("default.qubit", wires=qubits, shots=100)
@qml.qnode(dev)
def F():
qml.BasisState(np.array([0,0]), wires=[0,1])
qml.Hadamard(0)
qml.Hadamard(1)
return qml.expval(H)
print(F())
#%% Making a call to qml.qchem.molecular_hamiltonian seems to reset the seed
for ii in range(10):
dev = qml.device("default.qubit", wires=qubits, shots=100)
H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
@qml.qnode(dev)
def F():
qml.BasisState(np.array([0,0,0,0]), wires=[0,1,2,3])
qml.Hadamard(0)
qml.Hadamard(1)
return qml.expval(H)
print(F())
#%% Check the call indeed modifies the seed:
qml.qchem.molecular_hamiltonian(symbols, coordinates)
s1=np.random.get_state()
np.random.rand()
qml.qchem.molecular_hamiltonian(symbols, coordinates)
s2=np.random.get_state()
print(s1)
print(s2)
```
Expected behaviour:
* `qml.qchem.molecular_hamiltonian` should not modify the RNG seed
### Actual behavior
`qml.qchem.molecular_hamiltonian` changes the RNG seed
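
The accompanying patch avoids this by using a local `numpy.random.Generator` in `qchem.scf` (and similar call sites) instead of seeding the global state. A minimal sketch of the pattern (the function name is illustrative, not PennyLane's API):

```python
import numpy as np

def regularized_overlap(s, seed=2030):
    # A local Generator leaves the global np.random state untouched.
    rng = np.random.default_rng(seed)
    return s + np.diag(rng.random(len(s)) * 1.0e-12)

s = np.eye(2)
before = np.random.get_state()
regularized_overlap(s)
after = np.random.get_state()
# The global state is unchanged, so downstream shot sampling still varies.
assert before[0] == after[0] and np.all(before[1] == after[1])
```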
### Additional information
_No response_
### Source code
_No response_
### Tracebacks
_No response_
### System information
```shell
Name: PennyLane
Version: 0.28.0.dev0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: c:\develop\env310\lib\site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, retworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning
Platform info: Windows-10-10.0.19045-SP0
Python version: 3.10.8
Numpy version: 1.24.1
Scipy version: 1.9.2
Installed devices:
- default.gaussian (PennyLane-0.28.0.dev0)
- default.mixed (PennyLane-0.28.0.dev0)
- default.qubit (PennyLane-0.28.0.dev0)
- default.qubit.autograd (PennyLane-0.28.0.dev0)
- default.qubit.jax (PennyLane-0.28.0.dev0)
- default.qubit.tf (PennyLane-0.28.0.dev0)
- default.qubit.torch (PennyLane-0.28.0.dev0)
- default.qutrit (PennyLane-0.28.0.dev0)
- null.qubit (PennyLane-0.28.0.dev0)
- lightning.qubit (PennyLane-Lightning-0.26.1)
```
### Existing GitHub issues
- [X] I have searched existing GitHub issues to make sure the issue does not already exist. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/qchem/test_hartree_fock.py::test_scf_leaves_random_seed_unchanged",
"tests/templates/test_layers/test_random.py::TestInterfaces::test_list_lists",
"tests/templates/test_layers/test_random.py::TestInterfaces::test_autograd"
] | [
"tests/qchem/test_hartree_fock.py::test_nuclear_energy[symbols0-geometry0-e_ref0]",
"tests/qchem/test_hartree_fock.py::test_nuclear_energy[symbols1-geometry1-e_ref1]",
"tests/qchem/test_hartree_fock.py::test_nuclear_energy[symbols2-geometry2-e_ref2]",
"tests/qchem/test_hartree_fock.py::test_nuclear_energy_gradient[symbols0-geometry0-g_ref0]",
"tests/qchem/test_hartree_fock.py::test_nuclear_energy_gradient[symbols1-geometry1-g_ref1]",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_seed",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_number_gates[3-4]",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_number_gates[1-2]",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_ratio_imprimitive[0.2]",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_ratio_imprimitive[0.6]",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_random_wires",
"tests/templates/test_layers/test_random.py::TestDecomposition::test_custom_wire_labels",
"tests/templates/test_layers/test_random.py::TestInputs::test_exception_wrong_dim",
"tests/templates/test_layers/test_random.py::TestInputs::test_id",
"tests/templates/test_layers/test_random.py::TestAttributes::test_shape[2-3-expected_shape0]",
"tests/templates/test_layers/test_random.py::TestAttributes::test_shape[2-1-expected_shape1]",
"tests/templates/test_layers/test_random.py::TestAttributes::test_shape[2-2-expected_shape2]"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-01-11T11:50:38Z" | apache-2.0 |
|
PennyLaneAI__pennylane-3744 | diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index 33b1663d1..3b7fdf41a 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -9,7 +9,7 @@
* The `qml.math` module now also contains a submodule for
fast Fourier transforms, `qml.math.fft`.
[(#1440)](https://github.com/PennyLaneAI/pennylane/pull/1440)
-
+
The submodule in particular provides differentiable
versions of the following functions, available in all common
interfaces for PennyLane
@@ -42,11 +42,11 @@
>>> qml.ops.qubit.special_unitary.pauli_basis_strings(1) # 4**1-1 = 3 Pauli words
['X', 'Y', 'Z']
>>> qml.ops.qubit.special_unitary.pauli_basis_strings(2) # 4**2-1 = 15 Pauli words
- ['IX', 'IY', 'IZ', 'XI', 'XX', 'XY', 'XZ', 'YI', 'YX', 'YY', 'YZ', 'ZI', 'ZX', 'ZY', 'ZZ']
+ ['IX', 'IY', 'IZ', 'XI', 'XX', 'XY', 'XZ', 'YI', 'YX', 'YY', 'YZ', 'ZI', 'ZX', 'ZY', 'ZZ']
```
-
+
For example, on a single qubit, we may define
-
+
```pycon
>>> theta = np.array([0.2, 0.1, -0.5])
>>> U = qml.SpecialUnitary(theta, 0)
@@ -54,7 +54,7 @@
array([[ 0.8537127 -0.47537233j, 0.09507447+0.19014893j],
[-0.09507447+0.19014893j, 0.8537127 +0.47537233j]])
```
-
+
A single non-zero entry in the parameters will create a Pauli rotation:
```pycon
@@ -65,7 +65,7 @@
>>> qml.math.allclose(su.matrix(), rx.matrix())
True
```
-
+
This operation can be differentiated with hardware-compatible methods like parameter shifts
and it supports parameter broadcasting/batching, but not both at the same time.
@@ -80,7 +80,7 @@
A `ParametrizedHamiltonian` holds information representing a linear combination of operators
with parametrized coefficents. The `ParametrizedHamiltonian` can be passed parameters to create the operator for
the specified parameters.
-
+
```pycon
f1 = lambda p, t: p * np.sin(t) * (t - 1)
f2 = lambda p, t: p[0] * np.cos(p[1]* t ** 2)
@@ -170,7 +170,7 @@
... qml.RX(x, 0)
... qml.RX(x, 1)
... return qml.expval(qml.PauliZ(0))
- >>> jax.jacobian(circuit)(jax.numpy.array(0.5))
+ >>> jax.jacobian(circuit)(jax.numpy.array(0.5))
DeviceArray(-0.4792258, dtype=float32, weak_type=True)
```
@@ -189,20 +189,20 @@
import pennylane as qml
import jax
from jax import numpy as jnp
-
+
jax.config.update("jax_enable_x64", True)
-
+
qml.enable_return()
-
+
dev = qml.device("lightning.qubit", wires=2)
-
+
@jax.jit
@qml.qnode(dev, interface="jax-jit", diff_method="parameter-shift", max_diff=2)
def circuit(a, b):
qml.RY(a, wires=0)
qml.RX(b, wires=1)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
-
+
a, b = jnp.array(1.0), jnp.array(2.0)
```
@@ -231,7 +231,7 @@
import pennylane as qml
from pennylane import numpy as np
import jax
-
+
symbols = ["H", "H"]
geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
@@ -352,16 +352,16 @@
```pycon
>>> f(params=[1.2, 2.3, 3.4, 4.5], t=3.9)
- DeviceArray(4.5, dtype=float32)
- >>> f(params=[1.2, 2.3, 3.4, 4.5], t=6) # zero outside the range (2, 4)
+ DeviceArray(4.5, dtype=float32)
+ >>> f(params=[1.2, 2.3, 3.4, 4.5], t=6) # zero outside the range (2, 4)
DeviceArray(0., dtype=float32)
```
-
+
* Added `pwc_from_function` as a decorator for defining a `ParametrizedHamiltonian`.
This function can be used to decorate a function and create a piecewise constant
approximation of it.
[(#3645)](https://github.com/PennyLaneAI/pennylane/pull/3645)
-
+
```pycon
>>> @pwc_from_function(t=(2, 4), num_bins=10)
... def f1(p, t):
@@ -370,7 +370,7 @@
The resulting function approximates the same of `p**2 * t` on the interval `t=(2, 4)`
in 10 bins, and returns zero outside the interval.
-
+
```pycon
# t=2 and t=2.1 are within the same bin
>>> f1(3, 2), f1(3, 2.1)
@@ -382,7 +382,7 @@
>>> f1(3, 5)
DeviceArray(0., dtype=float32)
```
-
+
*Next generation device API:*
* The `apply_operation` single-dispatch function is added to `devices/qubit` that applies an operation
@@ -398,6 +398,10 @@
<h3>Improvements</h3>
+* The parameter-shift derivative of variances saves a redundant evaluation of the
+ corresponding unshifted expectation value tape, if possible
+ [(#3744)](https://github.com/PennyLaneAI/pennylane/pull/3744)
+
* `qml.purity` is added as a measurement process for purity
[(#3551)](https://github.com/PennyLaneAI/pennylane/pull/3551)
@@ -501,20 +505,20 @@
* `qml.VQECost` is removed.
[(#3735)](https://github.com/PennyLaneAI/pennylane/pull/3735)
-* The default interface is now `auto`. There is no need to specify the interface anymore! It is automatically
+* The default interface is now `auto`. There is no need to specify the interface anymore! It is automatically
determined by checking your `QNode` parameters.
[(#3677)](https://github.com/PennyLaneAI/pennylane/pull/3677)
-
+
```python
import jax
import jax.numpy as jnp
-
+
qml.enable_return()
a = jnp.array(0.1)
b = jnp.array(0.2)
-
+
dev = qml.device("default.qubit", wires=2)
-
+
@qml.qnode(dev)
def circuit(a, b):
qml.RY(a, wires=0)
@@ -522,18 +526,18 @@
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
```
-
+
```pycon
>>> circuit(a, b)
(Array(0.9950042, dtype=float32), Array(-0.19767681, dtype=float32))
>>> jac = jax.jacobian(circuit)(a, b)
(Array(-0.09983341, dtype=float32, weak_type=True), Array(0.01983384, dtype=float32, weak_type=True))
```
-
- It comes with the fact that the interface is determined during the `QNode` call instead of the
- initialization. It means that the `gradient_fn` and `gradient_kwargs` are only defined on the QNode at the beginning
- of the call. As well, without specifying the interface it is not possible to guarantee that the device will not be changed
- during the call if you are using backprop(`default.qubit` to `default.qubit,jax`e.g.) whereas before it was happening at
+
+ It comes with the fact that the interface is determined during the `QNode` call instead of the
+ initialization. It means that the `gradient_fn` and `gradient_kwargs` are only defined on the QNode at the beginning
+ of the call. As well, without specifying the interface it is not possible to guarantee that the device will not be changed
+ during the call if you are using backprop(`default.qubit` to `default.qubit,jax`e.g.) whereas before it was happening at
initialization, therefore you should not try to track the device without specifying the interface.
* The tape method `get_operation` can also now return the operation index in the tape, and it can be
@@ -543,13 +547,13 @@
* `Operation.inv()` and the `Operation.inverse` setter have been removed. Please use `qml.adjoint` or `qml.pow` instead.
[(#3618)](https://github.com/PennyLaneAI/pennylane/pull/3618)
-
+
For example, instead of
-
+
```pycon
>>> qml.PauliX(0).inv()
```
-
+
use
```pycon
@@ -599,7 +603,7 @@
* Updated the code example in `qml.SparseHamiltonian` with the correct wire range.
[(#3643)](https://github.com/PennyLaneAI/pennylane/pull/3643)
-
+
* A hyperlink has been added in the text for a URL in the `qml.qchem.mol_data` docstring.
[(#3644)](https://github.com/PennyLaneAI/pennylane/pull/3644)
diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py
index f8d35909a..0f34169ca 100644
--- a/pennylane/gradients/parameter_shift.py
+++ b/pennylane/gradients/parameter_shift.py
@@ -587,6 +587,8 @@ def _expval_param_shift_tuple(
return tuple(grads)
+ processing_fn.first_result_unshifted = at_least_one_unshifted
+
return gradient_tapes, processing_fn
@@ -738,6 +740,8 @@ def expval_param_shift(
return qml.math.T(qml.math.stack(grads))
+ processing_fn.first_result_unshifted = at_least_one_unshifted
+
return gradient_tapes, processing_fn
@@ -893,7 +897,7 @@ def _create_variance_proc_fn(
shot_vector = isinstance(shots, Sequence)
# analytic derivative of <A>
- pdA = pdA_fn(results[1:tape_boundary])
+ pdA = pdA_fn(results[int(not pdA_fn.first_result_unshifted) : tape_boundary])
# analytic derivative of <A^2>
pdA2 = _get_pdA2(
@@ -969,8 +973,6 @@ def _var_param_shift_tuple(
# Get <A>, the expectation value of the tape with unshifted parameters.
expval_tape = tape.copy(copy_operations=True)
- gradient_tapes = [expval_tape]
-
# Convert all variance measurements on the tape into expectation values
for i in var_indices:
obs = expval_tape._measurements[i].obs
@@ -980,11 +982,12 @@ def _var_param_shift_tuple(
pdA_tapes, pdA_fn = expval_param_shift(
expval_tape, argnum, shifts, gradient_recipes, f0, broadcast, shots
)
+ gradient_tapes = [] if pdA_fn.first_result_unshifted else [expval_tape]
gradient_tapes.extend(pdA_tapes)
# Store the number of first derivative tapes, so that we know
# the number of results to post-process later.
- tape_boundary = len(pdA_tapes) + 1
+ tape_boundary = len(gradient_tapes)
# If there are non-involutory observables A present, we must compute d<A^2>/dp.
# Get the indices in the measurement queue of all non-involutory
@@ -1020,9 +1023,6 @@ def _var_param_shift_tuple(
)
gradient_tapes.extend(pdA2_tapes)
- # Store the number of first derivative tapes, so that we know
- # the number of results to post-process later.
- tape_boundary = len(pdA_tapes) + 1
processing_fn = _create_variance_proc_fn(
tape, var_mask, var_indices, pdA_fn, pdA2_fn, tape_boundary, non_involutory_indices, shots
)
@@ -1076,8 +1076,6 @@ def var_param_shift(
# Get <A>, the expectation value of the tape with unshifted parameters.
expval_tape = tape.copy(copy_operations=True)
- gradient_tapes = [expval_tape]
-
# Convert all variance measurements on the tape into expectation values
for i in var_idx:
obs = expval_tape._measurements[i].obs
@@ -1087,11 +1085,12 @@ def var_param_shift(
pdA_tapes, pdA_fn = expval_param_shift(
expval_tape, argnum, shifts, gradient_recipes, f0, broadcast
)
+ gradient_tapes = [] if pdA_fn.first_result_unshifted else [expval_tape]
gradient_tapes.extend(pdA_tapes)
# Store the number of first derivative tapes, so that we know
# the number of results to post-process later.
- tape_boundary = len(pdA_tapes) + 1
+ tape_boundary = len(gradient_tapes)
# If there are non-involutory observables A present, we must compute d<A^2>/dp.
# Get the indices in the measurement queue of all non-involutory
@@ -1153,7 +1152,7 @@ def var_param_shift(
f0 = qml.math.expand_dims(res, -1)
mask = qml.math.convert_like(qml.math.reshape(mask, qml.math.shape(f0)), res)
- pdA = pdA_fn(results[1:tape_boundary])
+ pdA = pdA_fn(results[int(not pdA_fn.first_result_unshifted) : tape_boundary])
pdA2 = 0
if non_involutory:
| PennyLaneAI/pennylane | 58790391e4eb53886254f80a4d5c428b4d146fc7 | diff --git a/tests/gradients/test_parameter_shift.py b/tests/gradients/test_parameter_shift.py
index 15e30190e..dcf42475d 100644
--- a/tests/gradients/test_parameter_shift.py
+++ b/tests/gradients/test_parameter_shift.py
@@ -1298,6 +1298,37 @@ class TestParameterShiftRule:
assert gradA == pytest.approx(expected, abs=tol)
assert gradF == pytest.approx(expected, abs=tol)
+ def test_recycling_unshifted_tape_result(self):
+ """Test that an unshifted term in the used gradient recipe is reused
+ for the chain rule computation within the variance parameter shift rule."""
+ dev = qml.device("default.qubit", wires=2)
+ gradient_recipes = ([[-1e-5, 1, 0], [1e-5, 1, 0], [-1e5, 1, -5e-6], [1e5, 1, 5e-6]], None)
+ x = [0.543, -0.654]
+
+ with qml.queuing.AnnotatedQueue() as q:
+ qml.RX(x[0], wires=[0])
+ qml.RX(x[1], wires=[0])
+ qml.var(qml.PauliZ(0))
+
+ tape = qml.tape.QuantumScript.from_queue(q)
+ tapes, fn = qml.gradients.param_shift(tape, gradient_recipes=gradient_recipes)
+ # 2 operations x 2 shifted positions + 1 unshifted term overall
+ assert len(tapes) == 2 * 2 + 1
+
+ with qml.queuing.AnnotatedQueue() as q:
+ qml.RX(x[0], wires=[0])
+ qml.RX(x[1], wires=[0])
+ qml.var(qml.Projector([1], wires=0))
+
+ tape = qml.tape.QuantumScript.from_queue(q)
+ tape.trainable_params = [0, 1]
+ tapes, fn = qml.gradients.param_shift(tape, gradient_recipes=gradient_recipes)
+ for tape in tapes:
+ print(tape.measurements)
+ # 2 operations x 2 shifted positions + 1 unshifted term overall <-- <H>
+ # + 2 operations x 2 shifted positions + 1 unshifted term <-- <H^2>
+ assert len(tapes) == (2 * 2 + 1) + (2 * 2 + 1)
+
def test_projector_variance(self, tol):
"""Test that the variance of a projector is correctly returned"""
dev = qml.device("default.qubit", wires=2)
@@ -1311,7 +1342,7 @@ class TestParameterShiftRule:
qml.var(qml.Projector(P, wires=0) @ qml.PauliX(1))
tape = qml.tape.QuantumScript.from_queue(q)
- tape.trainable_params = {0, 1}
+ tape.trainable_params = [0, 1]
res = dev.execute(tape)
expected = 0.25 * np.sin(x / 2) ** 2 * (3 + np.cos(2 * y) + 2 * np.cos(x) * np.sin(y) ** 2)
diff --git a/tests/returntypes/paramshift/test_parameter_shift_new.py b/tests/returntypes/paramshift/test_parameter_shift_new.py
index 73d279da3..7832fe972 100644
--- a/tests/returntypes/paramshift/test_parameter_shift_new.py
+++ b/tests/returntypes/paramshift/test_parameter_shift_new.py
@@ -1993,6 +1993,37 @@ class TestParameterShiftRule:
assert np.allclose(a_comp, e_comp, atol=tol, rtol=0)
assert gradF == pytest.approx(expected, abs=tol)
+ def test_recycling_unshifted_tape_result(self):
+ """Test that an unshifted term in the used gradient recipe is reused
+ for the chain rule computation within the variance parameter shift rule."""
+ dev = qml.device("default.qubit", wires=2)
+ gradient_recipes = ([[-1e-5, 1, 0], [1e-5, 1, 0], [-1e5, 1, -5e-6], [1e5, 1, 5e-6]], None)
+ x = [0.543, -0.654]
+
+ with qml.queuing.AnnotatedQueue() as q:
+ qml.RX(x[0], wires=[0])
+ qml.RX(x[1], wires=[0])
+ qml.var(qml.PauliZ(0))
+
+ tape = qml.tape.QuantumScript.from_queue(q)
+ tapes, fn = qml.gradients.param_shift(tape, gradient_recipes=gradient_recipes)
+ # 2 operations x 2 shifted positions + 1 unshifted term overall
+ assert len(tapes) == 2 * 2 + 1
+
+ with qml.queuing.AnnotatedQueue() as q:
+ qml.RX(x[0], wires=[0])
+ qml.RX(x[1], wires=[0])
+ qml.var(qml.Projector([1], wires=0))
+
+ tape = qml.tape.QuantumScript.from_queue(q)
+ tape.trainable_params = [0, 1]
+ tapes, fn = qml.gradients.param_shift(tape, gradient_recipes=gradient_recipes)
+ for tape in tapes:
+ print(tape.measurements)
+ # 2 operations x 2 shifted positions + 1 unshifted term overall <-- <H>
+ # + 2 operations x 2 shifted positions + 1 unshifted term <-- <H^2>
+ assert len(tapes) == (2 * 2 + 1) + (2 * 2 + 1)
+
def test_projector_variance(self, tol):
"""Test that the variance of a projector is correctly returned"""
dev = qml.device("default.qubit", wires=2)
| Parameter-shift rule of variance does not recycle unshifted evaluation
### Feature details
Save repeated evaluations at unshifted parameters when using `param_shift` together with gradient recipes that involve unshifted evaluations. Consider the setup
```python
import pennylane as qml
import numpy as np
dev = qml.device("default.qubit", wires=2)
x = [0.543, -0.654]
ops_with_custom_recipe = [0]
with qml.tape.QuantumTape() as tape:
qml.RX(x[0], wires=[0])
qml.RX(x[1], wires=[0])
qml.var(qml.PauliZ(0))
gradient_recipes = tuple([[[-1e-7, 1, 0], [1e-7, 1, 0], [-1e5, 1, -5e-6], [1e5, 1, 5e-6]], None])
tapes, fn = qml.gradients.param_shift(tape, gradient_recipes=gradient_recipes)
```
Then we get:
```pycon
>>> for tape in tapes:
>>> print(tape.get_parameters())
[0.543, -0.654]
[0.543, -0.654]
[0.542995, -0.654]
[0.5430050000000001, -0.654]
[0.543, 0.9167963267948965]
[0.543, -2.2247963267948965]
```
i.e. the unshifted tape is included twice, once for the unshifted evaluation in the product rule of the variance, and once for the shift rule that uses an unshifted term.
### Implementation
This could be saved by passing, between `var_param_shift` and the calls to `expval_param_shift`, the information that the unshifted evaluation is already available.
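
One way to do this, mirroring the accompanying patch, is to have the expectation-value rule annotate its processing function with a flag that the variance rule checks before adding its own unshifted tape. A toy sketch with simplified signatures (not PennyLane's actual API):

```python
# Toy sketch: the inner expectation-value shift rule reports whether its tapes
# already include the unshifted evaluation, so the variance rule does not
# queue it a second time.
def expval_shift_tapes(recipe):
    tapes = [("shift", c3) for _, _, c3 in recipe if c3 != 0]
    includes_unshifted = any(c3 == 0 for _, _, c3 in recipe)
    if includes_unshifted:
        tapes.insert(0, ("unshifted", 0.0))

    def processing_fn(results):
        return results  # placeholder for the actual post-processing

    processing_fn.first_result_unshifted = includes_unshifted
    return tapes, processing_fn

def var_shift_tapes(recipe):
    pdA_tapes, pdA_fn = expval_shift_tapes(recipe)
    # Reuse the unshifted evaluation if the inner rule already provides it.
    tapes = [] if pdA_fn.first_result_unshifted else [("unshifted", 0.0)]
    return tapes + pdA_tapes, pdA_fn

recipe = [[-1e-7, 1, 0], [1e-7, 1, 0], [-1e5, 1, -5e-6], [1e5, 1, 5e-6]]
tapes, _ = var_shift_tapes(recipe)
print([kind for kind, _ in tapes])  # the unshifted evaluation appears only once
```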
### How important would you say this feature is?
1: Not important. Would be nice to have.
### Additional information
This issue occurs with and without the new return type system. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_recycling_unshifted_tape_result",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_recycling_unshifted_tape_result"
] | [
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_first_order[RX-frequencies0-None]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_first_order[RX-frequencies1-shifts1]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_first_order[CRY-frequencies2-None]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_first_order[CRY-frequencies3-shifts3]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_second_order[RX-frequencies0-None]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_second_order[RX-frequencies1-shifts1]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_second_order[CRY-frequencies2-None]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_custom_recipe_second_order[CRY-frequencies3-shifts3]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_error_wrong_order[0]",
"tests/gradients/test_parameter_shift.py::TestGetOperationRecipe::test_error_wrong_order[3]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_empty_circuit",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_all_parameters_independent",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_state_non_differentiable_error",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_independent_parameter",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_no_trainable_params_qnode_autograd",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_no_trainable_params_tape[True]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_no_trainable_params_tape[False]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_all_zero_diff_methods[True]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_all_zero_diff_methods[False]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_with_gradient_recipes",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe0]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe1]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe2]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_custom_recipe_unshifted_only[ops_with_custom_recipe0]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_custom_recipe_unshifted_only[ops_with_custom_recipe1]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_custom_recipe_unshifted_only[ops_with_custom_recipe2]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_f0_provided[0]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_f0_provided[1]",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_op_with_custom_unshifted_term",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_independent_parameters_analytic",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_grad_recipe_parameter_dependent",
"tests/gradients/test_parameter_shift.py::TestParamShift::test_error_no_diff_info",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[0-1]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[0-3]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[1-1]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[1-3]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum0]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum1]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum2]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum3]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum0]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum1]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum2]",
"tests/gradients/test_parameter_shift.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum3]",
"tests/gradients/test_parameter_shift.py::TestParamShiftUsingBroadcasting::test_independent_parameter",
"tests/gradients/test_parameter_shift.py::TestParamShiftUsingBroadcasting::test_with_gradient_recipes",
"tests/gradients/test_parameter_shift.py::TestParamShiftUsingBroadcasting::test_recycled_unshifted_tape",
"tests/gradients/test_parameter_shift.py::TestParamShiftUsingBroadcasting::test_independent_parameters_analytic",
"tests/gradients/test_parameter_shift.py::TestParamShiftUsingBroadcasting::test_grad_recipe_parameter_dependent",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRX]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRY]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRZ]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_CRot_gradient[theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_gradients_agree_finite_differences",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_variance_gradients_agree_finite_differences",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_fallback",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_all_fallback",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_single_expectation_value",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_multiple_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_var_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_prob_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_involutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_non_involutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_involutory_and_noninvolutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_expval_and_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_projector_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_output_shape_matches_qnode",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRule::test_special_observable_qnode_differentiation",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRX]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRY]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRZ]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta0]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta1]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta2]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta3]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta4]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta5]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta6]",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_gradients_agree_finite_differences",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_fallback",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_all_fallback",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_single_expectation_value",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_multiple_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_var_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_prob_expectation_values",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_involutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_non_involutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_involutory_and_noninvolutory_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_expval_and_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_projector_variance",
"tests/gradients/test_parameter_shift.py::TestParameterShiftRuleBroadcast::test_output_shape_matches_qnode",
"tests/gradients/test_parameter_shift.py::TestParamShiftGradients::test_autograd[False-expected0]",
"tests/gradients/test_parameter_shift.py::TestParamShiftGradients::test_autograd[True-expected1]",
"tests/gradients/test_parameter_shift.py::TestParamShiftProbJacobians::test_autograd[False-expected0]",
"tests/gradients/test_parameter_shift.py::TestParamShiftProbJacobians::test_autograd[True-expected1]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_not_expval_error[True]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_not_expval_error[False]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_no_trainable_coeffs[True]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_no_trainable_coeffs[False]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_trainable_coeffs[True]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_trainable_coeffs[False]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_multiple_hamiltonians[True]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_multiple_hamiltonians[False]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_autograd[True]",
"tests/gradients/test_parameter_shift.py::TestHamiltonianExpvalGradients::test_autograd[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_first_order[RX-frequencies0-None]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_first_order[RX-frequencies1-shifts1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_first_order[CRY-frequencies2-None]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_first_order[CRY-frequencies3-shifts3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_qnode_custom_recipe",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_second_order[RX-frequencies0-None]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_second_order[RX-frequencies1-shifts1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_second_order[CRY-frequencies2-None]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_custom_recipe_second_order[CRY-frequencies3-shifts3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_error_wrong_order[0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestGetOperationRecipe::test_error_wrong_order[3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_empty_circuit",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_all_parameters_independent",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_state_non_differentiable_error",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_independent_parameter",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_no_trainable_params_tape[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_no_trainable_params_tape[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_no_trainable_params_multiple_return_tape",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_all_zero_diff_methods_tape",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_all_zero_diff_methods_multiple_returns_tape",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_with_gradient_recipes",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_recycled_unshifted_tape[ops_with_custom_recipe2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[False-ops_with_custom_recipe0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[False-ops_with_custom_recipe1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[False-ops_with_custom_recipe2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[True-ops_with_custom_recipe0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[True-ops_with_custom_recipe1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_unshifted_only[True-ops_with_custom_recipe2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_mixing_unshifted_shifted[ops_with_custom_recipe0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_mixing_unshifted_shifted[ops_with_custom_recipe1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_custom_recipe_mixing_unshifted_shifted[ops_with_custom_recipe2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_f0_provided[0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_f0_provided[1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_op_with_custom_unshifted_term",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_independent_parameters_analytic",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_grad_recipe_parameter_dependent",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShift::test_error_no_diff_info",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[0-1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[0-3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[1-1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_single_parameter_broadcasted[1-3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[1-argnum3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftWithBroadcasted::test_with_multiple_parameters_broadcasted[3-argnum3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftUsingBroadcasting::test_independent_parameter",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftUsingBroadcasting::test_with_gradient_recipes",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftUsingBroadcasting::test_recycled_unshifted_tape",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftUsingBroadcasting::test_independent_parameters_analytic",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParamShiftUsingBroadcasting::test_grad_recipe_parameter_dependent",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RX-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RY-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_Rot_gradient[1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRX]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRY]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_controlled_rotation_gradient[CRZ]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_CRot_gradient[theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_gradients_agree_finite_differences",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_variance_gradients_agree_finite_differences",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_fallback",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_fallback_single_meas",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_fallback_probs[RX-RY-0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_fallback_probs[RX-RY-1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_all_fallback",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_single_expectation_value",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_multiple_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_var_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_prob_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_involutory_variance_single_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_involutory_variance_multi_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_non_involutory_variance_single_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_non_involutory_variance_multi_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_involutory_and_noninvolutory_variance_single_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_var_and_probs_single_param[0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_var_and_probs_single_param[1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_var_and_probs_multi_params",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_put_zeros_in_pdA2_involutory",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_expval_and_variance_single_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_expval_and_variance_multi_param",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_projector_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_expval[cost1-expected_shape0-False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_expval[cost2-expected_shape1-True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_expval[cost3-expected_shape2-True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_probs[cost4-expected_shape0-False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_probs[cost5-expected_shape1-True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_output_shape_matches_qnode_probs[cost6-expected_shape2-True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_special_observable_qnode_differentiation",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRule::test_multi_measure_no_warning",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RX-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RY-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[RZ-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_pauli_rotation_gradient[PhaseShift-1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.5707963267948966-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[0.3-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_Rot_gradient[1.4142135623730951-theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRX]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRY]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_controlled_rotation_gradient[CRZ]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta0]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta1]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta2]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta3]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta4]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta5]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_CRot_gradient[theta6]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_gradients_agree_finite_differences",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_fallback",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_all_fallback",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_single_expectation_value",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_multiple_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_var_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_prob_expectation_values",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_involutory_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_non_involutory_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_involutory_and_noninvolutory_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_expval_and_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_projector_variance",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestParameterShiftRuleBroadcast::test_output_shape_matches_qnode",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_not_expval_error[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_not_expval_error[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_no_trainable_coeffs[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_no_trainable_coeffs[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_trainable_coeffs[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_trainable_coeffs[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_multiple_hamiltonians[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_multiple_hamiltonians[False]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_autograd[True]",
"tests/returntypes/paramshift/test_parameter_shift_new.py::TestHamiltonianExpvalGradients::test_autograd[False]"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-02-09T13:38:13Z" | apache-2.0 |
|
PennyLaneAI__pennylane-4295 | diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index 7667e77f5..5c4a4b0d9 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -14,6 +14,11 @@
 <h3>Bug fixes 🐛</h3>
+* Raise a warning if control indicators are hidden when calling `qml.draw_mpl`
+ [(#4295)](https://github.com/PennyLaneAI/pennylane/pull/4295)
+
 <h3>Contributors ✍️</h3>
This release contains contributions from (in alphabetical order):
+
+Matthew Silverman
diff --git a/pennylane/drawer/mpldrawer.py b/pennylane/drawer/mpldrawer.py
index 29c260d2b..c0d1a6cce 100644
--- a/pennylane/drawer/mpldrawer.py
+++ b/pennylane/drawer/mpldrawer.py
@@ -15,6 +15,7 @@
This module contains the MPLDrawer class for creating circuit diagrams with matplotlib
"""
from collections.abc import Iterable
+import warnings
has_mpl = True
try:
@@ -591,6 +592,15 @@ class MPLDrawer:
min_wire = min(wires_all)
max_wire = max(wires_all)
+ if len(wires_target) > 1:
+ min_target, max_target = min(wires_target), max(wires_target)
+ if any(min_target < w < max_target for w in wires_ctrl):
+ warnings.warn(
+ "Some control indicators are hidden behind an operator. Consider re-ordering "
+ "your circuit wires to ensure all control indicators are visible.",
+ UserWarning,
+ )
+
line = plt.Line2D((layer, layer), (min_wire, max_wire), **options)
self._ax.add_line(line)
| PennyLaneAI/pennylane | 6e0d11a8025007f4295b78733e318bd343710c5a | diff --git a/tests/drawer/test_mpldrawer.py b/tests/drawer/test_mpldrawer.py
index 28cd5a94c..26bb683ff 100644
--- a/tests/drawer/test_mpldrawer.py
+++ b/tests/drawer/test_mpldrawer.py
@@ -19,6 +19,7 @@ page in the developement guide.
"""
# pylint: disable=protected-access,wrong-import-position
+import warnings
import pytest
plt = pytest.importorskip("matplotlib.pyplot")
@@ -429,6 +430,28 @@ class TestCTRL:
assert circle.center == (0, 0)
plt.close()
+ @pytest.mark.parametrize(
+ "control_wires,target_wires",
+ [
+ ((1,), (0, 2)),
+ ((0, 2), (1, 3)),
+ ((1, 3), (0, 2)),
+ ((0, 2, 4), (1, 3)),
+ ],
+ )
+ def test_ctrl_raises_warning_with_overlap(self, control_wires, target_wires):
+ """Tests that a warning is raised if some control indicators are not visible."""
+ drawer = MPLDrawer(1, 4)
+ with pytest.warns(UserWarning, match="control indicators are hidden behind an operator"):
+ drawer.ctrl(0, control_wires, target_wires)
+
+ @pytest.mark.parametrize("control_wires,target_wires", [((0,), (1, 2)), ((2,), (0, 1))])
+ def test_ctrl_no_warning_without_overlap(self, control_wires, target_wires):
+ drawer = MPLDrawer(1, 3)
+ with warnings.catch_warnings(record=True) as w:
+ drawer.ctrl(0, control_wires, target_wires)
+ assert len(w) == 0
+
def test_target_x(self):
"""Tests hidden target_x drawing method"""
| [BUG] draw_mpl misses controls for multi-target-wire ops with interspersed wires
### Expected behavior
All control and target wires are indicated in some way, or the draw function fails.
### Actual behavior
The control wire sandwiched between target wires is hidden behind a box drawing. For example, the attached image is hiding a control on wire 2.
![image](https://github.com/PennyLaneAI/pennylane/assets/23283972/8ddcac21-1fe9-46e8-94f0-731d931be315)
### Additional information
_No response_
### Source code
```shell
def c():
qml.ctrl(qml.IsingXX(1.1, [1, 3]), [0, 2])
_ = qml.draw_mpl(c, wire_order=range(4))()
```
### Tracebacks
_No response_
### System information
```shell
Name: PennyLane
Version: 0.31.0.dev0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /Users/matthews/.pyenv/versions/3.9.13/envs/pl/lib/python3.9/site-packages
Editable project location: /Users/matthews/src/github.com/PennyLaneAI/pennylane
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning
Platform info: macOS-13.4-arm64-arm-64bit
Python version: 3.9.13
Numpy version: 1.23.5
Scipy version: 1.10.0
Installed devices:
- default.gaussian (PennyLane-0.31.0.dev0)
- default.mixed (PennyLane-0.31.0.dev0)
- default.qubit (PennyLane-0.31.0.dev0)
- default.qubit.autograd (PennyLane-0.31.0.dev0)
- default.qubit.jax (PennyLane-0.31.0.dev0)
- default.qubit.tf (PennyLane-0.31.0.dev0)
- default.qubit.torch (PennyLane-0.31.0.dev0)
- default.qutrit (PennyLane-0.31.0.dev0)
- null.qubit (PennyLane-0.31.0.dev0)
- lightning.qubit (PennyLane-Lightning-0.31.0.dev3)
```
### Existing GitHub issues
- [X] I have searched existing GitHub issues to make sure the issue does not already exist. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_raises_warning_with_overlap[control_wires0-target_wires0]",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_raises_warning_with_overlap[control_wires1-target_wires1]",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_raises_warning_with_overlap[control_wires2-target_wires2]",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_raises_warning_with_overlap[control_wires3-target_wires3]"
] | [
"tests/drawer/test_mpldrawer.py::TestInitialization::test_figsize_wires[2-2]",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_figsize_wires[2-3]",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_figsize_wires[3-2]",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_figsize_wires[3-3]",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_customfigsize",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_config_params_set",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_wires_formatting",
"tests/drawer/test_mpldrawer.py::TestInitialization::test_fontsize",
"tests/drawer/test_mpldrawer.py::TestLabels::test_labels",
"tests/drawer/test_mpldrawer.py::TestLabels::test_labels_formatting",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_simple_box",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_multiwire_box",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_notch_standard_styling",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_active_wire_notches_number[wires0-0]",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_active_wire_notches_number[wires1-0]",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_active_wire_notches_number[wires2-4]",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_active_wire_notches_number[wires3-6]",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_no_active_wire_notches",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_extra_width",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_box_formatting",
"tests/drawer/test_mpldrawer.py::TestBoxGate::test_text_formatting",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_no_target",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_multi_wires",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_on_zero",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_control_values_error",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_formatting",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_circ",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrlo_circ",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_target",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_no_warning_without_overlap[control_wires0-target_wires0]",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_ctrl_no_warning_without_overlap[control_wires1-target_wires1]",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_target_x",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_target_x_color",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_CNOT",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_CNOT_control_values",
"tests/drawer/test_mpldrawer.py::TestCTRL::test_CNOT_color",
"tests/drawer/test_mpldrawer.py::TestSWAP::test_swap_x",
"tests/drawer/test_mpldrawer.py::TestSWAP::test_SWAP",
"tests/drawer/test_mpldrawer.py::TestSWAP::test_SWAP_options",
"tests/drawer/test_mpldrawer.py::TestMeasure::test_measure",
"tests/drawer/test_mpldrawer.py::TestMeasure::test_measure_formatted",
"tests/drawer/test_mpldrawer.py::TestAutosize::test_autosize_false",
"tests/drawer/test_mpldrawer.py::TestAutosize::test_autosize_one_wire",
"tests/drawer/test_mpldrawer.py::TestAutosize::test_autosize_multiwires",
"tests/drawer/test_mpldrawer.py::TestAutosize::test_multiline_text_single_wire",
"tests/drawer/test_mpldrawer.py::TestAutosize::test_wide_multline_text_multiwires",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_basic",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_two_ctrl_wires",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_two_ctrl_wires_upward",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_fail_with_bad_order[ctrl_wires0-target_wires0]",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_fail_with_bad_order[ctrl_wires1-target_wires1]",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_fail_with_bad_order[ctrl_wires2-target_wires2]",
"tests/drawer/test_mpldrawer.py::TestCond::test_cond_fail_with_bad_order[ctrl_wires3-target_wires3]"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-06-23T14:38:34Z" | apache-2.0 |
|
PermutaTriangle__Tilings-469 | diff --git a/tilings/strategies/factor.py b/tilings/strategies/factor.py
index 694cc83..1b13955 100644
--- a/tilings/strategies/factor.py
+++ b/tilings/strategies/factor.py
@@ -260,6 +260,22 @@ class FactorWithInterleavingStrategy(FactorStrategy):
self.tracked = tracked
self.cols, self.rows = self.interleaving_rows_and_cols(self.partition)
+ def to_jsonable(self) -> dict:
+ d = super().to_jsonable()
+ d["tracked"] = self.tracked
+ return d
+
+ def __repr__(self) -> str:
+ args = ", ".join(
+ [
+ f"partition={self.partition}",
+ f"ignore_parent={self.ignore_parent}",
+ f"workable={self.workable}",
+ f"tracked={self.tracked}",
+ ]
+ )
+ return f"{self.__class__.__name__}({args})"
+
def is_two_way(self, comb_class: Tiling) -> bool: # type: ignore
return self.is_reversible(comb_class)
| PermutaTriangle/Tilings | 244371dc498344f859942b160ca99bbba24b884f | diff --git a/tests/strategies/test_encoding.py b/tests/strategies/test_encoding.py
index b5584ed..d46b725 100644
--- a/tests/strategies/test_encoding.py
+++ b/tests/strategies/test_encoding.py
@@ -244,6 +244,26 @@ def partition_ignoreparent_workable(strategy):
]
+def partition_ignoreparent_workable_tracked(strategy):
+ return [
+ strategy(
+ partition=partition,
+ ignore_parent=ignore_parent,
+ workable=workable,
+ tracked=tracked,
+ )
+ for partition, ignore_parent, workable, tracked in product(
+ (
+ [[(2, 1), (0, 1)], [(1, 0)]],
+ (((0, 0), (0, 2)), ((0, 1),), ((3, 3), (4, 3))),
+ ),
+ (True, False),
+ (True, False),
+ (True, False),
+ )
+ ]
+
+
def gps_ignoreparent(strategy):
return [
strategy(gps=gps, ignore_parent=ignore_parent)
@@ -376,8 +396,8 @@ strategy_objects = (
+ subreqs_partial_ignoreparent_dirs(RequirementPlacementFactory)
+ [SymmetriesFactory(), BasicVerificationStrategy(), EmptyCellInferralFactory()]
+ partition_ignoreparent_workable(FactorStrategy)
- + partition_ignoreparent_workable(FactorWithInterleavingStrategy)
- + partition_ignoreparent_workable(FactorWithMonotoneInterleavingStrategy)
+ + partition_ignoreparent_workable_tracked(FactorWithInterleavingStrategy)
+ + partition_ignoreparent_workable_tracked(FactorWithMonotoneInterleavingStrategy)
+ ignoreparent(DatabaseVerificationStrategy)
+ ignoreparent(LocallyFactorableVerificationStrategy)
+ ignoreparent(ElementaryVerificationStrategy)
| AssertionError: rule not in the spec and not empty
```
AssertionError: rule not in the spec and not empty
+-+-+
|1|\|
+-+-+
|\| |
+-+-+
1: Av(0123)
\: Av(01)
\: Av+(01)
Crossing obstructions:
0123: (0, 0), (0, 1), (0, 1), (0, 1)
0123: (0, 0), (0, 1), (0, 1), (1, 1)
0123: (0, 1), (0, 1), (0, 1), (1, 1)
Requirement 0:
0: (1, 1)
Assumption 0:
can count points in cell (0, 0)
```
To recreate, on `insert-factorable-reqs` branch:
```python
import requests
from comb_spec_searcher import CombinatorialSpecification
from tilings.tilescope import TileScopePack
uri = "https://api.permpal.com/garpur_run/627c8c4a4afd988318e59567"
data = requests.get(uri).json()
pack = TileScopePack.from_dict(data["pack"])
spec = CombinatorialSpecification.from_dict(data["specification"])
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/strategies/test_encoding.py::test_json_encoding[strategy147]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy149]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy151]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy153]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy155]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy157]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy159]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy161]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy163]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy165]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy167]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy169]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy171]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy173]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy175]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy177]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy147]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy149]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy151]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy153]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy155]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy157]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy159]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy161]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy163]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy165]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy167]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy169]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy171]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy173]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy175]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy177]"
] | [
"tests/strategies/test_encoding.py::test_json_encoding[strategy0]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy1]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy2]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy3]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy4]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy5]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy6]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy7]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy8]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy9]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy10]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy11]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy12]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy13]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy14]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy15]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy16]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy17]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy18]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy19]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy20]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy21]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy22]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy23]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy24]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy25]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy26]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy27]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy28]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy29]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy30]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy31]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy32]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy33]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy34]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy35]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy36]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy37]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy38]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy39]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy40]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy41]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy42]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy43]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy44]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy45]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy46]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy47]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy48]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy49]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy50]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy51]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy52]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy53]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy54]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy55]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy56]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy57]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy58]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy59]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy60]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy61]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy62]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy63]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy64]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy65]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy66]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy67]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy68]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy69]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy70]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy71]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy72]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy73]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy74]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy75]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy76]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy77]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy78]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy79]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy80]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy81]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy82]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy83]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy84]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy85]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy86]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy87]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy88]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy89]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy90]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy91]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy92]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy93]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy94]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy95]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy96]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy97]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy98]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy99]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy100]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy101]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy102]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy103]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy104]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy105]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy106]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy107]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy108]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy109]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy110]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy111]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy112]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy113]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy114]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy115]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy116]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy117]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy118]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy119]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy120]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy121]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy122]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy123]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy124]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy125]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy126]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy127]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy128]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy129]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy130]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy131]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy132]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy133]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy134]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy135]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy136]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy137]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy138]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy139]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy140]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy141]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy142]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy143]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy144]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy145]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy146]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy148]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy150]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy152]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy154]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy156]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy158]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy160]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy162]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy164]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy166]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy168]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy170]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy172]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy174]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy176]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy178]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy179]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy180]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy181]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy182]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy183]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy184]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy185]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy186]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy187]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy188]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy189]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy190]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy191]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy192]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy193]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy194]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy195]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy196]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy197]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy198]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy199]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy200]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy201]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy202]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy203]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy204]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy205]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy206]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy207]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy208]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy209]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy210]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy211]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy212]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy213]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy214]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy215]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy216]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy217]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy218]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy219]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy220]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy221]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy222]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy223]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy224]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy225]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy226]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy227]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy228]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy229]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy230]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy231]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy232]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy233]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy234]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy235]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy236]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy237]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy238]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy239]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy240]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy241]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy242]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy243]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy244]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy245]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy246]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy247]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy248]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy249]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy250]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy251]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy252]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy253]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy254]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy255]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy256]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy257]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy258]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy259]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy260]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy261]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy262]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy263]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy264]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy265]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy266]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy267]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy268]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy269]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy270]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy271]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy272]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy273]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy274]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy275]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy276]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy277]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy278]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy279]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy280]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy281]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy282]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy283]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy284]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy285]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy286]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy287]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy288]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy289]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy290]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy291]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy292]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy293]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy294]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy295]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy296]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy297]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy298]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy299]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy300]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy301]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy302]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy303]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy304]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy305]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy306]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy307]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy308]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy309]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy310]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy311]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy312]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy313]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy314]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy315]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy316]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy317]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy318]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy319]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy320]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy321]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy322]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy323]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy324]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy325]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy326]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy327]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy328]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy329]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy330]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy331]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy332]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy333]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy334]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy335]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy336]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy337]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy338]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy339]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy340]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy341]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy342]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy343]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy344]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy345]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy346]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy347]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy348]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy349]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy350]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy351]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy352]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy353]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy354]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy355]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy356]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy357]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy358]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy359]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy360]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy361]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy362]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy363]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy364]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy365]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy366]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy367]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy368]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy369]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy370]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy371]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy372]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy373]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy374]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy375]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy376]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy377]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy378]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy379]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy380]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy381]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy382]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy383]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy384]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy385]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy386]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy387]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy388]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy389]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy390]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy391]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy392]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy393]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy394]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy395]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy396]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy397]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy398]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy399]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy400]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy401]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy402]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy403]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy404]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy405]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy406]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy407]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy408]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy409]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy410]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy411]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy412]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy413]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy414]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy415]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy416]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy417]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy418]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy419]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy420]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy421]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy422]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy423]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy424]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy425]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy426]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy427]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy428]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy429]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy430]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy431]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy432]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy433]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy434]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy435]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy436]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy437]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy438]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy439]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy440]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy441]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy442]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy443]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy444]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy445]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy446]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy447]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy448]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy449]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy450]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy451]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy452]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy453]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy454]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy455]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy456]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy457]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy458]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy459]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy460]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy461]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy462]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy463]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy464]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy465]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy466]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy467]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy468]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy469]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy470]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy471]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy472]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy473]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy474]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy475]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy476]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy477]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy478]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy479]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy480]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy481]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy482]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy483]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy484]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy485]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy486]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy487]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy488]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy489]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy490]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy491]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy492]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy493]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy494]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy495]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy496]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy497]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy498]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy499]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy500]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy501]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy502]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy503]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy504]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy505]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy506]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy507]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy508]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy509]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy510]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy511]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy512]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy513]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy514]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy515]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy516]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy517]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy518]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy519]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy520]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy521]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy522]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy523]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy524]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy525]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy526]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy527]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy528]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy529]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy530]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy531]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy532]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy533]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy534]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy535]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy536]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy537]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy538]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy539]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy540]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy541]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy542]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy543]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy544]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy545]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy546]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy547]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy548]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy549]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy550]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy551]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy552]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy553]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy554]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy555]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy556]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy557]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy558]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy559]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy560]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy561]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy562]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy563]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy564]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy565]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy566]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy567]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy568]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy569]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy570]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy571]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy572]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy573]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy574]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy575]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy576]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy577]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy578]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy579]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy580]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy581]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy582]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy583]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy584]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy585]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy586]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy587]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy588]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy589]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy590]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy591]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy592]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy593]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy594]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy595]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy596]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy597]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy598]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy599]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy600]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy601]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy602]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy603]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy604]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy605]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy606]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy607]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy608]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy609]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy610]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy611]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy612]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy613]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy614]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy615]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy616]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy617]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy618]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy619]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy620]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy621]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy622]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy623]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy624]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy625]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy626]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy627]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy628]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy629]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy630]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy631]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy632]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy633]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy634]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy635]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy636]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy637]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy638]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy639]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy640]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy641]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy642]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy643]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy644]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy645]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy646]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy647]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy648]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy649]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy650]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy651]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy652]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy653]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy654]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy655]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy656]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy657]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy658]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy659]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy660]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy661]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy662]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy663]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy664]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy665]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy666]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy667]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy668]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy669]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy670]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy671]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy672]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy673]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy674]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy675]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy676]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy677]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy678]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy679]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy680]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy681]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy682]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy683]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy684]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy685]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy686]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy687]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy688]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy689]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy690]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy691]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy692]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy693]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy694]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy695]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy696]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy697]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy698]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy699]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy700]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy701]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy702]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy703]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy704]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy705]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy706]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy707]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy708]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy709]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy710]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy711]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy712]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy713]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy714]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy715]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy716]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy717]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy718]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy719]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy720]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy721]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy722]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy723]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy724]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy725]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy726]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy727]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy728]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy729]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy730]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy731]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy732]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy733]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy734]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy735]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy736]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy737]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy738]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy739]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy740]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy741]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy742]",
"tests/strategies/test_encoding.py::test_json_encoding[strategy743]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy0]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy1]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy2]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy3]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy4]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy5]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy6]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy7]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy8]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy9]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy10]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy11]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy12]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy13]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy14]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy15]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy16]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy17]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy18]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy19]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy20]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy21]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy22]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy23]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy24]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy25]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy26]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy27]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy28]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy29]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy30]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy31]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy32]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy33]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy34]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy35]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy36]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy37]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy38]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy39]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy40]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy41]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy42]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy43]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy44]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy45]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy46]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy47]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy48]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy49]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy50]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy51]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy52]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy53]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy54]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy55]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy56]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy57]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy58]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy59]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy60]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy61]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy62]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy63]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy64]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy65]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy66]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy67]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy68]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy69]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy70]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy71]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy72]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy73]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy74]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy75]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy76]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy77]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy78]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy79]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy80]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy81]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy82]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy83]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy84]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy85]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy86]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy87]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy88]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy89]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy90]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy91]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy92]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy93]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy94]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy95]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy96]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy97]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy98]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy99]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy100]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy101]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy102]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy103]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy104]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy105]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy106]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy107]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy108]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy109]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy110]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy111]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy112]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy113]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy114]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy115]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy116]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy117]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy118]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy119]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy120]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy121]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy122]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy123]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy124]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy125]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy126]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy127]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy128]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy129]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy130]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy131]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy132]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy133]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy134]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy135]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy136]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy137]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy138]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy139]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy140]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy141]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy142]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy143]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy144]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy145]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy146]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy148]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy150]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy152]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy154]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy156]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy158]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy160]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy162]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy164]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy166]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy168]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy170]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy172]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy174]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy176]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy178]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy179]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy180]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy181]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy182]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy183]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy184]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy185]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy186]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy187]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy188]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy189]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy190]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy191]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy192]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy193]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy194]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy195]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy196]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy197]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy198]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy199]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy200]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy201]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy202]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy203]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy204]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy205]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy206]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy207]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy208]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy209]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy210]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy211]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy212]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy213]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy214]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy215]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy216]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy217]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy218]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy219]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy220]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy221]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy222]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy223]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy224]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy225]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy226]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy227]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy228]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy229]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy230]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy231]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy232]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy233]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy234]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy235]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy236]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy237]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy238]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy239]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy240]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy241]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy242]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy243]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy244]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy245]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy246]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy247]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy248]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy249]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy250]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy251]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy252]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy253]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy254]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy255]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy256]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy257]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy258]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy259]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy260]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy261]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy262]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy263]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy264]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy265]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy266]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy267]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy268]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy269]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy270]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy271]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy272]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy273]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy274]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy275]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy276]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy277]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy278]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy279]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy280]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy281]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy282]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy283]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy284]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy285]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy286]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy287]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy288]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy289]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy290]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy291]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy292]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy293]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy294]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy295]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy296]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy297]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy298]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy299]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy300]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy301]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy302]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy303]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy304]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy305]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy306]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy307]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy308]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy309]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy310]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy311]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy312]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy313]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy314]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy315]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy316]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy317]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy318]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy319]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy320]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy321]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy322]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy323]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy324]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy325]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy326]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy327]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy328]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy329]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy330]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy331]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy332]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy333]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy334]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy335]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy336]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy337]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy338]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy339]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy340]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy341]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy342]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy343]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy344]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy345]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy346]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy347]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy348]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy349]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy350]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy351]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy352]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy353]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy354]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy355]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy356]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy357]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy358]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy359]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy360]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy361]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy362]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy363]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy364]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy365]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy366]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy367]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy368]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy369]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy370]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy371]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy372]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy373]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy374]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy375]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy376]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy377]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy378]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy379]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy380]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy381]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy382]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy383]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy384]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy385]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy386]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy387]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy388]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy389]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy390]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy391]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy392]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy393]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy394]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy395]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy396]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy397]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy398]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy399]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy400]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy401]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy402]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy403]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy404]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy405]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy406]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy407]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy408]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy409]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy410]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy411]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy412]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy413]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy414]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy415]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy416]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy417]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy418]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy419]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy420]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy421]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy422]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy423]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy424]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy425]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy426]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy427]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy428]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy429]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy430]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy431]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy432]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy433]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy434]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy435]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy436]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy437]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy438]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy439]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy440]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy441]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy442]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy443]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy444]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy445]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy446]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy447]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy448]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy449]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy450]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy451]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy452]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy453]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy454]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy455]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy456]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy457]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy458]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy459]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy460]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy461]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy462]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy463]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy464]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy465]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy466]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy467]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy468]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy469]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy470]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy471]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy472]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy473]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy474]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy475]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy476]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy477]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy478]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy479]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy480]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy481]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy482]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy483]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy484]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy485]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy486]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy487]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy488]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy489]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy490]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy491]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy492]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy493]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy494]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy495]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy496]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy497]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy498]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy499]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy500]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy501]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy502]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy503]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy504]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy505]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy506]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy507]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy508]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy509]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy510]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy511]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy512]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy513]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy514]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy515]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy516]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy517]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy518]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy519]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy520]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy521]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy522]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy523]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy524]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy525]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy526]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy527]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy528]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy529]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy530]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy531]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy532]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy533]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy534]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy535]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy536]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy537]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy538]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy539]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy540]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy541]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy542]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy543]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy544]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy545]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy546]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy547]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy548]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy549]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy550]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy551]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy552]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy553]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy554]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy555]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy556]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy557]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy558]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy559]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy560]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy561]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy562]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy563]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy564]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy565]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy566]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy567]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy568]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy569]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy570]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy571]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy572]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy573]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy574]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy575]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy576]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy577]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy578]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy579]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy580]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy581]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy582]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy583]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy584]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy585]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy586]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy587]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy588]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy589]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy590]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy591]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy592]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy593]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy594]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy595]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy596]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy597]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy598]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy599]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy600]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy601]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy602]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy603]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy604]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy605]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy606]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy607]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy608]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy609]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy610]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy611]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy612]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy613]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy614]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy615]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy616]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy617]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy618]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy619]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy620]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy621]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy622]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy623]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy624]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy625]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy626]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy627]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy628]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy629]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy630]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy631]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy632]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy633]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy634]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy635]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy636]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy637]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy638]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy639]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy640]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy641]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy642]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy643]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy644]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy645]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy646]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy647]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy648]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy649]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy650]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy651]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy652]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy653]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy654]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy655]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy656]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy657]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy658]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy659]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy660]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy661]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy662]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy663]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy664]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy665]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy666]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy667]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy668]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy669]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy670]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy671]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy672]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy673]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy674]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy675]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy676]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy677]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy678]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy679]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy680]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy681]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy682]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy683]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy684]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy685]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy686]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy687]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy688]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy689]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy690]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy691]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy692]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy693]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy694]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy695]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy696]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy697]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy698]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy699]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy700]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy701]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy702]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy703]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy704]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy705]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy706]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy707]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy708]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy709]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy710]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy711]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy712]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy713]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy714]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy715]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy716]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy717]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy718]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy719]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy720]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy721]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy722]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy723]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy724]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy725]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy726]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy727]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy728]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy729]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy730]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy731]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy732]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy733]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy734]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy735]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy736]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy737]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy738]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy739]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy740]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy741]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy742]",
"tests/strategies/test_encoding.py::test_repr_encoding[strategy743]",
"tests/strategies/test_encoding.py::test_old_json_compatibility[strat_dict0]"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-05-12T15:41:30Z" | bsd-3-clause |
|
Picterra__picterra-python-61 | diff --git a/src/picterra/__main__.py b/src/picterra/__main__.py
index 1dc840f..c2004a2 100644
--- a/src/picterra/__main__.py
+++ b/src/picterra/__main__.py
@@ -262,4 +262,5 @@ def main():
exit("\033[91m%s\033[00m" % e)
-main()
+if __name__ == '__main__':
+ main()
diff --git a/src/picterra/nongeo.py b/src/picterra/nongeo.py
index bd2dab5..1e368ef 100644
--- a/src/picterra/nongeo.py
+++ b/src/picterra/nongeo.py
@@ -40,6 +40,33 @@ def _nongeo_latlng2xy(lat_deg, lng_deg):
return x, y
+def _load_polygons(geojson):
+ """
+ Loads polygons from a geojson file; should work for both MultiPolygon and
+ FeatureCollection of Polygons
+ """
+ polygons = []
+ if geojson['type'] == 'MultiPolygon':
+ for polygon in geojson['coordinates']:
+ polygons.append(polygon)
+ elif geojson['type'] == 'Polygon':
+ polygons = [geojson['coordinates']]
+ elif geojson['type'] == 'FeatureCollection':
+ for feature in geojson['features']:
+ geom = feature['geometry']
+ polygons.extend(_load_polygons(geom))
+ return polygons
+
+
+def _polygon_to_xy(polygon):
+ xy_polygon = []
+ for ring in polygon:
+ xy_polygon.append([
+ _nongeo_latlng2xy(lat, lng) for lng, lat in ring
+ ])
+ return xy_polygon
+
+
def nongeo_result_to_pixel(result_filename):
"""
This is a helper function to convert result obtained on non-georeferenced
@@ -65,15 +92,7 @@ def nongeo_result_to_pixel(result_filename):
]
"""
with open(result_filename) as f:
- multipolygon = json.load(f)
-
- polygons = []
- for polygon in multipolygon['coordinates']:
- xy_polygon = []
- for ring in polygon:
- xy_polygon.append([
- _nongeo_latlng2xy(lat, lng) for lng, lat in ring
- ])
- polygons.append(xy_polygon)
-
+ geojson = json.load(f)
+ polygons = _load_polygons(geojson)
+ polygons = [_polygon_to_xy(p) for p in polygons]
return polygons
| Picterra/picterra-python | c8199141b11c9bb2b74ffa09e923c1c371f44e0f | diff --git a/tests/test_nongeo.py b/tests/test_nongeo.py
index a8af5e8..9dc5ba0 100644
--- a/tests/test_nongeo.py
+++ b/tests/test_nongeo.py
@@ -1,6 +1,6 @@
import pytest
import tempfile
-from picterra.nongeo import _nongeo_latlng2xy
+from picterra.nongeo import _nongeo_latlng2xy, _load_polygons
from picterra import nongeo_result_to_pixel
@@ -52,3 +52,99 @@ def test_nongeo_result_to_pixel():
assert tuple(map(round, polygons[0][0][2])) == (1520, 0)
assert tuple(map(round, polygons[0][0][3])) == (0, 0)
assert tuple(map(round, polygons[0][0][4])) == (0, 1086)
+
+
+def test_load_polygons_multipoly():
+ geojson = {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [
+ [
+ [0.000000096, -0.000975470],
+ [0.00136530, -0.00097539],
+ [0.001365320, 0.000000129],
+ [0.000000034, -0.000000034],
+ [0.000000096, -0.000975470]
+ ]
+ ]
+ ]
+ }
+ polygons = _load_polygons(geojson)
+ assert len(polygons) == 1
+ assert len(polygons[0][0]) == 5
+ assert polygons[0][0][2][1] == 0.000000129
+
+
+def test_load_polygons_polygon():
+ geojson = {
+ "type": "Polygon",
+ "coordinates": [
+ [
+ [0.000000096, -0.000975470],
+ [0.00136530, -0.00097539],
+ [0.001365320, 0.000000129],
+ [0.000000034, -0.000000034],
+ [0.000000096, -0.000975470]
+ ]
+ ]
+ }
+ polygons = _load_polygons(geojson)
+ assert len(polygons) == 1
+ assert len(polygons[0][0]) == 5
+ assert polygons[0][0][2][1] == 0.000000129
+
+
+def test_load_polygons_fc():
+ geojson = {
+ "type": "FeatureCollection",
+ "features": [
+ {
+ "type": "Feature",
+ "properties": {},
+ "geometry": {
+ "type": "Polygon",
+ "coordinates": [
+ [
+ [0.000000096, -0.000975470],
+ [0.00136530, -0.00097539],
+ [0.001365320, 0.000000129],
+ [0.000000034, -0.000000034],
+ [0.000000096, -0.000975470]
+ ]
+ ]
+ }
+ },
+ {
+ "type": "Feature",
+ "properties": {},
+ "geometry": {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [
+ [
+ [0.000000096, -0.000975470],
+ [0.00136530, -0.00097539],
+ [0.001365320, 0.000000129],
+ [0.000000034, -0.000000034],
+ [0.000000096, -0.000975470]
+ ]
+ ],
+ [
+ [
+ [0.100000096, -0.100975470],
+ [0.10136530, -0.10097539],
+ [0.101365320, 0.100000129],
+ [0.100000034, -0.100000034],
+ [0.100000096, -0.100975470]
+ ]
+ ]
+ ]
+ }
+ }
+ ]
+ }
+ polygons = _load_polygons(geojson)
+ assert len(polygons) == 3
+ assert len(polygons[0][0]) == 5
+ assert polygons[0][0][2][1] == 0.000000129
+ assert polygons[2][0][2][1] == 0.100000129
| Fix picterra.nongeo_result_to_pixel to work with list of Polygons
Currently, `picterra.nongeo_result_to_pixel` only works with MultiPolygon (e.g. obtained through the API).
It doesn't work with a list of Polygons, such as what you get when exporting from the UI.
We should fix this by making it work with both when loading the data. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_nongeo.py::test_nongeo_latlng2xy[latlng0-xy0]",
"tests/test_nongeo.py::test_nongeo_latlng2xy[latlng1-xy1]",
"tests/test_nongeo.py::test_nongeo_latlng2xy[latlng2-xy2]",
"tests/test_nongeo.py::test_nongeo_latlng2xy[latlng3-xy3]",
"tests/test_nongeo.py::test_nongeo_result_to_pixel",
"tests/test_nongeo.py::test_load_polygons_multipoly",
"tests/test_nongeo.py::test_load_polygons_polygon",
"tests/test_nongeo.py::test_load_polygons_fc"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-08T19:12:42Z" | mit |
|
PlasmaControl__DESC-613 | diff --git a/desc/random.py b/desc/random.py
new file mode 100644
index 000000000..e40213d43
--- /dev/null
+++ b/desc/random.py
@@ -0,0 +1,198 @@
+"""Utilities for creating random surfaces and profiles."""
+
+import numpy as np
+import scipy.optimize
+import scipy.stats
+from numpy.random import default_rng
+
+from desc.backend import jnp, sign
+from desc.basis import DoubleFourierSeries
+from desc.derivatives import Derivative
+from desc.geometry import FourierRZToroidalSurface
+from desc.profiles import PowerSeriesProfile
+from desc.utils import setdefault
+
+
+def random_surface(
+ M=8,
+ N=8,
+ R0=(1, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 10),
+ sym=None,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=None,
+):
+ """Create a "random" toroidal surface.
+
+ Uses a double Fourier series representation with random coefficients.
+ The coefficients are given by
+
+ X_mn = X_scale * X_norm * N(1, exp(-beta))
+
+ Where N(m,s) is a normal random variable on with mean m and stdev s, and
+ X_norm = exp(-alpha*(|m| + |n|)) / exp(-alpha)
+
+
+ Parameters
+ ----------
+ M, N : int
+ Poloidal and toroidal resolution of the double Fourier series.
+ R0 : float or tuple
+ Major radius. If a tuple, treats as min/max for random value.
+ R_scale, Z_scale : float or tuple
+ Scale factors for R and Z coordinates. If a tuple, treats as min/max for random
+ values. The aspect ratio of the surface will be approximately
+ R0/sqrt(R_scale*Z_scale)
+ NFP : int or tuple
+ Number of field periods. If a tuple, treats as min/max for random int
+ sym : bool or None
+ Whether the surface should be stellarator symmetric. If None, selects randomly.
+ alpha : int or tuple
+ Spectral decay factor. Larger values of alpha will tend to create simpler
+ surfaces. If a tuple, treats as min/max for random int.
+ beta : int or tuple
+ Relative standard deviation for spectral coefficients. Larger values of beta
+ will tend to create simpler surfaces. If a tuple, treats as min/max for
+ random int.
+ rng : numpy.random.Generator
+ Random number generator. If None, uses numpy's default_rng
+
+ Returns
+ -------
+ surf : FourierRZToroidalSurface
+ Random toroidal surface.
+ """
+ rng = setdefault(rng, default_rng())
+ sym = setdefault(sym, rng.choice([True, False]))
+ if isinstance(alpha, tuple):
+ alpha = rng.integers(alpha[0], alpha[1] + 1)
+ if isinstance(beta, tuple):
+ beta = rng.integers(beta[0], beta[1] + 1)
+ if isinstance(NFP, tuple):
+ NFP = rng.integers(NFP[0], NFP[1] + 1)
+ if isinstance(R_scale, tuple):
+ R_scale = (R_scale[1] - R_scale[0]) * rng.random() + R_scale[0]
+ if isinstance(Z_scale, tuple):
+ Z_scale = (Z_scale[1] - Z_scale[0]) * rng.random() + Z_scale[0]
+ if isinstance(R0, tuple):
+ R0 = (R0[1] - R0[0]) * rng.random() + R0[0]
+
+ R_basis = DoubleFourierSeries(M=M, N=N, NFP=NFP, sym="cos" if sym else False)
+ Z_basis = DoubleFourierSeries(M=M, N=N, NFP=NFP, sym="sin" if sym else False)
+ # alpha determines how quickly amplitude decays for high M, N,
+ # normalized so that X_norm=1 for m=1
+ R_norm = np.exp(-alpha * np.sum(abs(R_basis.modes), axis=-1)) / np.exp(-alpha)
+ Z_norm = np.exp(-alpha * np.sum(abs(Z_basis.modes), axis=-1)) / np.exp(-alpha)
+
+ R_mn = R_norm * scipy.stats.truncnorm.rvs(
+ loc=1, scale=np.exp(-beta), size=R_basis.num_modes, a=-2, b=2, random_state=rng
+ )
+ Z_mn = Z_norm * scipy.stats.truncnorm.rvs(
+ loc=1, scale=np.exp(-beta), size=Z_basis.num_modes, a=-2, b=2, random_state=rng
+ )
+
+ # scale to approximate aspect ratio
+ R_scale1 = np.mean(
+ abs(R_mn)[(abs(R_basis.modes[:, 1]) == 1) & (abs(R_basis.modes[:, 2]) == 0)]
+ )
+ Z_scale1 = np.mean(
+ abs(Z_mn)[(abs(Z_basis.modes[:, 1]) == 1) & (abs(Z_basis.modes[:, 2]) == 0)]
+ )
+ R_mn *= R_scale / R_scale1
+ Z_mn *= Z_scale / Z_scale1
+ R_mn[R_basis.get_idx(0, 0, 0)] = R0
+ if not sym:
+ Z_mn[Z_basis.get_idx(0, 0, 0)] = 0 # center at Z=0
+ # flip sign and reduce magnitude of non-symmetric modes to avoid degenerate
+ # cases with no volume. kind of ad-hoc but seems to produce reasonable results
+ R_mn[sign(R_basis.modes[:, 1]) != sign(R_basis.modes[:, 2])] *= -np.exp(-beta)
+ Z_mn[sign(Z_basis.modes[:, 1]) == sign(Z_basis.modes[:, 2])] *= -np.exp(-beta)
+
+ surf = FourierRZToroidalSurface(
+ R_mn,
+ Z_mn,
+ R_basis.modes[:, 1:],
+ Z_basis.modes[:, 1:],
+ NFP=NFP,
+ sym=sym,
+ check_orientation=False,
+ )
+ # we do this manually just to avoid the warning when creating with left handed
+ # coordinates
+ if surf._compute_orientation() == -1:
+ surf._flip_orientation()
+ assert surf._compute_orientation() == 1
+ return surf
+
+
+def random_pressure(L=8, p0=(1e3, 1e4), rng=None):
+ """Create a random monotonic pressure profile.
+
+ Profile will be a PowerSeriesProfile with even symmetry,
+ enforced to be monotonically decreasing from p0 at r=0 to 0 at r=1
+
+ Could also be used for other monotonically decreasing profiles
+ such as temperature or density.
+
+ Parameters
+ ----------
+ L : int
+ Order of polynomial.
+ p0 : float or tuple
+ Pressure on axis. If a tuple, treats as min/max for random value.
+ rng : numpy.random.Generator
+ Random number generator. If None, uses numpy's default_rng
+
+ Returns
+ -------
+ pressure : PowerSeriesProfile
+ Random pressure profile.
+ """
+ assert (L // 2) == (L / 2), "L should be even"
+ rng = setdefault(rng, default_rng())
+ if isinstance(p0, tuple):
+ p0 = rng.uniform(p0[0], p0[1])
+
+ # first create random even coeffs
+ p = 1 - 2 * rng.random(L // 2 + 1)
+ # make it sum to 0 -> p=0 at r=1
+ p[0] -= p.sum()
+ # make p(0) = 1
+ p = p / p[0]
+ # this inserts zeros for all the odd modes
+ p1 = jnp.vstack([p, jnp.zeros_like(p)]).flatten(order="F")[::-1]
+ r = jnp.linspace(0, 1, 40)
+ y = jnp.polyval(p1, r)
+
+ def fun(x):
+ x = jnp.vstack([x, jnp.zeros_like(x)]).flatten(order="F")[::-1]
+ y_ = jnp.polyval(x, r)
+ return jnp.sum((y - y_) ** 2)
+
+ # constrain it so that it is monotonically decreasing, goes through (0,1) and (1,0)
+ def con(x):
+ x = jnp.vstack([x, jnp.zeros_like(x)]).flatten(order="F")[::-1]
+ dx = jnp.polyder(x, 1)
+ dy = jnp.polyval(dx, r)
+ return jnp.concatenate([dy, jnp.atleast_1d(jnp.sum(x)), jnp.atleast_1d(x[-1])])
+
+ hess = Derivative(fun, mode="hess")
+ grad = Derivative(fun, mode="grad")
+ A = Derivative(con, mode="fwd")(0 * p)
+ l = np.concatenate([-np.inf * np.ones_like(r), jnp.array([0, 1])])
+ u = np.concatenate([np.zeros_like(r), jnp.array([0, 1])])
+
+ out = scipy.optimize.minimize(
+ fun,
+ p,
+ jac=grad,
+ hess=hess,
+ constraints=scipy.optimize.LinearConstraint(A, l, u),
+ method="trust-constr",
+ )
+
+ p = np.vstack([out.x, np.zeros_like(out.x)]).flatten(order="F")
+ return PowerSeriesProfile(p[::2] * p0, modes=np.arange(L + 1)[::2], sym=True)
| PlasmaControl/DESC | 61797b69e6991b0284e76e0e493d9240edc779e0 | diff --git a/tests/test_random.py b/tests/test_random.py
new file mode 100644
index 000000000..c2390a275
--- /dev/null
+++ b/tests/test_random.py
@@ -0,0 +1,81 @@
+"""Tests for random surfaces, profiles etc."""
+import numpy as np
+import pytest
+
+from desc.equilibrium import Equilibrium
+from desc.grid import LinearGrid
+from desc.random import random_pressure, random_surface
+
+
[email protected]
+def test_random_pressure():
+ """Test that randomly generated profile is monotonic, has correct scaling etc."""
+ rng = np.random.default_rng(0)
+ p = random_pressure(L=8, p0=(1e3, 1e4), rng=rng)
+ assert p.basis.sym == "even"
+ assert 1e3 <= p(np.array([0.0])) <= 1e4
+ assert p.basis.L == 8 # symmetric, so should be 4 params up to order 8
+ dp = p(np.linspace(0, 1, 10), dr=1)
+ assert np.all(dp <= 0) # can't use array_less because that doesn't do <=
+
+
[email protected]
+def test_random_surface():
+ """Test that randomly generated surface is "sensible"."""
+ rng = np.random.default_rng(0)
+ surf = random_surface(
+ M=4,
+ N=4,
+ R0=(5, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 3),
+ sym=True,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=rng,
+ )
+ assert surf.sym
+ assert 1 <= surf.NFP <= 3
+ assert surf.M == 4
+ assert surf.N == 4
+ assert surf._compute_orientation() == 1
+
+ eq = Equilibrium(surface=surf)
+ R0 = eq.compute("R0")["R0"]
+ assert 5 <= R0 <= 10
+ AR = eq.compute("R0/a")["R0/a"]
+ # should be ~ R0/sqrt(R_scale*Z_scale), allowing for random variation
+ assert 2.5 <= AR <= 20
+ assert eq.is_nested()
+
+ # same stuff for non-symmetric
+ rng = np.random.default_rng(0)
+ surf = random_surface(
+ M=4,
+ N=4,
+ R0=(5, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 3),
+ sym=False,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=rng,
+ )
+ assert not surf.sym
+ assert 1 <= surf.NFP <= 3
+ assert surf.M == 4
+ assert surf.N == 4
+ assert surf._compute_orientation() == 1
+
+ eq = Equilibrium(surface=surf)
+ R0 = eq.compute("R0")["R0"]
+ assert 5 <= R0 <= 10
+ Z0 = eq.compute("Z", grid=LinearGrid(rho=np.array([0]), M=0, N=8, NFP=eq.NFP))["Z"]
+ # should be centered around Z=0
+ np.testing.assert_allclose(np.mean(Z0), 0, atol=1e-14)
+ AR = eq.compute("R0/a")["R0/a"]
+ # should be ~ R0/sqrt(R_scale*Z_scale), allowing for random variation
+ assert 2.5 <= AR <= 20
+ assert eq.is_nested()
| Add utilities to create "random" equilibria
For database studies, testing, examples, etc., it would be nice to be able to generate some "random" equilibria that satisfy certain basic criteria, such as major/minor radius, field strength, beta, etc.
- [ ] Random surface with approximate shape that ideally doesn't self-intersect
- [ ] Random monotonic profiles
- [ ] Random "smooth" non-monotonic profiles | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_random.py::test_random_pressure",
"tests/test_random.py::test_random_surface"
] | [] | {
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2023-08-07T18:21:19Z" | mit |
|
PlasmaPy__PlasmaPy-138 | diff --git a/plasmapy/physics/transport.py b/plasmapy/physics/transport.py
index 75073cbd..8af344d3 100644
--- a/plasmapy/physics/transport.py
+++ b/plasmapy/physics/transport.py
@@ -8,21 +8,22 @@
from .parameters import Debye_length
-@check_quantity({"n_e": {"units": units.m**-3},
- "T": {"units": units.K, "can_be_negative": False}
+@check_quantity({"T": {"units": units.K, "can_be_negative": False},
+ "n_e": {"units": units.m**-3}
})
-def Coulomb_logarithm(n_e, T, particles, V=None):
+def Coulomb_logarithm(T, n_e, particles, V=None):
r"""Estimates the Coulomb logarithm.
Parameters
----------
- n_e : Quantity
- The electron density in units convertible to per cubic meter.
T : Quantity
- Temperature in units of temperature or energy per particle,
- which is assumed to be equal for both the test particle and
- the target particle
+ Temperature in units of temperature or energy per particle,
+ which is assumed to be equal for both the test particle and
+ the target particle
+
+ n_e : Quantity
+ The electron density in units convertible to per cubic meter.
particles : tuple
A tuple containing string representations of the test particle
@@ -90,9 +91,9 @@ def Coulomb_logarithm(n_e, T, particles, V=None):
Examples
--------
>>> from astropy import units as u
- >>> Coulomb_logarithm(T=1e6*units.K, n_e=1e19*units.m**-3, ('e', 'p'))
+ >>> Coulomb_logarithm(T=1e6*u.K, n_e=1e19*u.m**-3, ('e', 'p'))
14.748259780491056
- >>> Coulomb_logarithm(1e6*units.K, 1e19*units.m**-3, ('e', 'p'),
+ >>> Coulomb_logarithm(1e6*u.K, 1e19*u.m**-3, ('e', 'p'),
V=1e6*u.m/u.s)
References
| PlasmaPy/PlasmaPy | 47925baeaac18c58c758b4ab50fabb44087cd800 | diff --git a/plasmapy/physics/tests/test_transport.py b/plasmapy/physics/tests/test_transport.py
index 3e5333a0..a1553e55 100644
--- a/plasmapy/physics/tests/test_transport.py
+++ b/plasmapy/physics/tests/test_transport.py
@@ -18,37 +18,37 @@ def test_Coulomb_logarithm():
particles = ('e', 'p')
for i in range(3):
- assert np.isclose(Coulomb_logarithm(n_e[i], T[i], particles),
+ assert np.isclose(Coulomb_logarithm(T[i], n_e[i], particles),
Lambda[i], atol=0.01)
- assert np.isclose(Coulomb_logarithm(5*u.m**-3, 1*u.eV, ('e', 'e')),
- Coulomb_logarithm(5*u.m**-3, 11604.5220*u.K, ('e', 'e')))
+ assert np.isclose(Coulomb_logarithm(1*u.eV, 5*u.m**-3, ('e', 'e')),
+ Coulomb_logarithm(11604.5220*u.K, 5*u.m**-3, ('e', 'e')))
- assert np.isclose(Coulomb_logarithm(1e9*u.cm**-3, 1e2*u.K, ('e', 'p')),
+ assert np.isclose(Coulomb_logarithm(1e2*u.K, 1e9*u.cm**-3, ('e', 'p')),
5.97, atol=0.01)
- assert np.isclose(Coulomb_logarithm(1e9*u.cm**-3, 1e7*u.K, ('e', 'p')),
+ assert np.isclose(Coulomb_logarithm(1e7*u.K, 1e9*u.cm**-3, ('e', 'p')),
21.6, atol=0.1)
- assert np.isclose(Coulomb_logarithm(1e24*u.cm**-3, 1e8*u.K, ('e', 'p')),
+ assert np.isclose(Coulomb_logarithm(1e8*u.K, 1e24*u.cm**-3, ('e', 'p')),
6.69, atol=0.01)
- assert np.allclose(Coulomb_logarithm(n_e, T, particles), Lambda, atol=0.01)
+ assert np.allclose(Coulomb_logarithm(T, n_e, particles), Lambda, atol=0.01)
- assert np.isclose(Coulomb_logarithm(5*u.m**-3, 1e5*u.K, ('e', 'e'),
+ assert np.isclose(Coulomb_logarithm(1e5*u.K, 5*u.m**-3, ('e', 'e'),
V=1e4*u.m/u.s), 21.379082011)
with pytest.raises(UserWarning):
- Coulomb_logarithm(1*u.m**-3, 1e5*u.K, ('e', 'p'), 299792458*u.m/u.s)
+ Coulomb_logarithm(1e5*u.K, 1*u.m**-3, ('e', 'p'), 299792458*u.m/u.s)
with pytest.raises(u.UnitConversionError):
- Coulomb_logarithm(1*u.m**-3, 1e5*u.g, ('e', 'p'), 29979245*u.m/u.s)
+ Coulomb_logarithm(1e5*u.g, 1*u.m**-3, ('e', 'p'), 29979245*u.m/u.s)
with pytest.raises(ValueError):
- Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e'))
+ Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e'))
with pytest.raises(ValueError):
- Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e', 'g'))
+ Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e', 'g'))
with pytest.raises(ValueError):
- Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e', 'D'))
+ Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e', 'D'))
| Check consistency of argument ordering in physics
Here are a few example signatures straight from `physics.transport`:
```
def Coulomb_logarithm(n_e, T, particles, V=None):
def Debye_length(T_e, n_e):
def Debye_number(T_e, n_e):
def upper_hybrid_frequency(B, n_e):
```
It would be nice to ensure that non-keyword arguments, where applicable, have the same ordering. As in other scientific packages such as NumPy, a consistent API makes it possible to call multiple functions without having to check the signature each time.
Any consistent ordering would be welcome - but common sense takes precedence. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"plasmapy/physics/tests/test_transport.py::test_Coulomb_logarithm"
] | [] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2017-10-03T13:09:48Z" | bsd-3-clause |
|
PlasmaPy__PlasmaPy-2133 | diff --git a/CITATION.cff b/CITATION.cff
index 49523acb..0a89a7df 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -112,6 +112,10 @@ authors:
family-names: Carroll
alias: seanwilliamcarroll
+- given-names: Sarthak
+ family-names: Choudhary
+ alias: martha889
+
- given-names: Christian
family-names: Clauss
alias: cclauss
@@ -210,6 +214,11 @@ authors:
family-names: Hansen
alias: raymonshansen
+- given-names: Mohammed
+ family-names: Haque
+ affiliation: Hunter College
+ alias: mohawk811
+
- given-names: Julien
family-names: Hillairet
affiliation: CEA
@@ -490,6 +499,10 @@ authors:
family-names: Stinson
alias: 14tstinson
+- given-names: Michaela
+  family-names: Švancarová
+ alias: mysakli
+
- given-names: Antoine
family-names: Tavant
   affiliation: Centre Spatial de l'École Polytechnique
@@ -521,6 +534,8 @@ authors:
orcid: https://orcid.org/0000-0002-6468-5710
alias: svincena
+- alias: WineDarkMoon
+
- given-names: Tingfeng
family-names: Wu
orcid: https://orcid.org/0000-0001-8745-204X
diff --git a/changelog/2133.bugfix.rst b/changelog/2133.bugfix.rst
new file mode 100644
index 00000000..05b20dce
--- /dev/null
+++ b/changelog/2133.bugfix.rst
@@ -0,0 +1,2 @@
+The ``plasmapy.analysis.nullpoint._vector_space`` function now returns a
+list for its delta values instead of an array.
diff --git a/plasmapy/analysis/nullpoint.py b/plasmapy/analysis/nullpoint.py
index 71bc69cc..a19e3b81 100644
--- a/plasmapy/analysis/nullpoint.py
+++ b/plasmapy/analysis/nullpoint.py
@@ -251,7 +251,7 @@ def _vector_space(
w = w_arr
else:
u, v, w = func(x, y, z)
- return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
+ return np.array([x, y, z]), np.array([u, v, w]), [dx, dy, dz]
def _trilinear_coeff_cal(vspace, cell):
| PlasmaPy/PlasmaPy | 6b5ba036221a5487719827b7211ee7b551eb3a83 | diff --git a/plasmapy/analysis/tests/test_nullpoint.py b/plasmapy/analysis/tests/test_nullpoint.py
index 2c87e3ad..6e854d66 100644
--- a/plasmapy/analysis/tests/test_nullpoint.py
+++ b/plasmapy/analysis/tests/test_nullpoint.py
@@ -289,7 +289,6 @@ def test_null_point_find3():
@pytest.mark.slow()
[email protected](np.__version__ >= "1.24.0", reason="See issue #2101.")
def test_null_point_find4():
r"""Test `~plasmapy.analysis.nullpoint.null_point_find`."""
# Two null points
@@ -369,7 +368,6 @@ def test_null_point_find7():
@pytest.mark.slow()
[email protected](np.__version__ >= "1.24.0", reason="See issue #2101.")
def test_null_point_find8():
r"""Test `~plasmapy.analysis.nullpoint.null_point_find`."""
# Non-linear field
@@ -426,9 +424,6 @@ class Test_classify_null_point:
"func": lambda x, y, z: [(y - 5.5) * (y + 5.5), (z - 5.5), (x - 5.5)],
},
"Spiral null",
- marks=pytest.mark.xfail(
- np.__version__ >= "1.24.0", reason="See issue #2101."
- ),
),
(
{
| Fix xfailing tests in `test_nullpoint.py` for NumPy 1.24+
We started getting the following failures when we switched to NumPy 1.24 (though there's a small chance that they come from a different package). I'm not sure what caused the change, but for the time being I'm marking these tests as xfailing in #2098. We'd probably need to go through these with a debugger step by step.
```
______________________________________ test_null_point_find4 _______________________________________
tests/test_nullpoint.py:302: in test_null_point_find4
npoints4 = uniform_null_point_find(**nullpoint4_args)
nullpoint.py:1526: in uniform_null_point_find
vspace = _vector_space(
nullpoint.py:254: in _vector_space
return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
E ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (3,) + inhomogeneous part.
______________________________________ test_null_point_find8 _______________________________________
tests/test_nullpoint.py:381: in test_null_point_find8
npoints8 = uniform_null_point_find(**nullpoint8_args)
nullpoint.py:1526: in uniform_null_point_find
vspace = _vector_space(
nullpoint.py:254: in _vector_space
return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
E ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (3,) + inhomogeneous part.
___________ Test_classify_null_point.test_classify_null_point_vals[kwargs2-Spiral null] ____________
tests/test_nullpoint.py:457: in test_classify_null_point_vals
assert uniform_null_point_find(**kwargs)[0].classification == expected
nullpoint.py:1526: in uniform_null_point_find
vspace = _vector_space(
nullpoint.py:254: in _vector_space
return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
E ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (3,) + inhomogeneous part.
===================================== short test summary info ======================================
FAILED tests/test_nullpoint.py::test_null_point_find4 - ValueError: setting an array element with a sequence. The requested array has an inhomogeneous ...
FAILED tests/test_nullpoint.py::test_null_point_find8 - ValueError: setting an array element with a sequence. The requested array has an inhomogeneous ...
FAILED tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs2-Spiral null] - ValueError: setting an array element with a sequence. The requested array has an inhomogeneous ...
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find4",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find8",
"plasmapy/analysis/tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs2-Spiral"
] | [
"plasmapy/analysis/tests/test_nullpoint.py::test_trilinear_coeff_cal",
"plasmapy/analysis/tests/test_nullpoint.py::test_trilinear_jacobian",
"plasmapy/analysis/tests/test_nullpoint.py::test_trilinear_approx",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs0-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs1-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs2-False]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs3-False]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs4-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs5-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_reduction::test_reduction_vals[kwargs6-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_trilinear_analysis::test_trilinear_analysis_vals[kwargs0-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_trilinear_analysis::test_trilinear_analysis_vals[kwargs1-True]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_trilinear_analysis::test_trilinear_analysis_vals[kwargs2-False]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_trilinear_analysis::test_trilinear_analysis_vals[kwargs3-False]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_trilinear_analysis::test_trilinear_analysis_vals[kwargs4-False]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_bilinear_root::test_bilinear_root_vals[kwargs0-expected0]",
"plasmapy/analysis/tests/test_nullpoint.py::Test_locate_null_point::test_locate_null_point_vals[kwargs0-expected0]",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find1",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find2",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find3",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find5",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find6",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find7",
"plasmapy/analysis/tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs0-Improper",
"plasmapy/analysis/tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs1-Proper",
"plasmapy/analysis/tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs3-Critical",
"plasmapy/analysis/tests/test_nullpoint.py::Test_classify_null_point::test_classify_null_point_vals[kwargs4-Skewed",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find9",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find10",
"plasmapy/analysis/tests/test_nullpoint.py::test_null_point_find11"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-05-12T10:39:20Z" | bsd-3-clause |
|
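Annotation (illustrative note, not part of the dataset row above): the `ValueError` in the PlasmaPy nullpoint failures is the behaviour NumPy adopted in version 1.24, where `np.array` refuses to build an array from ragged (unequal-length) sequences unless `dtype=object` is passed explicitly. A minimal reproduction of that error class — not the project's actual `_vector_space` code — under the assumption of NumPy >= 1.24:

```python
import numpy as np

x = np.linspace(0, 1, 4)
y = np.linspace(0, 1, 5)  # different length -> ragged input
z = np.linspace(0, 1, 6)

try:
    np.array([x, y, z])  # NumPy >= 1.24 raises instead of building an object array
except ValueError as exc:
    print(exc)
# setting an array element with a sequence. The requested array has an
# inhomogeneous shape after 1 dimensions. ...
```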
PlasmaPy__PlasmaPy-228 | diff --git a/plasmapy/mathematics/mathematics.py b/plasmapy/mathematics/mathematics.py
index b20ed5e2..58c10e5f 100644
--- a/plasmapy/mathematics/mathematics.py
+++ b/plasmapy/mathematics/mathematics.py
@@ -3,6 +3,7 @@
import numpy as np
from scipy import special
from astropy import units as u
+from scipy.special import wofz as Faddeeva_function
def plasma_dispersion_func(zeta):
@@ -57,9 +58,9 @@ def plasma_dispersion_func(zeta):
>>> plasma_dispersion_func(0)
1.7724538509055159j
>>> plasma_dispersion_func(1j)
- 0.7578721561413119j
+ 0.757872156141312j
>>> plasma_dispersion_func(-1.52+0.47j)
- (0.6088888957234255+0.3349458388287403j)
+ (0.6088888957234254+0.33494583882874024j)
"""
@@ -79,7 +80,7 @@ def plasma_dispersion_func(zeta):
raise ValueError("The argument to plasma_dispersion_function is "
"not finite.")
- Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))
+ Z = 1j * np.sqrt(np.pi) * Faddeeva_function(zeta)
return Z
@@ -124,9 +125,9 @@ def plasma_dispersion_func_deriv(zeta):
>>> plasma_dispersion_func_deriv(0)
(-2+0j)
>>> plasma_dispersion_func_deriv(1j)
- (-0.48425568771737626+0j)
+ (-0.48425568771737604+0j)
>>> plasma_dispersion_func_deriv(-1.52+0.47j)
- (0.1658713314982294+0.4458797880593507j)
+ (0.16587133149822897+0.44587978805935047j)
"""
| PlasmaPy/PlasmaPy | 82eece6d5648641af1878f6846240dbf2a37a190 | diff --git a/plasmapy/mathematics/tests/test_dispersion.py b/plasmapy/mathematics/tests/test_dispersion.py
index 9dabb8e6..1c7eb3ca 100644
--- a/plasmapy/mathematics/tests/test_dispersion.py
+++ b/plasmapy/mathematics/tests/test_dispersion.py
@@ -15,7 +15,7 @@
     (0, 1j * np.sqrt(π)),
(1, -1.076_159_013_825_536_8 + 0.652_049_332_173_292_2j),
(1j, 0.757_872_156_141_311_87j),
- (1.2 + 4.4j, -0.054_246_146_372_377_471 + 0.207_960_589_336_958_13j),
+ (1.2 + 4.4j, -0.054_246_157_069_223_27+0.207_960_584_359_855_62j),
(9.2j, plasma_dispersion_func(9.2j * units.dimensionless_unscaled)),
(5.4 - 3.1j, -0.139_224_873_051_713_11 - 0.082_067_822_640_155_802j),
(9.9 - 10j, 2.013_835_257_947_027_6 - 25.901_274_737_989_727j),
| Use Dawson function for dispersion
`plasma_dispersion_func` under `mathematics.py` currently uses `erf()` along with some other terms. This can be simplified to Dawson function, [dawsn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.dawsn.html), and may even offer some minor speedups if scipy implements it in C code. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(1.2+4.4j)-(-0.05424615706922327+0.20796058435985562j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[9.2j-0.10806460304119532j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[9j-(-0.012123822585585753+0j)]"
] | [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[0-1.7724538509055159j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1-(-1.0761590138255368+0.6520493321732922j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1j-0.7578721561413119j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(5.4-3.1j)-(-0.1392248730517131-0.0820678226401558j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(9.9-10j)-(2.0138352579470276-25.901274737989727j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(4.5-10j)-(-1.3674950463400947e+35-6.853923234842271e+34j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_power_series_expansion",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_roots",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[0--2]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1-(0.152318-1.3041j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1j--0.484257]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(1.2+4.4j)-(-0.0397561-0.0217392j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5.4-3.1j)-(0.0124491+0.0231383j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(9.9-10j)-(476.153+553.121j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5+7j)-(-0.0045912-0.0126104j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(4.5-10j)-(2.60153e+36-2.11814e+36j)]"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2018-01-07T15:28:48Z" | bsd-3-clause |
|
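Annotation (illustrative note, not part of the dataset row above): the simplification referenced in the PlasmaPy-228 issue rests on the identity Z(x) = -2*dawsn(x) + i*sqrt(pi)*exp(-x**2) for real arguments, while the merged patch instead uses the Faddeeva function `wofz` so complex arguments keep working. A minimal sketch of both forms, using only `scipy.special` calls that exist today:

```python
import numpy as np
from scipy.special import dawsn, wofz

def Z_dawson(x):
    # Real-argument form via the Dawson function (the simplification the issue suggests).
    return -2 * dawsn(x) + 1j * np.sqrt(np.pi) * np.exp(-x**2)

def Z_faddeeva(zeta):
    # General complex-argument form, matching the change in the patch above.
    return 1j * np.sqrt(np.pi) * wofz(zeta)

print(Z_dawson(1.0))    # ~ -1.0761590 + 0.6520493j, matching the docstring example
print(Z_faddeeva(1.0))  # same value, and also valid off the real axis
```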
PlasmaPy__PlasmaPy-743 | diff --git a/changelog/743.feature.rst b/changelog/743.feature.rst
new file mode 100644
index 00000000..52ebf1bc
--- /dev/null
+++ b/changelog/743.feature.rst
@@ -0,0 +1,1 @@
+Added units to reprs of .formulary.magnetostatics classes.
\ No newline at end of file
diff --git a/docs/about/credits.rst b/docs/about/credits.rst
index 22d12556..d008e528 100644
--- a/docs/about/credits.rst
+++ b/docs/about/credits.rst
@@ -78,6 +78,7 @@ in parentheses are `ORCID author identifiers <https://orcid.org>`__.
* `Thomas Varnish <https://github.com/tvarnish>`__
* `Aditya Magarde <https://github.com/adityamagarde>`__
* `Diego A. Diaz Riega <https://github.com/diego7319>`__
+* `Jakub Polak <https://github.com/Ishinomori>`__
This list contains contributors to PlasmaPy's core package and vision
statement, including a few people who do not show up as `PlasmaPy
diff --git a/plasmapy/formulary/magnetostatics.py b/plasmapy/formulary/magnetostatics.py
index 29eb09ae..2a2de9f0 100644
--- a/plasmapy/formulary/magnetostatics.py
+++ b/plasmapy/formulary/magnetostatics.py
@@ -46,16 +46,21 @@ class MagneticDipole(MagnetoStatics):
Position of the dipole
"""
+
@validate_quantities
def __init__(self, moment: u.A * u.m**2, p0: u.m):
self.moment = moment.value
+ self._moment_u = moment.unit
self.p0 = p0.value
+ self._p0_u = p0.unit
def __repr__(self):
- return "{name}(moment={moment}, p0={p0})".format(
+ return "{name}(moment={moment}{moment_u}, p0={p0}{p0_u})".format(
name=self.__class__.__name__,
moment=self.moment,
- p0=self.p0
+ p0=self.p0,
+ moment_u=self._moment_u,
+ p0_u=self._p0_u,
)
def magnetic_field(self, p: u.m) -> u.T:
@@ -101,11 +106,9 @@ class GeneralWire(Wire):
electric current
"""
+
@validate_quantities
- def __init__(self, parametric_eq,
- t1,
- t2,
- current: u.A):
+ def __init__(self, parametric_eq, t1, t2, current: u.A):
if callable(parametric_eq):
self.parametric_eq = parametric_eq
else:
@@ -116,6 +119,20 @@ def __init__(self, parametric_eq,
else:
raise ValueError(f"t1={t1} is not smaller than t2={t2}")
self.current = current.value
+ self._current_u = current.unit
+
+ def __repr__(self):
+ return (
+ "{name}(parametric_eq={parametric_eq}, t1={t1}, t2={t2}, "
+ "current={current}{current_u})".format(
+ name=self.__class__.__name__,
+ parametric_eq=self.parametric_eq.__name__,
+ t1=self.t1,
+ t2=self.t2,
+ current=self.current,
+ current_u=self._current_u,
+ )
+ )
def magnetic_field(self, p: u.m, n: numbers.Integral = 1000) -> u.T:
r"""
@@ -181,20 +198,27 @@ class FiniteStraightWire(Wire):
electric current
"""
+
@validate_quantities
def __init__(self, p1: u.m, p2: u.m, current: u.A):
self.p1 = p1.value
self.p2 = p2.value
+ self._p1_u = p1.unit
+ self._p2_u = p2.unit
if np.all(p1 == p2):
raise ValueError("p1, p2 should not be the same point.")
self.current = current.value
+ self._current_u = current.unit
def __repr__(self):
- return "{name}(p1={p1}, p2={p2}, current={current})".format(
+ return "{name}(p1={p1}{p1_u}, p2={p2}{p2_u}, current={current}{current_u})".format(
name=self.__class__.__name__,
p1=self.p1,
p2=self.p2,
- current=self.current
+ current=self.current,
+ p1_u=self._p1_u,
+ p2_u=self._p2_u,
+ current_u=self._current_u,
)
def magnetic_field(self, p) -> u.T:
@@ -261,18 +285,23 @@ class InfiniteStraightWire(Wire):
electric current
"""
+
@validate_quantities
def __init__(self, direction, p0: u.m, current: u.A):
- self.direction = direction/np.linalg.norm(direction)
+ self.direction = direction / np.linalg.norm(direction)
self.p0 = p0.value
+ self._p0_u = p0.unit
self.current = current.value
+ self._current_u = current.unit
def __repr__(self):
- return "{name}(direction={direction}, p0={p0}, current={current})".format(
+ return "{name}(direction={direction}, p0={p0}{p0_u}, current={current}{current_u})".format(
name=self.__class__.__name__,
direction=self.direction,
p0=self.p0,
- current=self.current
+ current=self.current,
+ p0_u=self._p0_u,
+ current_u=self._current_u,
)
def magnetic_field(self, p) -> u.T:
@@ -320,16 +349,34 @@ class CircularWire(Wire):
electric current
"""
+
+ def __repr__(self):
+ return (
+ "{name}(normal={normal}, center={center}{center_u}, "
+ "radius={radius}{radius_u}, current={current}{current_u})".format(
+ name=self.__class__.__name__,
+ normal=self.normal,
+ center=self.center,
+ radius=self.radius,
+ current=self.current,
+ center_u=self._center_u,
+ radius_u=self._radius_u,
+ current_u=self._current_u,
+ )
+ )
+
@validate_quantities
- def __init__(self, normal, center: u.m, radius: u.m,
- current: u.A, n=300):
- self.normal = normal/np.linalg.norm(normal)
+ def __init__(self, normal, center: u.m, radius: u.m, current: u.A, n=300):
+ self.normal = normal / np.linalg.norm(normal)
self.center = center.value
+ self._center_u = center.unit
if radius > 0:
self.radius = radius.value
+ self._radius_u = radius.unit
else:
raise ValueError("Radius should bu larger than 0")
self.current = current.value
+ self._current_u = current.unit
# parametric equation
# find other two axises in the disc plane
@@ -362,16 +409,6 @@ def curve(t):
self.roots_legendre = scipy.special.roots_legendre(n)
self.n = n
- def __repr__(self):
- return "{name}(normal={normal}, center={center}, \
-radius={radius}, current={current})".format(
- name=self.__class__.__name__,
- normal=self.normal,
- center=self.center,
- radius=self.radius,
- current=self.current
- )
-
def magnetic_field(self, p) -> u.T:
r"""
Calculate magnetic field generated by this wire at position `p`
| PlasmaPy/PlasmaPy | dfeea4ef1c4816433afee82c4da423cb91861cc9 | diff --git a/plasmapy/formulary/tests/test_magnetostatics.py b/plasmapy/formulary/tests/test_magnetostatics.py
index 5531cdcb..5f71331e 100644
--- a/plasmapy/formulary/tests/test_magnetostatics.py
+++ b/plasmapy/formulary/tests/test_magnetostatics.py
@@ -32,6 +32,11 @@ def test_value2(self):
assert np.all(np.isclose(B2.value, B2_expected.value))
assert B2.unit == u.T
+ def test_repr(self):
+ "Test __repr__ function"
+ B1 = MagneticDipole(self.moment, self.p0)
+ assert repr(B1) == r"MagneticDipole(moment=[0. 0. 1.]A m2, p0=[0. 0. 0.]m)"
+
class Test_GeneralWire:
def setup_method(self):
@@ -55,6 +60,16 @@ def test_close_cw(self):
assert np.all(np.isclose(B_cw.value, B_gw_cw.value))
assert B_cw.unit == B_gw_cw.unit
+ def test_repr(self):
+ "Test __repr__ function"
+ gw_cw = self.cw.to_GeneralWire()
+ # round numbers to avoid calculation accuracy mismatch
+ gw_cw.t1 = -3.1516
+ gw_cw.t2 = +3.1516
+ assert repr(
+ gw_cw
+ ) == r"GeneralWire(parametric_eq=curve, t1=-3.1516, t2=3.1516, current=1.0A)"
+
def test_close_fw(self):
"Test if the GeneralWire is close to the FiniteWire it converted from"
gw_fw = self.fw.to_GeneralWire()
@@ -93,7 +108,7 @@ def test_value1(self):
def test_repr(self):
"Test __repr__ function"
fw = FiniteStraightWire(self.p1, self.p2, self.current)
- assert repr(fw) == r"FiniteStraightWire(p1=[ 0. 0. -1.], p2=[0. 0. 1.], current=1.0)"
+ assert repr(fw) == r"FiniteStraightWire(p1=[ 0. 0. -1.]m, p2=[0. 0. 1.]m, current=1.0A)"
class Test_InfiniteStraightWire:
@@ -114,7 +129,7 @@ def test_repr(self):
"Test __repr__ function"
iw = InfiniteStraightWire(self.direction, self.p0, self.current)
assert repr(iw) == \
- r"InfiniteStraightWire(direction=[0. 1. 0.], p0=[0. 0. 0.], current=1.0)"
+ r"InfiniteStraightWire(direction=[0. 1. 0.], p0=[0. 0. 0.]m, current=1.0A)"
class Test_CircularWire:
@@ -150,4 +165,4 @@ def test_repr(self):
"Test __repr__ function"
cw = CircularWire(self.normalz, self.center, self.radius, self.current)
assert repr(cw) == \
- r"CircularWire(normal=[0. 0. 1.], center=[0. 0. 0.], radius=1.0, current=1.0)"
+ r"CircularWire(normal=[0. 0. 1.], center=[0. 0. 0.]m, radius=1.0m, current=1.0A)"
| Add units to Magnetostatics reprs
Objects from the Magnetostatics module currently show up in reprs without units, because they're being cast to floats on class construction. It would be nice to have them save the input units, or at the very least, to display the units they're saving their inputs to, in their reprs and strs. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"plasmapy/formulary/tests/test_magnetostatics.py::Test_GeneralWire::test_repr",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_FiniteStraightWire::test_repr",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_InfiniteStraightWire::test_repr",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_CircularWire::test_repr"
] | [
"plasmapy/formulary/tests/test_magnetostatics.py::Test_MagneticDipole::test_value1",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_MagneticDipole::test_value2",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_GeneralWire::test_not_callable",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_GeneralWire::test_close_cw",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_GeneralWire::test_close_fw",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_GeneralWire::test_value_error",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_FiniteStraightWire::test_same_point",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_FiniteStraightWire::test_value1",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_InfiniteStraightWire::test_value1",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_CircularWire::test_negative_radius",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_CircularWire::test_value1",
"plasmapy/formulary/tests/test_magnetostatics.py::Test_CircularWire::test_value2"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-01-29T20:05:04Z" | bsd-3-clause |
|
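Annotation (illustrative note, not part of the dataset row above): the PlasmaPy-743 patch keeps the numeric value and the unit of each `astropy` Quantity separately so the unit can be echoed back in `__repr__`. A minimal sketch of that pattern with a hypothetical class name (not the PlasmaPy class itself):

```python
import astropy.units as u

class MagneticDipoleSketch:
    # Store value and unit side by side so __repr__ can show both,
    # mirroring the approach taken in the patch above.
    def __init__(self, moment: u.Quantity, p0: u.Quantity):
        self.moment, self._moment_u = moment.value, moment.unit
        self.p0, self._p0_u = p0.value, p0.unit

    def __repr__(self):
        return (f"{type(self).__name__}(moment={self.moment}{self._moment_u}, "
                f"p0={self.p0}{self._p0_u})")

print(MagneticDipoleSketch([0, 0, 1] * u.A * u.m**2, [0, 0, 0] * u.m))
# MagneticDipoleSketch(moment=[0. 0. 1.]A m2, p0=[0. 0. 0.]m)
```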
PostHog__posthog-python-30 | diff --git a/posthog/client.py b/posthog/client.py
index 0588082..22157cf 100644
--- a/posthog/client.py
+++ b/posthog/client.py
@@ -330,8 +330,8 @@ class Client(object):
except IndexError:
return default
- if feature_flag.get("is_simple_flag") and feature_flag.get("rollout_percentage"):
- response = _hash(key, distinct_id) <= (feature_flag["rollout_percentage"] / 100)
+ if feature_flag.get("is_simple_flag"):
+ response = _hash(key, distinct_id) <= ((feature_flag.get("rollout_percentage", 100) or 100) / 100)
else:
try:
request_data = {
| PostHog/posthog-python | 372fb74637bbcdc0c1de70f9f11b2b5d754553f0 | diff --git a/posthog/test/test_client.py b/posthog/test/test_client.py
index e4971db..9c867a0 100644
--- a/posthog/test/test_client.py
+++ b/posthog/test/test_client.py
@@ -325,14 +325,28 @@ class TestClient(unittest.TestCase):
self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
@mock.patch("posthog.client.decide")
- def test_feature_enabled_request(self, patch_get):
- patch_get.return_value = {"featureFlags": ["beta-feature"]}
+ def test_feature_enabled_request(self, patch_decide):
+ patch_decide.return_value = {"featureFlags": ["beta-feature"]}
client = Client(TEST_API_KEY)
client.feature_flags = [
{"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": False, "rollout_percentage": 100}
]
self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+ @mock.patch("posthog.client.get")
+ def test_feature_enabled_simple_without_rollout_percentage(self, patch_get):
+ client = Client(TEST_API_KEY)
+ client.feature_flags = [{"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": True}]
+ self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+
+ @mock.patch("posthog.client.get")
+ def test_feature_enabled_simple_with_none_rollout_percentage(self, patch_get):
+ client = Client(TEST_API_KEY)
+ client.feature_flags = [
+ {"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": True, "rollout_percantage": None}
+ ]
+ self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+
@mock.patch("posthog.client.Poller")
@mock.patch("posthog.client.get")
def test_feature_enabled_doesnt_exist(self, patch_get, patch_poll):
| Library does not properly handle feature flags with no % rollout
See sentry issue https://sentry.io/organizations/posthog/issues/2220836956/?project=1899813. If a feature flag is enabled for everyone without a % rollout, the client crashes. Notice that this is different from a 100% rollout which works fine.
```
unsupported operand type(s) for /: 'NoneType' and 'int'
```
posthoganalytics/client.py#L276
```python
response = _hash(key, distinct_id) <= (feature_flag['rollout_percentage'] / 100)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_with_none_rollout_percentage",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_without_rollout_percentage"
] | [
"posthog/test/test_client.py::TestClient::test_advanced_capture",
"posthog/test/test_client.py::TestClient::test_advanced_identify",
"posthog/test/test_client.py::TestClient::test_advanced_page",
"posthog/test/test_client.py::TestClient::test_advanced_set",
"posthog/test/test_client.py::TestClient::test_advanced_set_once",
"posthog/test/test_client.py::TestClient::test_basic_alias",
"posthog/test/test_client.py::TestClient::test_basic_capture",
"posthog/test/test_client.py::TestClient::test_basic_identify",
"posthog/test/test_client.py::TestClient::test_basic_page",
"posthog/test/test_client.py::TestClient::test_basic_page_distinct_uuid",
"posthog/test/test_client.py::TestClient::test_basic_set",
"posthog/test/test_client.py::TestClient::test_basic_set_once",
"posthog/test/test_client.py::TestClient::test_call_identify_fails",
"posthog/test/test_client.py::TestClient::test_debug",
"posthog/test/test_client.py::TestClient::test_default_timeout_15",
"posthog/test/test_client.py::TestClient::test_empty_flush",
"posthog/test/test_client.py::TestClient::test_feature_enabled_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_feature_enabled_request",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple",
"posthog/test/test_client.py::TestClient::test_flush",
"posthog/test/test_client.py::TestClient::test_gzip",
"posthog/test/test_client.py::TestClient::test_load_feature_flags",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_error",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_wrong_key",
"posthog/test/test_client.py::TestClient::test_numeric_distinct_id",
"posthog/test/test_client.py::TestClient::test_overflow",
"posthog/test/test_client.py::TestClient::test_personal_api_key_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_requires_api_key",
"posthog/test/test_client.py::TestClient::test_shutdown",
"posthog/test/test_client.py::TestClient::test_stringifies_distinct_id",
"posthog/test/test_client.py::TestClient::test_synchronous",
"posthog/test/test_client.py::TestClient::test_unicode",
"posthog/test/test_client.py::TestClient::test_user_defined_flush_at",
"posthog/test/test_client.py::TestClient::test_user_defined_timeout"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-05-14T11:50:01Z" | mit |
|
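Annotation (illustrative note, not part of the dataset row above): the PostHog-30 fix works because `dict.get(key, default)` only covers a missing key — an explicit `None` value still slips through, so the patch adds an `or 100` guard. A minimal sketch with hypothetical flag payloads mirroring the ones discussed in the issue:

```python
# Hypothetical flag payloads, not actual PostHog API responses.
flag_without_rollout = {"key": "beta-feature", "is_simple_flag": True}
flag_none_rollout = {"key": "beta-feature", "is_simple_flag": True, "rollout_percentage": None}

def rollout_fraction(flag):
    # .get(..., 100) handles a missing key; `or 100` handles an explicit None.
    return (flag.get("rollout_percentage", 100) or 100) / 100

print(rollout_fraction(flag_without_rollout))  # 1.0
print(rollout_fraction(flag_none_rollout))     # 1.0 instead of a TypeError
```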
PostHog__posthog-python-32 | diff --git a/example.py b/example.py
index 7121430..08d64e6 100644
--- a/example.py
+++ b/example.py
@@ -6,7 +6,7 @@ import time
import posthog
# You can find this key on the /setup page in PostHog
-posthog.api_key = ""
+posthog.project_api_key = ""
posthog.personal_api_key = ""
# Where you host PostHog, with no trailing /.
diff --git a/posthog/__init__.py b/posthog/__init__.py
index 0894139..be488ce 100644
--- a/posthog/__init__.py
+++ b/posthog/__init__.py
@@ -14,13 +14,14 @@ send = True # type: bool
sync_mode = False # type: bool
disabled = False # type: bool
personal_api_key = None # type: str
+project_api_key = None # type: str
default_client = None
def capture(
- distinct_id, # type: str,
- event, # type: str,
+ distinct_id, # type: str
+ event, # type: str
properties=None, # type: Optional[Dict]
context=None, # type: Optional[Dict]
timestamp=None, # type: Optional[datetime.datetime]
@@ -252,6 +253,7 @@ def _proxy(method, *args, **kwargs):
send=send,
sync_mode=sync_mode,
personal_api_key=personal_api_key,
+ project_api_key=project_api_key,
)
fn = getattr(default_client, method)
diff --git a/posthog/client.py b/posthog/client.py
index 4a58631..a416b7d 100644
--- a/posthog/client.py
+++ b/posthog/client.py
@@ -52,7 +52,7 @@ class Client(object):
self.queue = queue.Queue(max_queue_size)
# api_key: This should be the Team API Key (token), public
- self.api_key = api_key or project_api_key
+ self.api_key = project_api_key or api_key
require("api_key", self.api_key, string_types)
@@ -88,7 +88,7 @@ class Client(object):
self.consumers = []
consumer = Consumer(
self.queue,
- api_key,
+ self.api_key,
host=host,
on_error=on_error,
flush_at=flush_at,
diff --git a/posthog/consumer.py b/posthog/consumer.py
index e5e4acf..5e403e8 100644
--- a/posthog/consumer.py
+++ b/posthog/consumer.py
@@ -124,6 +124,8 @@ class Consumer(Thread):
# retry on server errors and client errors
# with 429 status code (rate limited),
# don't retry on other client errors
+ if exc.status == "N/A":
+ return False
return (400 <= exc.status < 500) and exc.status != 429
else:
# retry on all other errors (eg. network)
| PostHog/posthog-python | fe6d0dc1ecc2df09ee2fbd3f79e8843c03622b70 | diff --git a/posthog/test/test_client.py b/posthog/test/test_client.py
index 9c867a0..3c9ea4a 100644
--- a/posthog/test/test_client.py
+++ b/posthog/test/test_client.py
@@ -43,6 +43,22 @@ class TestClient(unittest.TestCase):
self.assertEqual(msg["properties"]["$lib"], "posthog-python")
self.assertEqual(msg["properties"]["$lib_version"], VERSION)
+ def test_basic_capture_with_project_api_key(self):
+
+ client = Client(project_api_key=TEST_API_KEY, on_error=self.set_fail)
+
+ success, msg = client.capture("distinct_id", "python test event")
+ client.flush()
+ self.assertTrue(success)
+ self.assertFalse(self.failed)
+
+ self.assertEqual(msg["event"], "python test event")
+ self.assertTrue(isinstance(msg["timestamp"], str))
+ self.assertTrue(isinstance(msg["messageId"], str))
+ self.assertEqual(msg["distinct_id"], "distinct_id")
+ self.assertEqual(msg["properties"]["$lib"], "posthog-python")
+ self.assertEqual(msg["properties"]["$lib_version"], VERSION)
+
def test_stringifies_distinct_id(self):
# A large number that loses precision in node:
# node -e "console.log(157963456373623802 + 1)" > 157963456373623800
@@ -324,6 +340,14 @@ class TestClient(unittest.TestCase):
]
self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+ @mock.patch("posthog.client.get")
+ def test_feature_enabled_simple_with_project_api_key(self, patch_get):
+ client = Client(project_api_key=TEST_API_KEY, on_error=self.set_fail)
+ client.feature_flags = [
+ {"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": True, "rollout_percentage": 100}
+ ]
+ self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+
@mock.patch("posthog.client.decide")
def test_feature_enabled_request(self, patch_decide):
patch_decide.return_value = {"featureFlags": ["beta-feature"]}
| posthog.project_api_key doesn't work
According to the docs (https://posthog.com/docs/libraries/python#installation) `posthog.api_key` has been deprecated in favour of `posthog.project_api_key`.
However, when I use `project_api_key` with the current version, 1.3.1, it does not work, while `api_key` does. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"posthog/test/test_client.py::TestClient::test_basic_capture_with_project_api_key"
] | [
"posthog/test/test_client.py::TestClient::test_advanced_capture",
"posthog/test/test_client.py::TestClient::test_advanced_identify",
"posthog/test/test_client.py::TestClient::test_advanced_page",
"posthog/test/test_client.py::TestClient::test_advanced_set",
"posthog/test/test_client.py::TestClient::test_advanced_set_once",
"posthog/test/test_client.py::TestClient::test_basic_alias",
"posthog/test/test_client.py::TestClient::test_basic_capture",
"posthog/test/test_client.py::TestClient::test_basic_identify",
"posthog/test/test_client.py::TestClient::test_basic_page",
"posthog/test/test_client.py::TestClient::test_basic_page_distinct_uuid",
"posthog/test/test_client.py::TestClient::test_basic_set",
"posthog/test/test_client.py::TestClient::test_basic_set_once",
"posthog/test/test_client.py::TestClient::test_call_identify_fails",
"posthog/test/test_client.py::TestClient::test_debug",
"posthog/test/test_client.py::TestClient::test_default_timeout_15",
"posthog/test/test_client.py::TestClient::test_empty_flush",
"posthog/test/test_client.py::TestClient::test_feature_enabled_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_feature_enabled_request",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_with_none_rollout_percentage",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_with_project_api_key",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_without_rollout_percentage",
"posthog/test/test_client.py::TestClient::test_flush",
"posthog/test/test_client.py::TestClient::test_gzip",
"posthog/test/test_client.py::TestClient::test_load_feature_flags",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_error",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_wrong_key",
"posthog/test/test_client.py::TestClient::test_numeric_distinct_id",
"posthog/test/test_client.py::TestClient::test_overflow",
"posthog/test/test_client.py::TestClient::test_personal_api_key_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_requires_api_key",
"posthog/test/test_client.py::TestClient::test_shutdown",
"posthog/test/test_client.py::TestClient::test_stringifies_distinct_id",
"posthog/test/test_client.py::TestClient::test_synchronous",
"posthog/test/test_client.py::TestClient::test_unicode",
"posthog/test/test_client.py::TestClient::test_user_defined_flush_at",
"posthog/test/test_client.py::TestClient::test_user_defined_timeout"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-05-18T13:09:54Z" | mit |
|
PrefectHQ__prefect-3518 | diff --git a/changes/issue3517.yaml b/changes/issue3517.yaml
new file mode 100644
index 0000000000..f0e161d288
--- /dev/null
+++ b/changes/issue3517.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix `get flow-runs` and `describe flow-runs` CLI commands querying of removed `duration` field - [#3517](https://github.com/PrefectHQ/prefect/issues/3517)"
diff --git a/changes/pr3511.yaml b/changes/pr3511.yaml
new file mode 100644
index 0000000000..9389fc944c
--- /dev/null
+++ b/changes/pr3511.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix multiprocessing scheduler failure while running tasks with timeouts - [#3511](https://github.com/PrefectHQ/prefect/pull/3511)"
diff --git a/docs/core/task_library/overview.md b/docs/core/task_library/overview.md
index f1c02ea2a5..1d84f2b59b 100644
--- a/docs/core/task_library/overview.md
+++ b/docs/core/task_library/overview.md
@@ -19,7 +19,7 @@ for the `prefect.tasks` module.
| <img src="/logos/dbt.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>DBT</p>](https://docs.prefect.io/api/latest/tasks/dbt.html) | <img src="/logos/docker.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Docker</p>](https://docs.prefect.io/api/latest/tasks/docker.html) | <img src="/logos/dropbox.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Dropbox</p>](https://docs.prefect.io/api/latest/tasks/dropbox.html) | <img src="/logos/email.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Email</p>](https://docs.prefect.io/api/latest/tasks/email.html) | <img src="/logos/github.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>GitHub</p>](https://docs.prefect.io/api/latest/tasks/github.html) |
| <img src="/logos/google_cloud.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Google Cloud</p>](https://docs.prefect.io/api/latest/tasks/gcp.html) | <img src="/logos/sheets.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Google Sheets</p>](https://docs.prefect.io/api/latest/tasks/google_sheets.html) | <img src="/logos/ge.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Great Expectations</p>](https://docs.prefect.io/api/latest/tasks/great_expectations.html) | <img src="/logos/jira.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Jira</p>](https://docs.prefect.io/api/latest/tasks/jira.html) | <img src="/logos/kubernetes.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Kubernetes</p>](https://docs.prefect.io/api/latest/tasks/kubernetes.html) |
| <img src="/logos/mysql.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>MySQL</p>](https://docs.prefect.io/api/latest/tasks/mysql.html) | <img src="/logos/monday.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Monday</p>](https://docs.prefect.io/api/latest/tasks/monday.html) | <img src="/logos/postgres.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>PostgreSQL</p>](https://docs.prefect.io/api/latest/tasks/postgres.html) | <img src="/logos/python.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Python</p>](https://docs.prefect.io/api/latest/tasks/function.html) | <img src="/logos/pushbullet.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Pushbullet</p>](https://docs.prefect.io/api/latest/tasks/pushbullet.html) |
-| <img src="/logos/redis.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Redis</p>](https://docs.prefect.io/api/latest/tasks/redis.html) | <img src="/logos/rlogo.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>RSS</p>](https://docs.prefect.io/api/latest/tasks/rss.html) | <img src="/logos/shell.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Shell</p>](https://docs.prefect.io/api/latest/tasks/shell.html) | <img src="/logos/slack.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Slack</p>](https://docs.prefect.io/api/latest/tasks/slack.html)| <img src="/logos/snowflake.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Snowflake</p>](https://docs.prefect.io/api/latest/tasks/snowflake.html) |
+| <img src="/logos/redis.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Redis</p>](https://docs.prefect.io/api/latest/tasks/redis.html) | <img src="/logos/rlogo.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>RSS</p>](https://docs.prefect.io/api/latest/tasks/rss.html) | <img src="/logos/shell.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Shell</p>](https://docs.prefect.io/api/latest/tasks/shell.html) | <img src="/logos/slack.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Slack</p>](https://docs.prefect.io/api/latest/tasks/notifications.html#slacktask)| <img src="/logos/snowflake.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Snowflake</p>](https://docs.prefect.io/api/latest/tasks/snowflake.html) |
| <img src="/logos/spacy.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>SpaCy</p>](https://docs.prefect.io/api/latest/tasks/spacy.html) | <img src="/logos/sqlite.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>SQLite</p>](https://docs.prefect.io/api/latest/tasks/sqlite.html) | <img src="/logos/tlogo.png" height=128 width=128 style="max-height: 128px; max-width: 128px;"> [<p>Twitter</p>](https://docs.prefect.io/api/latest/tasks/twitter.html) |
## Task library in action
diff --git a/src/prefect/cli/describe.py b/src/prefect/cli/describe.py
index 7fc7cdd3c1..32150765a2 100644
--- a/src/prefect/cli/describe.py
+++ b/src/prefect/cli/describe.py
@@ -239,7 +239,6 @@ def flow_runs(name, flow_name, output):
"scheduled_start_time": True,
"start_time": True,
"end_time": True,
- "duration": True,
"serialized_state": True,
}
}
diff --git a/src/prefect/cli/get.py b/src/prefect/cli/get.py
index f908f10cb5..f81ef7919b 100644
--- a/src/prefect/cli/get.py
+++ b/src/prefect/cli/get.py
@@ -247,7 +247,6 @@ def flow_runs(limit, flow, project, started):
"created": True,
"state": True,
"name": True,
- "duration": True,
"start_time": True,
}
}
@@ -271,7 +270,6 @@ def flow_runs(limit, flow, project, started):
item.state,
pendulum.parse(item.created).diff_for_humans(),
start_time,
- item.duration,
item.id,
]
)
@@ -285,7 +283,6 @@ def flow_runs(limit, flow, project, started):
"STATE",
"AGE",
"START TIME",
- "DURATION",
"ID",
],
tablefmt="plain",
diff --git a/src/prefect/utilities/executors.py b/src/prefect/utilities/executors.py
index bc2a189b53..2c0027b75c 100644
--- a/src/prefect/utilities/executors.py
+++ b/src/prefect/utilities/executors.py
@@ -10,7 +10,9 @@ import warnings
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import TimeoutError as FutureTimeout
from functools import wraps
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union, Sequence
+
+import cloudpickle
import prefect
@@ -108,20 +110,68 @@ def main_thread_timeout(
signal.alarm(0)
+def multiprocessing_safe_retrieve_value(
+ queue: multiprocessing.Queue,
+ payload: bytes,
+) -> None:
+ """
+ Gets the return value from a function and puts it in a multiprocessing-safe
+ container. Helper function for `multiprocessing_timeout`, must be defined top-level
+ so it can be pickled and sent to `multiprocessing.Process`
+
+ Passing the payload serialized allows us to escape the limitations of the python
+ native pickler which will fail on tasks defined in scripts because of name
+ mismatches. Whilst this particular example only affects the `func` arg, any of the
+ others could be affected by other pickle limitations as well.
+
+ Args:
+ - queue (multiprocessing.Queue): The queue to pass the resulting payload to
+ - payload (bytes): A serialized dictionary containing the data required to run
+ the function. Should be serialized with `cloudpickle.dumps`
+ Expects the following keys:
+ - fn (Callable): The function to call
+ - args (list): Positional argument values to call the function with
+ - kwargs (dict): Keyword arguments to call the function with
+ - context (dict): The prefect context dictionary to use during execution
+
+ Returns:
+ - None
+ Passes the serialized (with cloudpickle) return value or exception into the
+ queue. Callers are expected to re-raise any exceptions.
+ """
+ request = cloudpickle.loads(payload)
+
+ fn: Callable = request["fn"]
+ context: dict = request.get("context", {})
+ args: Sequence = request.get("args", [])
+ kwargs: dict = request.get("kwargs", {})
+
+ try:
+ with prefect.context(context):
+ return_val = fn(*args, **kwargs)
+ except Exception as exc:
+ return_val = exc
+
+ queue.put(cloudpickle.dumps(return_val))
+
+
def multiprocessing_timeout(
fn: Callable, *args: Any, timeout: int = None, **kwargs: Any
) -> Any:
"""
Helper function for implementing timeouts on function executions.
Implemented by spawning a new multiprocess.Process() and joining with timeout.
+
Args:
- fn (callable): the function to execute
- *args (Any): arguments to pass to the function
- - timeout (int): the length of time to allow for
- execution before raising a `TimeoutError`, represented as an integer in seconds
+ - timeout (int): the length of time to allow for execution before raising a
+ `TimeoutError`, represented as an integer in seconds
- **kwargs (Any): keyword arguments to pass to the function
+
Returns:
- the result of `f(*args, **kwargs)`
+
Raises:
- AssertionError: if run from a daemonic process
- TimeoutError: if function execution exceeds the allowed timeout
@@ -130,26 +180,29 @@ def multiprocessing_timeout(
if timeout is None:
return fn(*args, **kwargs)
- def retrieve_value(
- *args: Any, _container: multiprocessing.Queue, _ctx_dict: dict, **kwargs: Any
- ) -> None:
- """Puts the return value in a multiprocessing-safe container"""
- try:
- with prefect.context(_ctx_dict):
- val = fn(*args, **kwargs)
- _container.put(val)
- except Exception as exc:
- _container.put(exc)
-
- q = multiprocessing.Queue() # type: multiprocessing.Queue
- kwargs["_container"] = q
- kwargs["_ctx_dict"] = prefect.context.to_dict()
- p = multiprocessing.Process(target=retrieve_value, args=args, kwargs=kwargs)
+ # Create a queue to pass the function return value back
+ queue = multiprocessing.Queue() # type: multiprocessing.Queue
+
+ # Set internal kwargs for the helper function
+ request = {
+ "fn": fn,
+ "args": args,
+ "kwargs": kwargs,
+ "context": prefect.context.to_dict(),
+ }
+ payload = cloudpickle.dumps(request)
+
+ p = multiprocessing.Process(
+ target=multiprocessing_safe_retrieve_value, args=(queue, payload)
+ )
p.start()
p.join(timeout)
p.terminate()
- if not q.empty():
- res = q.get()
+
+ # Handle the process result, if the queue is empty the function did not finish
+ # before the timeout
+ if not queue.empty():
+ res = cloudpickle.loads(queue.get())
if isinstance(res, Exception):
raise res
return res
@@ -173,8 +226,8 @@ def timeout_handler(
Args:
- fn (callable): the function to execute
- *args (Any): arguments to pass to the function
- - timeout (int): the length of time to allow for
- execution before raising a `TimeoutError`, represented as an integer in seconds
+ - timeout (int): the length of time to allow for execution before raising a
+ `TimeoutError`, represented as an integer in seconds
- **kwargs (Any): keyword arguments to pass to the function
Returns:
@@ -196,19 +249,16 @@ def timeout_handler(
elif multiprocessing.current_process().daemon is False:
return multiprocessing_timeout(fn, *args, timeout=timeout, **kwargs)
- msg = (
- "This task is running in a daemonic subprocess; "
- "consequently Prefect can only enforce a soft timeout limit, i.e., "
- "if your Task reaches its timeout limit it will enter a TimedOut state "
- "but continue running in the background."
- )
+ soft_timeout_reason = "in a daemonic subprocess"
else:
- msg = (
- "This task is running on Windows; "
- "consequently Prefect can only enforce a soft timeout limit, i.e., "
- "if your Task reaches its timeout limit it will enter a TimedOut state "
- "but continue running in the background."
- )
+ soft_timeout_reason = "on Windows"
+
+ msg = (
+ f"This task is running {soft_timeout_reason}; "
+ "consequently Prefect can only enforce a soft timeout limit, i.e., "
+ "if your Task reaches its timeout limit it will enter a TimedOut state "
+ "but continue running in the background."
+ )
warnings.warn(msg, stacklevel=2)
executor = ThreadPoolExecutor()
@@ -224,7 +274,10 @@ def timeout_handler(
try:
return fut.result(timeout=timeout)
except FutureTimeout as exc:
- raise TimeoutError("Execution timed out.") from exc
+ raise TimeoutError(
+ f"Execution timed out but was executed {soft_timeout_reason} and will "
+ "continue to run in the background."
+ ) from exc
class RecursiveCall(Exception):
| PrefectHQ/prefect | 8563cbb4b0a9e5db99045fb46717cac6ba172af5 | diff --git a/tests/cli/test_describe.py b/tests/cli/test_describe.py
index b06dfb87ba..a56c746142 100644
--- a/tests/cli/test_describe.py
+++ b/tests/cli/test_describe.py
@@ -263,7 +263,6 @@ def test_describe_flow_runs(monkeypatch, cloud_api):
scheduled_start_time
start_time
end_time
- duration
serialized_state
}
}
@@ -320,7 +319,6 @@ def test_describe_flow_runs_populated(monkeypatch, cloud_api):
scheduled_start_time
start_time
end_time
- duration
serialized_state
}
}
diff --git a/tests/cli/test_get.py b/tests/cli/test_get.py
index 70fcef9541..93edc3be0f 100644
--- a/tests/cli/test_get.py
+++ b/tests/cli/test_get.py
@@ -195,7 +195,6 @@ def test_get_flow_runs_cloud(monkeypatch, cloud_api):
and "STATE" in result.output
and "AGE" in result.output
and "START TIME" in result.output
- and "DURATION" in result.output
)
query = """
@@ -208,7 +207,6 @@ def test_get_flow_runs_cloud(monkeypatch, cloud_api):
created
state
name
- duration
start_time
}
}
@@ -255,7 +253,6 @@ def test_get_flow_runs_populated(monkeypatch, cloud_api):
created
state
name
- duration
start_time
}
}
diff --git a/tests/conftest.py b/tests/conftest.py
index ad70a37d9c..4f582e8562 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -35,7 +35,7 @@ def prefect_home_dir():
# ----------------
@pytest.fixture(scope="session")
def mthread():
- "Multi-threaded executor"
+ "Multi-threaded executor using dask distributed"
with Client(
processes=False, scheduler_port=0, dashboard_address=":0", n_workers=2
) as client:
@@ -51,12 +51,18 @@ def local():
@pytest.fixture()
def sync():
"Synchronous dask (not dask.distributed) executor"
- yield LocalDaskExecutor()
+ yield LocalDaskExecutor(scheduler="sync")
+
+
[email protected]()
+def mproc_local():
+ "Multiprocessing executor using local dask (not distributed cluster)"
+ yield LocalDaskExecutor(scheduler="processes")
@pytest.fixture(scope="session")
def mproc():
- "Multi-processing executor"
+ "Multi-processing executor using dask distributed"
with Client(
processes=True, scheduler_port=0, dashboard_address=":0", n_workers=2
) as client:
@@ -64,14 +70,16 @@ def mproc():
@pytest.fixture()
-def _switch(mthread, local, sync, mproc):
+def _switch(mthread, local, sync, mproc, mproc_local):
"""
A construct needed so we can parametrize the executor fixture.
This isn't straightforward since each executor needs to be initialized
in slightly different ways.
"""
- execs = dict(mthread=mthread, local=local, sync=sync, mproc=mproc)
+ execs = dict(
+ mthread=mthread, local=local, sync=sync, mproc=mproc, mproc_local=mproc_local
+ )
return lambda e: execs[e]
diff --git a/tests/core/test_flow.py b/tests/core/test_flow.py
index 9460bf4e9f..6b763a07ae 100644
--- a/tests/core/test_flow.py
+++ b/tests/core/test_flow.py
@@ -22,7 +22,7 @@ from prefect.core.task import Task
from prefect.tasks.core import constants
from prefect.core.parameter import Parameter
from prefect.engine.cache_validators import all_inputs, partial_inputs_only
-from prefect.engine.executors import LocalExecutor
+from prefect.engine.executors import LocalExecutor, DaskExecutor
from prefect.engine.result import Result
from prefect.engine.results import LocalResult, PrefectResult
from prefect.engine.result_handlers import LocalResultHandler, ResultHandler
@@ -2928,8 +2928,12 @@ class TestSaveLoad:
sys.platform == "win32" or sys.version_info.minor == 6,
reason="Windows doesn't support any timeout logic",
)
[email protected]("executor", ["local", "sync", "mthread"], indirect=True)
-def test_timeout_actually_stops_execution(executor):
[email protected](
+ "executor", ["local", "sync", "mthread", "mproc_local", "mproc"], indirect=True
+)
+def test_timeout_actually_stops_execution(
+ executor,
+):
# Note: this is a potentially brittle test! In some cases (local and sync) signal.alarm
# is used as the mechanism for timing out a task. This passes off the job of measuring
# the time for the timeout to the OS, which uses the "wallclock" as reference (the real
@@ -2943,15 +2947,25 @@ def test_timeout_actually_stops_execution(executor):
# the task implementation" we got, but instead do a simple task (create a file) and sleep.
# This will drastically reduce the brittleness of the test (but not completely).
+ # The amount of time to sleep before writing 'invalid' to the file
+ # lower values will decrease test time but increase chances of intermittent failure
+ SLEEP_TIME = 3
+
+ # Determine if the executor is distributed and using daemonic processes which
+ # cannot be cancelled and throw a warning instead.
+ in_daemon_process = isinstance(
+ executor, DaskExecutor
+ ) and not executor.address.startswith("inproc")
+
with tempfile.TemporaryDirectory() as call_dir:
# Note: a real file must be used in the case of "mthread"
FILE = os.path.join(call_dir, "test.txt")
- @prefect.task(timeout=1)
+ @prefect.task(timeout=2)
def slow_fn():
with open(FILE, "w") as f:
f.write("called!")
- time.sleep(2)
+ time.sleep(SLEEP_TIME)
with open(FILE, "a") as f:
f.write("invalid")
@@ -2962,15 +2976,25 @@ def test_timeout_actually_stops_execution(executor):
start_time = time.time()
state = flow.run(executor=executor)
stop_time = time.time()
- time.sleep(max(0, 3 - (stop_time - start_time)))
+
+ # Sleep so 'invalid' will be written if the task is not killed, subtracting the
+ # actual runtime to speed up the test a little
+ time.sleep(max(1, SLEEP_TIME - (stop_time - start_time)))
assert os.path.exists(FILE)
with open(FILE, "r") as f:
- assert "invalid" not in f.read()
+ # `invalid` should *only be in the file if a daemon process was used
+ assert ("invalid" in f.read()) == in_daemon_process
assert state.is_failed()
assert isinstance(state.result[slow_fn], TimedOut)
assert isinstance(state.result[slow_fn].result, TimeoutError)
+ # We cannot capture the UserWarning because it is being run by a Dask worker
+ # but we can make sure the TimeoutError includes a note about it
+ assert (
+ "executed in a daemonic subprocess and will continue to run"
+ in str(state.result[slow_fn].result)
+ ) == in_daemon_process
@pytest.mark.skip("Result handlers not yet deprecated")
| "Bad request for URL" when running "prefect get flow-runs"
## Description
I see the following error when running `prefect get flow-runs`:
```
Traceback (most recent call last):
File "/home/jc/.local/bin/prefect", line 10, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/jc/.local/lib/python3.7/site-packages/prefect/cli/get.py", line 256, in flow_runs
result = Client().graphql(query)
File "/home/jc/.local/lib/python3.7/site-packages/prefect/client/client.py", line 281, in graphql
retry_on_api_error=retry_on_api_error,
File "/home/jc/.local/lib/python3.7/site-packages/prefect/client/client.py", line 237, in post
retry_on_api_error=retry_on_api_error,
File "/home/jc/.local/lib/python3.7/site-packages/prefect/client/client.py", line 401, in _request
session=session, method=method, url=url, params=params, headers=headers
File "/home/jc/.local/lib/python3.7/site-packages/prefect/client/client.py", line 333, in _send_request
response.raise_for_status()
File "/home/jc/.local/lib/python3.7/site-packages/requests/models.py", line 941, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: http://localhost:4200/graphql
```
The same error occurs if I specify a flow (`--flow`) or project (`--project`).
I'm running 0.13.11. The version of prefect matches the tags of the running prefect containers.
## Reproduction
On a VM:
```
pip3 install prefect
prefect backend server
prefect server start
```
Wait for images to download and the services to start. Then run:
```
prefect get flow-runs
```
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc]"
] | [
"tests/cli/test_describe.py::test_describe_init",
"tests/cli/test_describe.py::test_describe_help",
"tests/cli/test_get.py::test_get_init",
"tests/cli/test_get.py::test_get_help",
"tests/cli/test_get.py::test_get_logs_fails_no_name_or_id",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_args",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_none",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_empty_string",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_false",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_edges",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_schedule",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_without_state_handler",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_on_failure",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers0]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers1]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_illegal_handler",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger_with_informative_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage_and_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_environment",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_has_default_environment",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_auto_generates_tasks",
"tests/core/test_flow.py::test_add_task_to_flow",
"tests/core/test_flow.py::test_add_task_returns_task",
"tests/core/test_flow.py::test_add_task_raise_an_error_if_the_task_is_not_a_task_class",
"tests/core/test_flow.py::test_set_dependencies_adds_all_arguments_to_flow",
"tests/core/test_flow.py::test_set_dependencies_converts_unkeyed_arguments_to_tasks",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val0]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val1]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val2]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val3]",
"tests/core/test_flow.py::test_set_dependencies_creates_mapped_edges",
"tests/core/test_flow.py::test_set_dependencies_respects_unmapped",
"tests/core/test_flow.py::test_binding_a_task_in_context_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_no_with_flow_raises_error",
"tests/core/test_flow.py::test_adding_a_task_to_a_flow_twice_is_ok",
"tests/core/test_flow.py::test_binding_a_task_to_two_different_flows_is_ok",
"tests/core/test_flow.py::test_binding_a_task_with_var_kwargs_expands_the_kwargs",
"tests/core/test_flow.py::test_calling_a_task_without_context_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_task_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_slugged_task_in_different_flows_is_ok",
"tests/core/test_flow.py::test_context_manager_is_properly_applied_to_tasks",
"tests/core/test_flow.py::test_that_flow_adds_and_removes_itself_from_prefect_context",
"tests/core/test_flow.py::test_add_edge",
"tests/core/test_flow.py::test_add_edge_raise_error_for_downstream_parameter",
"tests/core/test_flow.py::test_add_edge_raise_error_for_duplicate_key_if_validate",
"tests/core/test_flow.py::test_add_edge_returns_edge",
"tests/core/test_flow.py::test_add_edge_from_contant",
"tests/core/test_flow.py::test_chain",
"tests/core/test_flow.py::test_splatting_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_iter",
"tests/core/test_flow.py::test_detect_cycle",
"tests/core/test_flow.py::test_eager_cycle_detection_defaults_false",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_1",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_2",
"tests/core/test_flow.py::test_eager_validation_is_off_by_default",
"tests/core/test_flow.py::test_eager_cycle_detection_works",
"tests/core/test_flow.py::test_copy_handles_constants",
"tests/core/test_flow.py::test_copy",
"tests/core/test_flow.py::test_infer_root_tasks",
"tests/core/test_flow.py::test_infer_terminal_tasks",
"tests/core/test_flow.py::test_reference_tasks_are_terminal_tasks_by_default",
"tests/core/test_flow.py::test_set_reference_tasks",
"tests/core/test_flow.py::test_set_reference_tasks_at_init_with_empty_flow_raises_error",
"tests/core/test_flow.py::test_set_reference_tasks_at_init",
"tests/core/test_flow.py::test_reset_reference_tasks_to_terminal_tasks",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_part_of_flow",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_iterable",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_created_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_copied_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_for_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_warning_raised_for_lambda_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_context_is_scoped_to_flow_context",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_tasks",
"tests/core/test_flow.py::TestEquality::test_object_inequality",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_edges",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_name",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_reference_tasks",
"tests/core/test_flow.py::test_update",
"tests/core/test_flow.py::test_update_with_constants",
"tests/core/test_flow.py::test_update_with_mapped_edges",
"tests/core/test_flow.py::test_update_with_parameter_merge",
"tests/core/test_flow.py::test_upstream_and_downstream_error_msgs_when_task_is_not_in_flow",
"tests/core/test_flow.py::test_sorted_tasks",
"tests/core/test_flow.py::test_sorted_tasks_with_ambiguous_sort",
"tests/core/test_flow.py::test_sorted_tasks_with_start_task",
"tests/core/test_flow.py::test_sorted_tasks_with_invalid_start_task",
"tests/core/test_flow.py::test_flow_raises_for_irrelevant_user_provided_parameters",
"tests/core/test_flow.py::test_flow_raises_for_missing_required_parameters",
"tests/core/test_flow.py::test_flow_doesnt_raises_for_missing_nonrequired_parameters",
"tests/core/test_flow.py::test_flow_accepts_unserializeable_parameters",
"tests/core/test_flow.py::test_parameters_can_not_be_downstream_dependencies",
"tests/core/test_flow.py::test_validate_cycles",
"tests/core/test_flow.py::test_validate_missing_edge_downstream_tasks",
"tests/core/test_flow.py::test_validate_missing_edge_upstream_tasks",
"tests/core/test_flow.py::test_validate_missing_reference_tasks",
"tests/core/test_flow.py::test_validate_edges_kwarg",
"tests/core/test_flow.py::test_validate_edges",
"tests/core/test_flow.py::test_skip_validate_edges",
"tests/core/test_flow.py::test_skip_validation_in_init_with_kwarg",
"tests/core/test_flow.py::TestCache::test_cache_created",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks_with_args",
"tests/core/test_flow.py::TestCache::test_cache_root_tasks",
"tests/core/test_flow.py::TestCache::test_cache_terminal_tasks",
"tests/core/test_flow.py::TestCache::test_cache_all_upstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_all_downstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_survives_pickling",
"tests/core/test_flow.py::TestCache::test_adding_task_clears_cache",
"tests/core/test_flow.py::TestCache::test_adding_edge_clears_cache",
"tests/core/test_flow.py::TestCache::test_setting_reference_tasks_clears_cache",
"tests/core/test_flow.py::TestReplace::test_replace_replaces_all_the_things",
"tests/core/test_flow.py::TestReplace::test_replace_update_slugs",
"tests/core/test_flow.py::TestReplace::test_replace_complains_about_tasks_not_in_flow",
"tests/core/test_flow.py::TestReplace::test_replace_runs_smoothly",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_to_task",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_collections_to_tasks",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_return_everything",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_name",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_takes_intersection",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_accepts_tags_and_requires_all_tags",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_can_check_types",
"tests/core/test_flow.py::TestSerialize::test_serialization",
"tests/core/test_flow.py::TestSerialize::test_deserialization",
"tests/core/test_flow.py::TestSerialize::test_serialize_validates_invalid_flows",
"tests/core/test_flow.py::TestSerialize::test_serialize_includes_storage",
"tests/core/test_flow.py::TestSerialize::test_serialize_adds_flow_to_storage_if_build",
"tests/core/test_flow.py::TestSerialize::test_serialize_can_be_called_twice",
"tests/core/test_flow.py::TestSerialize::test_serialize_fails_with_no_storage",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_runs_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_with_paused_states_hangs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_passes_scheduled_parameters",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_persist_stale_scheduled_params",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_run_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_returns_tasks_when_running_off_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_responds_to_config",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_stops_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_schedule_continues_on_executor_failure",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_without_schedule_can_run_cached_tasks",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs_with_always_run_trigger",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_differing_lengths",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_non_cached",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_mapped_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_run_accepts_state_kwarg",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_sets_scheduled_start_time",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_does_not_set_scheduled_start_time_globally",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_persists_scheduled_start_time_across_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_updates_the_scheduled_start_time_of_each_scheduled_run",
"tests/core/test_flow.py::TestFlowDiagnostics::test_flow_diagnostics",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.environments.storage.Docker]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.environments.storage.Local]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_passes_kwargs_to_storage",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_if_labeled_storage_used[storage0]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_if_labeled_storage_used[storage1]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_if_labeled_storage_used[storage2]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_if_labeled_storage_used[storage3]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage0]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage1]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage2]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage3]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_override_custom_set_result",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_with_storage_labels",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_overwrite_labels_if_local_storage_is_used",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_errors_if_in_flow_context",
"tests/core/test_flow.py::test_bad_flow_runner_code_still_returns_state_obj",
"tests/core/test_flow.py::test_flow_run_raises_informative_error_for_certain_kwargs",
"tests/core/test_flow.py::test_flow_run_raises_if_no_more_scheduled_runs",
"tests/core/test_flow.py::test_flow_run_respects_state_kwarg",
"tests/core/test_flow.py::test_flow_run_respects_task_state_kwarg",
"tests/core/test_flow.py::test_flow_run_handles_error_states_when_initial_state_is_provided",
"tests/core/test_flow.py::test_looping_works_in_a_flow",
"tests/core/test_flow.py::test_pause_resume_works_with_retries",
"tests/core/test_flow.py::test_looping_with_retries_works_in_a_flow",
"tests/core/test_flow.py::test_looping_with_retries_resets_run_count",
"tests/core/test_flow.py::test_starting_at_arbitrary_loop_index",
"tests/core/test_flow.py::test_flow_run_name_as_run_param",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_and_load_loads",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_has_a_default",
"tests/core/test_flow.py::TestSaveLoad::test_load_accepts_name_and_sluggified_name",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[local]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[sync]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mthread]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc_local]",
"tests/core/test_flow.py::test_results_write_to_formatted_locations",
"tests/core/test_flow.py::test_results_write_to_custom_formatters",
"tests/core/test_flow.py::test_run_agent_passes_environment_labels",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_are_stable",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_incorporate_tags_and_order"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-16T14:11:13Z" | apache-2.0 |
|
PrefectHQ__prefect-3551 | diff --git a/changes/pr3548.yaml b/changes/pr3548.yaml
new file mode 100644
index 0000000000..f9b4d08522
--- /dev/null
+++ b/changes/pr3548.yaml
@@ -0,0 +1,5 @@
+enhancement:
+ - "`RenameFlowRunTask`: use default `flow_run_id` value from context - [#3548](https://github.com/PrefectHQ/prefect/pull/3548)"
+
+contributor:
+ - "[Panagiotis Simakis](https://github.com/sp1thas)"
diff --git a/changes/pr3549.yaml b/changes/pr3549.yaml
new file mode 100644
index 0000000000..f8c16afd92
--- /dev/null
+++ b/changes/pr3549.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Raise a better error message when trying to register a flow with parameters with JSON-incompatible defaults - [#3549](https://github.com/PrefectHQ/prefect/pull/3549)"
diff --git a/changes/pr3551.yaml b/changes/pr3551.yaml
new file mode 100644
index 0000000000..729d7b304b
--- /dev/null
+++ b/changes/pr3551.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix LocalAgent PYTHONPATH construction on Windows - [#3551](https://github.com/PrefectHQ/prefect/pull/3551)"
diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py
index 61cb6ba95a..30ff1588df 100644
--- a/src/prefect/agent/local/agent.py
+++ b/src/prefect/agent/local/agent.py
@@ -225,7 +225,7 @@ class LocalAgent(Agent):
python_path.append(os.environ["PYTHONPATH"])
if self.import_paths:
python_path.extend(self.import_paths)
- env["PYTHONPATH"] = ":".join(python_path)
+ env["PYTHONPATH"] = os.pathsep.join(python_path)
# 4. Values set on the agent via `--env`
env.update(self.env_vars)
diff --git a/src/prefect/tasks/prefect/flow_run_rename.py b/src/prefect/tasks/prefect/flow_run_rename.py
index d4127a231c..69bd46312d 100644
--- a/src/prefect/tasks/prefect/flow_run_rename.py
+++ b/src/prefect/tasks/prefect/flow_run_rename.py
@@ -1,6 +1,7 @@
import warnings
from typing import Any
+import prefect
from prefect import Task
from prefect.client import Client
from prefect.utilities.tasks import defaults_from_attrs
@@ -30,24 +31,30 @@ class RenameFlowRun(Task):
def run(self, flow_run_id: str, flow_run_name: str) -> bool:
"""
Args:
- - flow_run_id (str, optional): The ID of the flow run to rename
+ - flow_run_id (str, optional): The ID of the flow run to rename. If `None`,
+ the `flow_run_id` from `prefect.context` will be used as default value
- flow_run_name (str, optional): The new flow run name
Returns:
- bool: Boolean representing whether the flow run was renamed successfully or not.
Raises:
- - ValueError: If flow_run_id or name is not provided
+ - ValueError: If `flow_run_id` is not provided and `flow_run_id` does not exist
+ in `prefect.context`
+ - ValueError: If `flow_run_name` is not provided
Example:
```python
from prefect.tasks.prefect.flow_rename import FlowRenameTask
- rename_flow = FlowRenameTask(flow_run_id="id123", flow_name="A new flow run name")
+ rename_flow = FlowRenameTask(flow_name="A new flow run name")
```
"""
- if flow_run_id is None:
- raise ValueError("Must provide a flow run ID.")
+ flow_run_id = flow_run_id or prefect.context.get("flow_run_id")
+ if not flow_run_id:
+ raise ValueError(
+ "`flow_run_id` must be explicitly provided or available in the context"
+ )
if flow_run_name is None:
raise ValueError("Must provide a flow name.")
diff --git a/src/prefect/utilities/serialization.py b/src/prefect/utilities/serialization.py
index 7225c02b8f..47eb01e970 100644
--- a/src/prefect/utilities/serialization.py
+++ b/src/prefect/utilities/serialization.py
@@ -179,14 +179,23 @@ class JSONCompatible(fields.Field):
self.validators.insert(0, self._validate_json)
def _serialize(self, value, attr, obj, **kwargs): # type: ignore
- self._validate_json(value)
+ try:
+ json.dumps(value)
+ except TypeError:
+ raise ValidationError(
+ "When running with Prefect Cloud/Server, values for "
+ f"`{type(obj).__name__}.{attr}` must be JSON compatible. "
+ f"Unable to serialize `{value!r}`."
+ ) from None
return super()._serialize(value, attr, obj, **kwargs)
def _validate_json(self, value: Any) -> None:
try:
json.dumps(value)
- except TypeError as type_error:
- raise ValidationError("Value is not JSON-compatible") from type_error
+ except TypeError:
+ raise ValidationError(
+ f"Values must be JSON compatible, got `{value!r}`"
+ ) from None
class Nested(fields.Nested):
| PrefectHQ/prefect | bd6e47379594d4e26e6810380482320eeee714ae | diff --git a/tests/agent/test_local_agent.py b/tests/agent/test_local_agent.py
index 4c2d4d657d..5abedcfe12 100644
--- a/tests/agent/test_local_agent.py
+++ b/tests/agent/test_local_agent.py
@@ -87,18 +87,22 @@ def test_local_agent_uses_ip_if_dockerdesktop_hostname(monkeypatch):
assert "IP" in agent.labels
-def test_populate_env_vars():
+def test_populate_env_vars(monkeypatch):
agent = LocalAgent()
+ # The python path may be a single item and we want to ensure the correct separator
+ # is added so we will ensure PYTHONPATH has an item in it to start
+ if not os.environ.get("PYTHONPATH", ""):
+ monkeypatch.setenv("PYTHONPATH", "foobar")
+
env_vars = agent.populate_env_vars(
GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}})
)
- python_path = env_vars.pop("PYTHONPATH", "")
- assert os.getcwd() in python_path
expected = os.environ.copy()
expected.update(
{
+ "PYTHONPATH": os.getcwd() + os.pathsep + expected.get("PYTHONPATH", ""),
"PREFECT__CLOUD__API": "https://api.prefect.io",
"PREFECT__CLOUD__AUTH_TOKEN": "TEST_TOKEN",
"PREFECT__CLOUD__AGENT__LABELS": str(DEFAULT_AGENT_LABELS),
diff --git a/tests/tasks/prefect/test_flow_run_rename.py b/tests/tasks/prefect/test_flow_run_rename.py
index 6801c0c7a2..d6dba9f2da 100644
--- a/tests/tasks/prefect/test_flow_run_rename.py
+++ b/tests/tasks/prefect/test_flow_run_rename.py
@@ -1,6 +1,7 @@
import pytest
from unittest.mock import MagicMock
+import prefect
from prefect.tasks.prefect.flow_run_rename import RenameFlowRun
@@ -34,9 +35,29 @@ def test_flow_run_rename_task(monkeypatch):
assert client.set_flow_run_name.call_args[0][1] == "a_new_name!"
+def test_flow_run_id_defaults_from_context(monkeypatch):
+ client = MagicMock()
+ client.set_flow_run_name = MagicMock(return_value=True)
+ monkeypatch.setattr(
+ "prefect.tasks.prefect.flow_run_rename.Client", MagicMock(return_value=client)
+ )
+
+ task = RenameFlowRun(flow_run_name="a_new_name!")
+
+ # Verify client called with arguments
+ with prefect.context(flow_run_id="id123"):
+ task.run()
+ assert client.set_flow_run_name.called
+ assert client.set_flow_run_name.call_args[0][0] == "id123"
+ assert client.set_flow_run_name.call_args[0][1] == "a_new_name!"
+
+
def test_missing_flow_run_id():
task = RenameFlowRun()
- with pytest.raises(ValueError, match="Must provide a flow run ID."):
+ with pytest.raises(
+ ValueError,
+ match="`flow_run_id` must be explicitly provided or available in the context",
+ ):
task.run(flow_run_name="a_new_name!")
diff --git a/tests/utilities/test_serialization.py b/tests/utilities/test_serialization.py
index e4313f9404..6a72ca3787 100644
--- a/tests/utilities/test_serialization.py
+++ b/tests/utilities/test_serialization.py
@@ -71,11 +71,15 @@ class TestJSONCompatibleField:
assert serialized["j"] == value
def test_validate_on_dump(self):
- with pytest.raises(marshmallow.ValidationError):
+ with pytest.raises(
+ marshmallow.ValidationError, match="must be JSON compatible"
+ ):
self.Schema().dump({"j": lambda: 1})
def test_validate_on_load(self):
- with pytest.raises(marshmallow.ValidationError):
+ with pytest.raises(
+ marshmallow.ValidationError, match="must be JSON compatible"
+ ):
self.Schema().load({"j": lambda: 1})
| Change "PYTHONPATH" construct for local agent
## Current behavior
Local agent has the `--import-path` feature, but it's currently only usable in Unix environments, because PYTHONPATH is concatenated with ":":
`current_env["PYTHONPATH"] = ":".join(python_path)`
## Proposed behavior
Use `os.pathsep` instead of ":" to get the correct value for Windows users as well.
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/tasks/prefect/test_flow_run_rename.py::test_flow_run_id_defaults_from_context",
"tests/tasks/prefect/test_flow_run_rename.py::test_missing_flow_run_id",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_validate_on_dump",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_validate_on_load"
] | [
"tests/agent/test_local_agent.py::test_local_agent_init",
"tests/agent/test_local_agent.py::test_local_agent_deduplicates_labels",
"tests/agent/test_local_agent.py::test_local_agent_config_options",
"tests/agent/test_local_agent.py::test_local_agent_config_no_storage_labels",
"tests/agent/test_local_agent.py::test_local_agent_config_options_hostname[True]",
"tests/agent/test_local_agent.py::test_local_agent_config_options_hostname[False]",
"tests/agent/test_local_agent.py::test_local_agent_uses_ip_if_dockerdesktop_hostname",
"tests/agent/test_local_agent.py::test_populate_env_vars",
"tests/agent/test_local_agent.py::test_populate_env_vars_sets_log_to_cloud[True]",
"tests/agent/test_local_agent.py::test_populate_env_vars_sets_log_to_cloud[False]",
"tests/agent/test_local_agent.py::test_populate_env_vars_from_agent_config",
"tests/agent/test_local_agent.py::test_populate_env_vars_removes_none_values",
"tests/agent/test_local_agent.py::test_populate_env_vars_includes_agent_labels",
"tests/agent/test_local_agent.py::test_populate_env_vars_import_paths",
"tests/agent/test_local_agent.py::test_populate_env_vars_keep_existing_python_path",
"tests/agent/test_local_agent.py::test_populate_env_vars_no_existing_python_path",
"tests/agent/test_local_agent.py::test_populate_env_vars_from_run_config",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage0]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage1]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage2]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage3]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage4]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_processes_valid_storage[storage5]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_raises_unsupported_storage",
"tests/agent/test_local_agent.py::test_local_agent_deploy_storage_fails_none",
"tests/agent/test_local_agent.py::test_local_agent_deploy_unsupported_run_config",
"tests/agent/test_local_agent.py::test_local_agent_deploy_run_config_working_dir[None]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_run_config_working_dir[existing]",
"tests/agent/test_local_agent.py::test_local_agent_deploy_run_config_missing_working_dir",
"tests/agent/test_local_agent.py::test_generate_supervisor_conf",
"tests/agent/test_local_agent.py::test_local_agent_heartbeat[0-False-None]",
"tests/agent/test_local_agent.py::test_local_agent_heartbeat[1-False-logs1]",
"tests/agent/test_local_agent.py::test_local_agent_heartbeat[1-True-logs2]",
"tests/agent/test_local_agent.py::test_local_agent_start_max_polls[0]",
"tests/agent/test_local_agent.py::test_local_agent_start_max_polls[1]",
"tests/agent/test_local_agent.py::test_local_agent_start_max_polls[2]",
"tests/tasks/prefect/test_flow_run_rename.py::test_deprecated_old_name",
"tests/tasks/prefect/test_flow_run_rename.py::test_flow_run_rename_task",
"tests/tasks/prefect/test_flow_run_rename.py::test_missing_flow_run_name",
"tests/utilities/test_serialization.py::TestNestedField::test_nested_calls_value_selection_fn",
"tests/utilities/test_serialization.py::TestNestedField::test_nested_calls_value_selection_fn_if_key_is_missing",
"tests/utilities/test_serialization.py::TestNestedField::test_nested_respects_missing",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[10]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[value1]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[11]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[value3]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[value4]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[value5]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_serialization[value6]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[10]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[value1]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[11]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[value3]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[value4]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[value5]",
"tests/utilities/test_serialization.py::TestJSONCompatibleField::test_json_deserialization[value6]",
"tests/utilities/test_serialization.py::TestBytesField::test_bytes_serialize",
"tests/utilities/test_serialization.py::TestBytesField::test_bytes_deserialize",
"tests/utilities/test_serialization.py::TestBytesField::test_bytes_serialize_none",
"tests/utilities/test_serialization.py::TestBytesField::test_bytes_deserialize_none",
"tests/utilities/test_serialization.py::TestUUIDField::test_serialize_uuid",
"tests/utilities/test_serialization.py::TestUUIDField::test_serialize_str",
"tests/utilities/test_serialization.py::TestUUIDField::test_deserialize_uuid",
"tests/utilities/test_serialization.py::TestUUIDField::test_deserialize_str",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt0]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt1]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt2]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt3]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt4]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt5]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_datetime[dt6]",
"tests/utilities/test_serialization.py::TestDateTimeTZField::test_deserialize_respects_dst",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_serialize_fn",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_serialize_invalid_fn",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_serialize_invalid_fn_without_validation",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_deserialize_fn",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_deserialize_invalid_fn",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_deserialize_invalid_fn_without_validation",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_serialize_none",
"tests/utilities/test_serialization.py::TestFunctionReferenceField::test_deserialize_none",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_outer_no_state",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_outer_with_state",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_invalid_fn",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_invalid_fn_without_validation",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_outer_no_state",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_outer_with_state",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_outer_with_state_doesnt_mutate_payload",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_invalid_fn",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_invalid_fn_without_validation",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_non_function_good_error",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_serialize_none",
"tests/utilities/test_serialization.py::TestStatefulFunctionReferenceField::test_deserialize_none",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_writes_version_to_serialized_object",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_doesnt_mutate_object_on_load",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_creates_object",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_does_not_create_object_if_arg_is_false",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_has_error_if_fields_cant_be_supplied_to_init",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_with_excluded_fields",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_creates_object_with_lambda",
"tests/utilities/test_serialization.py::TestObjectSchema::test_schema_handles_unknown_fields",
"tests/utilities/test_serialization.py::TestOneOfSchema::test_oneofschema_load_box",
"tests/utilities/test_serialization.py::TestOneOfSchema::test_oneofschema_handles_unknown_fields"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-20T16:40:15Z" | apache-2.0 |
|
PrefectHQ__prefect-3593 | diff --git a/changes/pr3593.yaml b/changes/pr3593.yaml
new file mode 100644
index 0000000000..e65bcec38a
--- /dev/null
+++ b/changes/pr3593.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "`prefect diagnostics` no longer displays keys that have values matching the default config - [#3593](https://github.com/PrefectHQ/prefect/pull/3593)"
diff --git a/docs/orchestration/agents/overview.md b/docs/orchestration/agents/overview.md
index 57c6762583..69f6e9bfbf 100644
--- a/docs/orchestration/agents/overview.md
+++ b/docs/orchestration/agents/overview.md
@@ -16,6 +16,12 @@ Once the agent submits the flow run for execution, the agent returns to waiting
If for any reason the agent encounters an issue deploying the flow run for execution then it will mark that flow run as `Failed` with the message set to the error it encountered.
+:::tip Agent tracking
+Agents are tracked and uniquely identified by Prefect Cloud or Prefect Server through a combination of agent name, labels, token <Badge text="Cloud"/>, and Core Version.
+
+Prefect doesn't talk to agents but instead relies on agents to talk to it, which means agents identify themselves and communicate their settings. Independent agent processes with the same configurations will be interpreted by Prefect as the same agent; changing any piece of the configuration will tell Prefect to track these agents independently.
+:::
+
### Installation
If Prefect is already [installed](../../core/getting_started/installation.html) no additional work is required to begin using Prefect agents!
@@ -97,7 +103,7 @@ Setting labels through the `PREFECT__CLOUD__AGENT__LABELS` environment variable
Agents can optionally run a private HTTP server for use as a health check.
Health checks can be used by common orchestration services (e.g.
-``supervisord``, ``docker``, ``kubernetes``, ...) to check that the agent is
+`supervisord`, `docker`, `kubernetes`, ...) to check that the agent is
running properly and take actions (such as restarting the agent) if it's not.
A few ways to enable:
@@ -114,8 +120,8 @@ $ prefect agent start --agent-address http://localhost:8080
$ export PREFECT__CLOUD__AGENT__AGENT_ADDRESS=http://localhost:8080
```
-- Setting ``cloud.agent.agent_address`` in your [configuration](../../core/concepts/configuration.html):
+- Setting `cloud.agent.agent_address` in your [configuration](../../core/concepts/configuration.html):
-If enabled, the HTTP health check will be available via the ``/api/health``
-route at the configured agent address. This route returns ``200 OK`` if the
+If enabled, the HTTP health check will be available via the `/api/health`
+route at the configured agent address. This route returns `200 OK` if the
agent is running and health, and will error otherwise.
diff --git a/src/prefect/utilities/collections.py b/src/prefect/utilities/collections.py
index 048d2cfd7f..4eee035892 100644
--- a/src/prefect/utilities/collections.py
+++ b/src/prefect/utilities/collections.py
@@ -1,10 +1,11 @@
import collections
from collections.abc import MutableMapping
-from typing import Any, Generator, Iterable, Iterator, Union, cast
+from typing import Any, Generator, Iterable, Iterator, Union, cast, TypeVar, Type
from box import Box
DictLike = Union[dict, "DotDict"]
+D = TypeVar("D", bound=Union[dict, MutableMapping])
def flatten_seq(seq: Iterable) -> Generator:
@@ -181,7 +182,7 @@ class CompoundKey(tuple):
pass
-def dict_to_flatdict(dct: dict, parent: CompoundKey = None) -> dict:
+def dict_to_flatdict(dct: DictLike, parent: CompoundKey = None) -> dict:
"""Converts a (nested) dictionary to a flattened representation.
Each key of the flat dict will be a CompoundKey tuple containing the "chain of keys"
@@ -207,19 +208,19 @@ def dict_to_flatdict(dct: dict, parent: CompoundKey = None) -> dict:
return dict(items)
-def flatdict_to_dict(dct: dict, dct_class: type = None) -> MutableMapping:
+def flatdict_to_dict(dct: dict, dct_class: Type[D] = None) -> D:
"""Converts a flattened dictionary back to a nested dictionary.
Args:
- dct (dict): The dictionary to be nested. Each key should be a
- `CompoundKey`, as generated by `dict_to_flatdict()`
+ `CompoundKey`, as generated by `dict_to_flatdict()`
- dct_class (type, optional): the type of the result; defaults to `dict`
Returns:
- - MutableMapping: A `MutableMapping` used to represent a nested dictionary
+ - D: An instance of `dct_class` used to represent a nested dictionary, bounded
+ as a MutableMapping or dict
"""
-
- result = (dct_class or dict)() # type: MutableMapping
+ result = cast(D, (dct_class or dict)())
for k, v in dct.items():
if isinstance(k, CompoundKey):
current_dict = result
diff --git a/src/prefect/utilities/diagnostics.py b/src/prefect/utilities/diagnostics.py
index e02673b1b7..16211fe21f 100644
--- a/src/prefect/utilities/diagnostics.py
+++ b/src/prefect/utilities/diagnostics.py
@@ -25,11 +25,14 @@ def system_information() -> dict:
def config_overrides(include_secret_names: bool = False) -> dict:
"""
- Get user configuration overrides
+ Get user configuration keys that differ from the default configuration. Will only
+ return an indication if a key is set and differs from the defaults, values are
+ *not* returned.
Args:
- - include_secret_names (bool, optional): toggle output of Secret names, defaults to False.
- Note: Secret values are never returned, only their names.
+ - include_secret_names (bool, optional): toggle inclusion of secret config keys
+ in the output. Note that secret values are never returned, only their names
+ when this is `True`. Defaults to `False`.
Returns:
- dict: a dictionary containing names of user configuration overrides
@@ -45,13 +48,42 @@ def config_overrides(include_secret_names: bool = False) -> dict:
}
return True
+ # Load the default config to compare values
+ default_config = dict()
+ default_config_path = prefect.configuration.DEFAULT_CONFIG
+ if default_config_path and os.path.isfile(default_config_path):
+ default_config = prefect.configuration.load_toml(default_config_path)
+
user_config = dict() # type: ignore
user_config_path = prefect.configuration.USER_CONFIG
if user_config_path and os.path.isfile(
str(prefect.configuration.interpolate_env_vars(user_config_path))
):
user_config = prefect.configuration.load_toml(user_config_path)
- user_config = _replace_values(user_config)
+
+ # Create some shorter names for fully specified imports avoiding circular
+ # dependencies in the utilities
+ dict_to_flatdict = prefect.utilities.collections.dict_to_flatdict
+ flatdict_to_dict = prefect.utilities.collections.flatdict_to_dict
+
+ # Drop keys from `user_config` that have values identical to `default_config`
+ # converting to flat dictionaries for ease of comparison
+ user_config = dict_to_flatdict(user_config)
+ default_config = dict_to_flatdict(default_config)
+
+ # Collect keys to drop in a list so we aren't dropping keys during iteration
+ keys_to_drop = [
+ key
+ for key, val in user_config.items()
+ if key in default_config and val == default_config[key]
+ ]
+
+ for key in keys_to_drop:
+ user_config.pop(key)
+
+ # Restore to a nested dictionary then replace values with bools
+ user_config = flatdict_to_dict(user_config)
+ user_config = _replace_values(user_config)
return dict(config_overrides=user_config)
| PrefectHQ/prefect | 695d8d50a56c9e4d1ba54c848787032042f3088d | diff --git a/tests/utilities/test_diagnostics.py b/tests/utilities/test_diagnostics.py
index b52d1daf04..f86cb74d4f 100644
--- a/tests/utilities/test_diagnostics.py
+++ b/tests/utilities/test_diagnostics.py
@@ -38,6 +38,39 @@ def test_config_overrides_populated(monkeypatch):
assert config_overrides["config_overrides"] == {"debug": True}
+def test_config_overrides_excludes_all_default_matches(monkeypatch):
+ monkeypatch.setattr(
+ "prefect.configuration.USER_CONFIG", prefect.configuration.DEFAULT_CONFIG
+ )
+
+ config_overrides = diagnostics.config_overrides()
+
+ assert config_overrides["config_overrides"] == {}
+
+
+def test_config_overrides_excludes_some_default_matches(monkeypatch, tmpdir):
+ # Load and modify the default config
+ default_config = prefect.configuration.load_toml(
+ prefect.configuration.DEFAULT_CONFIG
+ )
+ default_config["debug"] = True
+ default_config["cloud"]["agent"]["name"] = "foo"
+
+ # Write it as a new user config
+ user_config_path = str(tmpdir.join("config.toml"))
+ file = open(user_config_path, "w+")
+ toml.dump(default_config, file)
+ file.close()
+ monkeypatch.setattr("prefect.configuration.USER_CONFIG", user_config_path)
+
+ config_overrides = diagnostics.config_overrides()
+
+ assert config_overrides["config_overrides"] == {
+ "debug": True,
+ "cloud": {"agent": {"name": True}},
+ }
+
+
def test_config_overrides_secrets(monkeypatch):
with tempfile.TemporaryDirectory() as tempdir:
file = open("{}/config.toml".format(tempdir), "w+")
| Diagnostics displays values that do not differ from defaults
## Current behavior
If you copy the `prefect/src/prefect/config.toml` file to a local copy at `~/.prefect/config.toml` to easily have all the settings available, `prefect diagnostics` will display all of the settings as non-default even if they have not been changed.
## Proposed behavior
Resolving the differences between your `config.toml` and the built-in config should be relatively straightforward and would display more helpful diagnostic information.
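A rough sketch of one way the comparison could work, using plain dictionaries (helper names are hypothetical; the actual diagnostics implementation may differ):
```python
def flatten(d, parent=()):
    """Flatten a nested dict into {key_path_tuple: value} pairs."""
    items = {}
    for k, v in d.items():
        path = parent + (k,)
        if isinstance(v, dict):
            items.update(flatten(v, path))
        else:
            items[path] = v
    return items


def config_overrides(user_config, default_config):
    """Report only the keys whose values differ from the defaults."""
    defaults = flatten(default_config)
    return {
        path: value
        for path, value in flatten(user_config).items()
        if defaults.get(path) != value
    }


# Only `debug` was actually changed, so only it should be reported
defaults = {"debug": False, "cloud": {"agent": {"name": "agent"}}}
user = {"debug": True, "cloud": {"agent": {"name": "agent"}}}
assert config_overrides(user, defaults) == {("debug",): True}
```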
## Example
- User copies config file
- User posts diagnostics
- Support gets confused about all the non-defaults and cannot find what has actually changed
vs
- Only truly changed values are displayed
- Support has a really nice day | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities/test_diagnostics.py::test_config_overrides_excludes_all_default_matches",
"tests/utilities/test_diagnostics.py::test_config_overrides_excludes_some_default_matches"
] | [
"tests/utilities/test_diagnostics.py::test_system_information",
"tests/utilities/test_diagnostics.py::test_config_overrides_empty",
"tests/utilities/test_diagnostics.py::test_config_overrides_populated",
"tests/utilities/test_diagnostics.py::test_config_overrides_secrets",
"tests/utilities/test_diagnostics.py::test_config_overrides_no_secrets",
"tests/utilities/test_diagnostics.py::test_environment_variables_populated",
"tests/utilities/test_diagnostics.py::test_flow_information",
"tests/utilities/test_diagnostics.py::test_diagnostic_info_with_flow_no_secrets"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-29T17:54:24Z" | apache-2.0 |
|
PrefectHQ__prefect-3643 | diff --git a/changes/issue3619.yaml b/changes/issue3619.yaml
new file mode 100644
index 0000000000..8b23031c28
--- /dev/null
+++ b/changes/issue3619.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Checkpoint all iterations of Looped tasks - [#3619](https://github.com/PrefectHQ/prefect/issues/3619)"
diff --git a/changes/pr3624.yaml b/changes/pr3624.yaml
new file mode 100644
index 0000000000..8b156975eb
--- /dev/null
+++ b/changes/pr3624.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix breaking change in flow registration with old server versions - [#3624](https://github.com/PrefectHQ/prefect/pull/3624)"
diff --git a/src/prefect/client/client.py b/src/prefect/client/client.py
index 11e81be590..9c6bfdffcf 100644
--- a/src/prefect/client/client.py
+++ b/src/prefect/client/client.py
@@ -795,17 +795,20 @@ class Client:
if compressed:
serialized_flow = compress(serialized_flow)
+
+ inputs = dict(
+ project_id=(project[0].id if project else None),
+ serialized_flow=serialized_flow,
+ set_schedule_active=set_schedule_active,
+ version_group_id=version_group_id,
+ )
+ # Add newly added inputs only when set for backwards compatibility
+ if idempotency_key is not None:
+ inputs.update(idempotency_key=idempotency_key)
+
res = self.graphql(
create_mutation,
- variables=dict(
- input=dict(
- project_id=(project[0].id if project else None),
- serialized_flow=serialized_flow,
- set_schedule_active=set_schedule_active,
- version_group_id=version_group_id,
- idempotency_key=idempotency_key,
- )
- ),
+ variables=dict(input=inputs),
retry_on_api_error=False,
) # type: Any
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py
index 03f4faa235..056df601db 100644
--- a/src/prefect/engine/task_runner.py
+++ b/src/prefect/engine/task_runner.py
@@ -827,6 +827,7 @@ class TaskRunner(Runner):
value = None
raw_inputs = {k: r.value for k, r in inputs.items()}
+ new_state = None
try:
self.logger.debug(
"Task '{name}': Calling task.run() method...".format(
@@ -859,11 +860,10 @@ class TaskRunner(Runner):
except signals.LOOP as exc:
new_state = exc.state
assert isinstance(new_state, Looped)
- new_state.result = self.result.from_value(value=new_state.result)
+ value = new_state.result
new_state.message = exc.state.message or "Task is looping ({})".format(
new_state.loop_count
)
- return new_state
# checkpoint tasks if a result is present, except for when the user has opted out by
# disabling checkpointing
@@ -884,6 +884,10 @@ class TaskRunner(Runner):
else:
result = self.result.from_value(value=value)
+ if new_state is not None:
+ new_state.result = result
+ return new_state
+
state = Success(result=result, message="Task run succeeded.")
return state
| PrefectHQ/prefect | 7ef943f1e1dcea54b095403c1b1ca0b47eaf7883 | diff --git a/tests/engine/test_task_runner.py b/tests/engine/test_task_runner.py
index f6f4551a13..841623656d 100644
--- a/tests/engine/test_task_runner.py
+++ b/tests/engine/test_task_runner.py
@@ -2165,7 +2165,7 @@ class TestLooping:
assert state.result == 3
@pytest.mark.parametrize("checkpoint", [True, None])
- def test_looping_only_checkpoints_the_final_result(self, checkpoint):
+ def test_looping_checkpoints_all_iterations(self, checkpoint):
class Handler(ResultHandler):
data = []
@@ -2189,7 +2189,7 @@ class TestLooping:
state = TaskRunner(my_task).run(context={"checkpointing": True})
assert state.is_successful()
assert state.result == 3
- assert handler.data == [3]
+ assert handler.data == [1, 2, 3]
def test_looping_works_with_retries(self):
@prefect.task(max_retries=2, retry_delay=timedelta(seconds=0))
| Templated results are overwritten on raised LOOP
## Opened from the [Prefect Public Slack Community](https://join.slack.com/t/prefect-public/shared_invite/enQtNzE5OTU3OTQwNzc1LTQ5M2FkZmQzZjI0ODg1ZTBmOTc0ZjVjYWFjMWExZDAyYzBmYjVmMTE1NTQ1Y2IxZTllOTc4MmI3NzYxMDlhYWU)
**me1548**: Hey, I'm trying to use the `LOOP` and write the results on each iteration as documented [here](https://docs.prefect.io/core/advanced_tutorials/using-results.html#looping). However, it doesn't seem to work. It only writes the last iteration.
```
@task()
def log_output(result):
logger = prefect.context.get('logger')
    logger.info(result)
@task(result=LocalResult(dir='./results', location='test-{task_loop_count}.prefect'))
def loop_test():
loop_payload = prefect.context.get("task_loop_result", {})
n = loop_payload.get("n", 1)
print(n)
if n > 5:
return n
raise LOOP(f'Iteration {n}', result=dict(n=n+1))
with Flow("Postgres -> BigQuery") as flow:
x = loop_test()
log_output(x)
```
_See output logging and diagnostics in thread_
**me1548**:
```
[2020-11-05 07:11:07] INFO - prefect.FlowRunner | Beginning Flow run for 'Postgres -> BigQuery'
[2020-11-05 07:11:07] DEBUG - prefect.FlowRunner | Using executor type LocalExecutor
[2020-11-05 07:11:07] DEBUG - prefect.FlowRunner | Flow 'Postgres -> BigQuery': Handling state change from Scheduled to Running
[2020-11-05 07:11:07] INFO - prefect.TaskRunner | Task 'loop_test': Starting task run...
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
1
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Looped
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
2
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Looped
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
3
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Looped
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
4
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Looped
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
5
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Looped
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Calling task.run() method...
6
[2020-11-05 07:11:07] DEBUG - prefect.LocalResult | Starting to upload result to test-6.prefect...
[2020-11-05 07:11:07] DEBUG - prefect.LocalResult | Finished uploading result to /Users/joell/joell.dev/Scraper/scraper-next/prefect-sync/results/test-6.prefect...
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'loop_test': Handling state change from Running to Success
[2020-11-05 07:11:07] INFO - prefect.TaskRunner | Task 'loop_test': Finished task run for task with final state: 'Success'
[2020-11-05 07:11:07] INFO - prefect.TaskRunner | Task 'log_output': Starting task run...
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'log_output': Handling state change from Pending to Running
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'log_output': Calling task.run() method...
[2020-11-05 07:11:07] INFO - prefect.log_output | 6
[2020-11-05 07:11:07] DEBUG - prefect.TaskRunner | Task 'log_output': Handling state change from Running to Success
[2020-11-05 07:11:07] INFO - prefect.TaskRunner | Task 'log_output': Finished task run for task with final state: 'Success'
[2020-11-05 07:11:07] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded
[2020-11-05 07:11:07] DEBUG - prefect.FlowRunner | Flow 'Postgres -> BigQuery': Handling state change from Running to Success
{
"config_overrides": {},
"env_vars": [
"PREFECT__LOGGING__LEVEL",
"PREFECT__CONTEXT__SECRETS__POSTGRES",
"PREFECT__CONTEXT__SECRETS__GCP_CREDENTIALS",
"PREFECT__FLOWS__CHECKPOINTING"
],
"flow_information": {
"environment": {
"executor": true,
"labels": true,
"logger": true,
"metadata": {
"image": true
},
"on_exit": false,
"on_start": false,
"type": "LocalEnvironment"
},
"result": {
"type": "LocalResult"
},
"schedule": {},
"storage": {
"_flows": {
"Postgres -> BigQuery": true
},
"_labels": false,
"add_default_labels": true,
"directory": true,
"flows": {
"Postgres -> BigQuery": true
},
"path": false,
"result": true,
"secrets": false,
"stored_as_script": false,
"type": "Local"
},
"task_count": 2
},
"system_information": {
"platform": "macOS-10.15.7-x86_64-i386-64bit",
"prefect_backend": "server",
"prefect_version": "0.13.13",
"python_version": "3.8.6"
}
}
```
**znicholasbrown**: Hi <@U01DW286KGC> - thanks for reporting this - I can confirm each iteration of the loop is overwriting the previous result as [described here](https://docs.prefect.io/core/advanced_tutorials/using-results.html#looping), despite a templated location being present (this holds true for strings and callables on the location kwarg).
**znicholasbrown**: <@ULVA73B9P> open "Templated results are overwritten on raised LOOP"
Original thread can be found [here](https://prefect-community.slack.com/archives/CL09KU1K7/p1604560491067100?thread_ts=1604560491.067100&cid=CL09KU1K7).
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[True]",
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[None]"
] | [
"tests/engine/test_task_runner.py::test_task_runner_has_logger",
"tests/engine/test_task_runner.py::test_task_that_succeeds_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_raises_success_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_has_an_error_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_raises_fail_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_fails_gets_retried_up_to_max_retry_time",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_has_start_time_recognized",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_with_naive_datetime_is_assumed_UTC",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_gets_retried_even_if_max_retries_is_set",
"tests/engine/test_task_runner.py::test_task_that_raises_skip_gets_skipped",
"tests/engine/test_task_runner.py::test_task_that_has_upstream_skip_gets_skipped_with_informative_message",
"tests/engine/test_task_runner.py::test_task_that_is_running_doesnt_run",
"tests/engine/test_task_runner.py::test_running_task_that_already_has_finished_state_doesnt_run",
"tests/engine/test_task_runner.py::test_task_runner_preserves_error_type",
"tests/engine/test_task_runner.py::test_task_runner_raise_on_exception_when_task_errors",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_when_task_signals",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_mapping",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state0]",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state1]",
"tests/engine/test_task_runner.py::test_task_runner_accepts_dictionary_of_edges",
"tests/engine/test_task_runner.py::test_timeout_actually_stops_execution",
"tests/engine/test_task_runner.py::test_task_runner_can_handle_timeouts_by_default",
"tests/engine/test_task_runner.py::test_task_runner_handles_secrets",
"tests/engine/test_task_runner.py::test_task_that_starts_failed_doesnt_get_retried",
"tests/engine/test_task_runner.py::test_runner_checks_hashed_inputs_correctly",
"tests/engine/test_task_runner.py::TestContext::test_task_runner_inits_with_current_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_resume_in_context_if_state_is_resume",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_checkpointing_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_tags_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_submitted_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_queued_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_nested_meta_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_two_finished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_with_one_unfinished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_if_mapped_upstream_retrying",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_doesnt_raise_if_mapped_upstream_complete",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_unskipped_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_with_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_with_skipped_and_flag_set",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_if_single_mapped_upstream_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_if_single_mapped_upstream_skipped_and_edge_is_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_passes_when_context_is_resume",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_function_raise",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_returns_false",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state4]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state4]",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_empty_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_unkeyed_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_with_non_key_edges",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_failed",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_same_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_different_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration_fail",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_state_kwarg_is_prioritized_over_context_caches",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_with_cache_key_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_all_of_run_context_is_available_to_custom_cache_validators",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_inputs_are_cached[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state1]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state2]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_running_state",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state0]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state1]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state2]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_success_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_fail_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal_with_custom_message",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_skip_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_pause_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_run_with_error",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_invalid_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_hydrated_result_obj",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_correct_result_type",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_without_checkpoint",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_checkpointing",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_custom_formatter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs_inputs_take_precedence",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_input_named_value",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_for_parameter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_and_checkpointing_disabled",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state0]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state1]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state2]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state3]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state4]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_zero_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry_second_run",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_retry_caches_inputs",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_run_count_greater_than_max_retries",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_with_start_time",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_state_has_explicit_run_count_set",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state4]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_inputs]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_parameters]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[duration_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_inputs_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_parameters_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_cache_for",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_calls_state_handlers",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists_multiple_checks",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_uses_callable",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_callable_uses_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_respects_multiple_flow_runs",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_run_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_task_inputs",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state3]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state4]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_none_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state1]",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_not_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_trigger_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_can_return_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_failure",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_respect_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_handle_retry_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_errors_are_logged",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_triggerfailed",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_mapped_parent",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_raises_signal_is_trapped",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_has_error_is_trapped",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_failed_if_no_success_upstream",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_sets_n_map_states",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_handles_upstream_mapped_states",
"tests/engine/test_task_runner.py::test_task_runner_skips_upstream_check_for_parent_mapped_task",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_manual_only_triggers",
"tests/engine/test_task_runner.py::test_task_runner_passes_manual_only_trigger_when_resume_state_is_passed",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_internally_raised_pauses",
"tests/engine/test_task_runner.py::test_task_runner_bypasses_pause_when_requested",
"tests/engine/test_task_runner.py::test_mapped_tasks_parents_and_children_respond_to_individual_triggers",
"tests/engine/test_task_runner.py::test_mapped_tasks_parent_regenerates_child_pipeline",
"tests/engine/test_task_runner.py::test_retry_has_updated_metadata",
"tests/engine/test_task_runner.py::test_pending_raised_from_endrun_has_updated_metadata",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_task_runner_provides_logger",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works",
"tests/engine/test_task_runner.py::TestLooping::test_looping_calls_state_handlers_appropriately",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_starting",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_finished",
"tests/engine/test_task_runner.py::TestLooping::test_looping_accumulates",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works_with_retries",
"tests/engine/test_task_runner.py::TestLooping::test_loop_results_work_with_retries",
"tests/engine/test_task_runner.py::test_task_tags_are_attached_to_all_states",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout_disabled",
"tests/engine/test_task_runner.py::test_task_runner_logs_map_index_for_mapped_tasks"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-10T19:48:33Z" | apache-2.0 |
|
PrefectHQ__prefect-3645 | diff --git a/changes/issue3519.yaml b/changes/issue3519.yaml
new file mode 100644
index 0000000000..d912d92da7
--- /dev/null
+++ b/changes/issue3519.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix issue with retrying mapped pipelines on dask - [#3519](https://github.com/PrefectHQ/prefect/issues/3519)"
diff --git a/changes/issue3603.yaml b/changes/issue3603.yaml
new file mode 100644
index 0000000000..d48de4ecae
--- /dev/null
+++ b/changes/issue3603.yaml
@@ -0,0 +1,5 @@
+fix:
+ - "Parsing backslashes in environment variable values without escape - [#3603](https://github.com/PrefectHQ/prefect/issues/3603)"
+
+contributor:
+ - "[JoΓ«l Luijmes](https://github.com/joelluijmes)"
\ No newline at end of file
diff --git a/src/prefect/configuration.py b/src/prefect/configuration.py
index 8251ff5b5f..025ca3f181 100644
--- a/src/prefect/configuration.py
+++ b/src/prefect/configuration.py
@@ -206,11 +206,6 @@ def interpolate_config(config: dict, env_var_prefix: str = None) -> Config:
if "__" not in env_var:
continue
- # env vars with escaped characters are interpreted as literal "\", which
- # Python helpfully escapes with a second "\". This step makes sure that
- # escaped characters are properly interpreted.
- value = cast(str, env_var_value.encode().decode("unicode_escape"))
-
# place the env var in the flat config as a compound key
if env_var_option.upper().startswith("CONTEXT__SECRETS"):
formatted_option = env_var_option.split("__")
@@ -224,7 +219,7 @@ def interpolate_config(config: dict, env_var_prefix: str = None) -> Config:
)
flat_config[config_option] = string_to_type(
- cast(str, interpolate_env_vars(value))
+ cast(str, interpolate_env_vars(env_var_value))
)
# interpolate any env vars referenced
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py
index 056df601db..501a00cbec 100644
--- a/src/prefect/engine/task_runner.py
+++ b/src/prefect/engine/task_runner.py
@@ -430,8 +430,18 @@ class TaskRunner(Runner):
- ENDRUN: either way, we dont continue past this point
"""
if state.is_mapped():
+ # this indicates we are executing a re-run of a mapped pipeline;
+ # in this case, we populate both `map_states` and `cached_inputs`
+ # to ensure the flow runner can properly regenerate the child tasks,
+ # regardless of whether we mapped over an exchanged piece of data
+ # or a non-data-exchanging upstream dependency
if len(state.map_states) == 0 and state.n_map_states > 0: # type: ignore
state.map_states = [None] * state.n_map_states # type: ignore
+ state.cached_inputs = {
+ edge.key: state._result # type: ignore
+ for edge, state in upstream_states.items()
+ if edge.key
+ }
raise ENDRUN(state)
# we can't map if there are no success states with iterables upstream
diff --git a/src/prefect/utilities/executors.py b/src/prefect/utilities/executors.py
index 9dbff70d26..2001cbee4e 100644
--- a/src/prefect/utilities/executors.py
+++ b/src/prefect/utilities/executors.py
@@ -506,21 +506,14 @@ def prepare_upstream_states_for_mapping(
not state.is_mapped()
or upstream_state._result != prefect.engine.result.NoResult
):
- # this line should never be hit due to a check
- # in the TaskRunner when evaluating the mapped parent
if not hasattr(upstream_state.result, "__getitem__"):
- raise TypeError(
- (
- "Cannot map over unsubscriptable object of type {t}: {val}..."
- ).format(
- t=type(upstream_state.result),
- val=repr(upstream_state.result)[:10],
- )
- )
- upstream_result = upstream_state._result.from_value( # type: ignore
- upstream_state.result[i]
- )
+ value = None
+ else:
+ value = upstream_state.result[i]
+ upstream_result = upstream_state._result.from_value(value) # type: ignore
states[edge].result = upstream_result
+ if state.map_states and i >= len(state.map_states): # type: ignore
+ raise IndexError()
elif state.is_mapped():
if i >= len(state.map_states): # type: ignore
raise IndexError()
| PrefectHQ/prefect | 5f6b21e37093e2ef8caea676dfbbb02a3d1f5413 | diff --git a/tests/core/test_task_map.py b/tests/core/test_task_map.py
index 7646d486d7..18fbd48d27 100644
--- a/tests/core/test_task_map.py
+++ b/tests/core/test_task_map.py
@@ -1154,3 +1154,34 @@ class TestFlatMap:
state.result[z].message
== "At least one upstream state has an unmappable result."
)
+
+
+def test_mapped_retries_regenerate_child_pipelines():
+ """
+ This test sets up a situation analogous to one found in Cloud: if a reduce task fails, and a user
+ retries it in the future, we want to make sure that the mapped children pipelines are correctly
+ regenerated. When run against Cloud, these child tasks will correctly query for their states and
+ the run will proceed with the correct data.
+
+ This test mimics this scenario by running this flow with a provided set of states that only contain
+ metadata about the runs with no actual data to reference. The child runs should still be produced
+ based only on the n_map_states attribute of the parent.
+ """
+ idt = IdTask()
+ ll = ListTask()
+ with Flow("test") as flow:
+ mapped = idt.map(ll)
+ reduced = idt(mapped)
+
+ flow_state = flow.run()
+ assert flow_state.is_successful()
+ assert flow_state.result[mapped].is_mapped()
+ assert flow_state.result[reduced].is_successful()
+ assert flow_state.result[reduced].result == [1, 2, 3]
+
+ second_pass_states = {mapped: Mapped(n_map_states=3), ll: Success(result=Result())}
+
+ new_state = flow.run(task_states=second_pass_states)
+ assert new_state.is_successful()
+ assert new_state.result[mapped].is_mapped()
+ assert new_state.result[reduced].is_successful()
diff --git a/tests/engine/test_task_runner.py b/tests/engine/test_task_runner.py
index 841623656d..5cc5bd63af 100644
--- a/tests/engine/test_task_runner.py
+++ b/tests/engine/test_task_runner.py
@@ -1891,6 +1891,23 @@ class TestCheckTaskReadyToMapStep:
)
assert exc.value.state.is_mapped()
+ @pytest.mark.parametrize("state", [Pending(), Mapped(), Scheduled()])
+ def test_run_mapped_returns_cached_inputs_if_rerun(self, state):
+ """
+ This is important to communicate result information back to the
+ FlowRunner for regenerating the mapped children.
+ """
+ result = LocalResult(value="y")
+ edge = Edge(Task(), Task(), key="x")
+ with pytest.raises(ENDRUN) as exc:
+ TaskRunner(task=Task()).check_task_ready_to_map(
+ state=state, upstream_states={edge: Success(result=result)}
+ )
+ if state.is_mapped():
+ assert exc.value.state.cached_inputs == dict(x=result)
+ else:
+ assert exc.value.state.cached_inputs == dict()
+
def test_run_mapped_returns_failed_if_no_success_upstream(self):
with pytest.raises(ENDRUN) as exc:
TaskRunner(task=Task()).check_task_ready_to_map(
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index d6a5e457f1..5cd9cba9cd 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -67,7 +67,7 @@ def config(test_config_file_path, monkeypatch):
)
monkeypatch.setenv("PATH", "1/2/3")
monkeypatch.setenv(
- "PREFECT_TEST__ENV_VARS__ESCAPED_CHARACTERS", r"line 1\nline 2\rand 3\tand 4"
+ "PREFECT_TEST__ENV_VARS__ESCAPED_CHARACTERS", "line 1\nline 2\rand 3\tand 4"
)
yield configuration.load_configuration(
| Cannot restart flow run after mapped task
An example flow that maps and then fails:
```
from datetime import timedelta

from prefect import *
from prefect.engine.executors import DaskExecutor
from prefect.engine.results import GCSResult
from prefect.environments.storage import GCS

with Flow("TestFlow", result=GCSResult(bucket="model_bigquery_tmp"), storage=GCS(bucket="model_bigquery_tmp"), executor=DaskExecutor("brett-daskscheduler:8786")) as TestFlow:

    @task(max_retries=1, retry_delay=timedelta(seconds=0.1))
    def generate_random_list():
        n = 10
        return list(range(n))

    @task(max_retries=1, retry_delay=timedelta(seconds=0.1))
    def wait(n):
        from time import sleep
        sleep(n)
        return n

    @task(max_retries=1, retry_delay=timedelta(seconds=0.1))
    def fail(values):
        raise ValueError(f"n: {len(values)}")

    values = wait.map(generate_random_list())
    fail(values)
```
On restarting I get the following error:
```
brett_replicahq restarted this flow run
Submitted for execution: Job prefect-job-871b5d1e
Downloading testflow/2020-10-16t14-25-00-009370-00-00 from model_bigquery_tmp
Beginning Flow run for 'TestFlow'
Task 'wait': Starting task run...
Task 'wait': finished task run for task with final state: 'Mapped'
Unexpected error: TypeError("Cannot map over unsubscriptable object of type <class 'NoneType'>: None...")
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/prefect/engine/runner.py", line 48, in inner
new_state = method(self, state, *args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/prefect/engine/flow_runner.py", line 526, in get_flow_run_state
executors.prepare_upstream_states_for_mapping(
File "/usr/local/lib/python3.8/site-packages/prefect/utilities/executors.py", line 372, in prepare_upstream_states_for_mapping
raise TypeError(
TypeError: Cannot map over unsubscriptable object of type <class 'NoneType'>: None...
```
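For orientation, the error is raised while the flow runner rebuilds the mapped children from a parent `Mapped` state that, after the restart, carries only metadata and no result data. A stripped-down sketch of the same map-then-reduce shape — no GCS results, storage, or Dask executor, and with hypothetical task names — using only the `@task` / `Flow` / `.map()` API already shown above:

```python
from prefect import Flow, task


@task
def numbers():
    return [1, 2, 3]


@task
def identity(n):
    return n


@task
def reduce_all(values):
    # downstream "reduce" task consuming all mapped results at once
    return sum(values)


with Flow("map-then-reduce-sketch") as flow:
    mapped = identity.map(numbers())
    reduced = reduce_all(mapped)

# A plain local run of this shape succeeds; the failure quoted above was
# reported when such a run is restarted and the mapped parent has to be
# regenerated without its original result data.
state = flow.run()
```
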
cc @cicdw seems related to #3322 ? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/core/test_task_map.py::test_mapped_retries_regenerate_child_pipelines",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state1]"
] | [
"tests/core/test_task_map.py::test_map_returns_a_task_copy",
"tests/core/test_task_map.py::test_map_returns_a_task_copy_without_context",
"tests/core/test_task_map.py::test_calling_map_with_bind_returns_self",
"tests/core/test_task_map.py::test_map_spawns_new_tasks[local]",
"tests/core/test_task_map.py::test_map_spawns_new_tasks[sync]",
"tests/core/test_task_map.py::test_map_spawns_new_tasks[mproc]",
"tests/core/test_task_map.py::test_map_spawns_new_tasks[mthread]",
"tests/core/test_task_map.py::test_map_over_parameters[local]",
"tests/core/test_task_map.py::test_map_over_parameters[sync]",
"tests/core/test_task_map.py::test_map_over_parameters[mproc]",
"tests/core/test_task_map.py::test_map_over_parameters[mthread]",
"tests/core/test_task_map.py::test_map_composition[local]",
"tests/core/test_task_map.py::test_map_composition[sync]",
"tests/core/test_task_map.py::test_map_composition[mproc]",
"tests/core/test_task_map.py::test_map_composition[mthread]",
"tests/core/test_task_map.py::test_deep_map_composition[local]",
"tests/core/test_task_map.py::test_deep_map_composition[sync]",
"tests/core/test_task_map.py::test_deep_map_composition[mproc]",
"tests/core/test_task_map.py::test_deep_map_composition[mthread]",
"tests/core/test_task_map.py::test_multiple_map_arguments[local]",
"tests/core/test_task_map.py::test_multiple_map_arguments[sync]",
"tests/core/test_task_map.py::test_multiple_map_arguments[mproc]",
"tests/core/test_task_map.py::test_multiple_map_arguments[mthread]",
"tests/core/test_task_map.py::test_mapping_over_no_successful_upstreams[local]",
"tests/core/test_task_map.py::test_mapping_over_no_successful_upstreams[sync]",
"tests/core/test_task_map.py::test_mapping_over_no_successful_upstreams[mproc]",
"tests/core/test_task_map.py::test_mapping_over_no_successful_upstreams[mthread]",
"tests/core/test_task_map.py::test_mapping_over_one_unmappable_input[local]",
"tests/core/test_task_map.py::test_mapping_over_one_unmappable_input[sync]",
"tests/core/test_task_map.py::test_mapping_over_one_unmappable_input[mproc]",
"tests/core/test_task_map.py::test_mapping_over_one_unmappable_input[mthread]",
"tests/core/test_task_map.py::test_map_failures_dont_leak_out[local]",
"tests/core/test_task_map.py::test_map_failures_dont_leak_out[sync]",
"tests/core/test_task_map.py::test_map_failures_dont_leak_out[mproc]",
"tests/core/test_task_map.py::test_map_failures_dont_leak_out[mthread]",
"tests/core/test_task_map.py::test_map_skips_return_exception_as_result[local]",
"tests/core/test_task_map.py::test_map_skips_return_exception_as_result[sync]",
"tests/core/test_task_map.py::test_map_skips_return_exception_as_result[mproc]",
"tests/core/test_task_map.py::test_map_skips_return_exception_as_result[mthread]",
"tests/core/test_task_map.py::test_upstream_skip_signals_are_handled_properly[local]",
"tests/core/test_task_map.py::test_upstream_skip_signals_are_handled_properly[sync]",
"tests/core/test_task_map.py::test_upstream_skip_signals_are_handled_properly[mproc]",
"tests/core/test_task_map.py::test_upstream_skip_signals_are_handled_properly[mthread]",
"tests/core/test_task_map.py::test_upstream_skipped_states_are_handled_properly[local]",
"tests/core/test_task_map.py::test_upstream_skipped_states_are_handled_properly[sync]",
"tests/core/test_task_map.py::test_upstream_skipped_states_are_handled_properly[mproc]",
"tests/core/test_task_map.py::test_upstream_skipped_states_are_handled_properly[mthread]",
"tests/core/test_task_map.py::test_map_skips_dont_leak_out[local]",
"tests/core/test_task_map.py::test_map_skips_dont_leak_out[sync]",
"tests/core/test_task_map.py::test_map_skips_dont_leak_out[mproc]",
"tests/core/test_task_map.py::test_map_skips_dont_leak_out[mthread]",
"tests/core/test_task_map.py::test_map_handles_upstream_empty[local]",
"tests/core/test_task_map.py::test_map_handles_upstream_empty[sync]",
"tests/core/test_task_map.py::test_map_handles_upstream_empty[mproc]",
"tests/core/test_task_map.py::test_map_handles_upstream_empty[mthread]",
"tests/core/test_task_map.py::test_map_handles_non_keyed_upstream_empty[local]",
"tests/core/test_task_map.py::test_map_handles_non_keyed_upstream_empty[sync]",
"tests/core/test_task_map.py::test_map_handles_non_keyed_upstream_empty[mproc]",
"tests/core/test_task_map.py::test_map_handles_non_keyed_upstream_empty[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_fixed_kwargs[local]",
"tests/core/test_task_map.py::test_map_can_handle_fixed_kwargs[sync]",
"tests/core/test_task_map.py::test_map_can_handle_fixed_kwargs[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_fixed_kwargs[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_upstreams[local]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_upstreams[sync]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_upstreams[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_upstreams[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams[local]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams[sync]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args[local]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args[sync]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args_2[local]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args_2[sync]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args_2[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_nonmapped_upstreams_and_mapped_args_2[mthread]",
"tests/core/test_task_map.py::test_map_tracks_non_mapped_upstream_tasks[local]",
"tests/core/test_task_map.py::test_map_tracks_non_mapped_upstream_tasks[sync]",
"tests/core/test_task_map.py::test_map_tracks_non_mapped_upstream_tasks[mproc]",
"tests/core/test_task_map.py::test_map_tracks_non_mapped_upstream_tasks[mthread]",
"tests/core/test_task_map.py::test_map_preserves_flowrunners_run_context[local]",
"tests/core/test_task_map.py::test_map_preserves_flowrunners_run_context[sync]",
"tests/core/test_task_map.py::test_map_preserves_flowrunners_run_context[mproc]",
"tests/core/test_task_map.py::test_map_preserves_flowrunners_run_context[mthread]",
"tests/core/test_task_map.py::test_map_allows_for_retries[local]",
"tests/core/test_task_map.py::test_map_allows_for_retries[sync]",
"tests/core/test_task_map.py::test_map_allows_for_retries[mproc]",
"tests/core/test_task_map.py::test_map_allows_for_retries[mthread]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams_and_mapped_args[local]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams_and_mapped_args[sync]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams_and_mapped_args[mproc]",
"tests/core/test_task_map.py::test_map_can_handle_nonkeyed_mapped_upstreams_and_mapped_args[mthread]",
"tests/core/test_task_map.py::test_map_behaves_like_zip_with_differing_length_results[local]",
"tests/core/test_task_map.py::test_map_behaves_like_zip_with_differing_length_results[mproc]",
"tests/core/test_task_map.py::test_map_behaves_like_zip_with_differing_length_results[mthread]",
"tests/core/test_task_map.py::test_map_behaves_like_zip_with_differing_length_results[sync]",
"tests/core/test_task_map.py::test_map_allows_retries_2[local]",
"tests/core/test_task_map.py::test_map_allows_retries_2[sync]",
"tests/core/test_task_map.py::test_map_allows_retries_2[mproc]",
"tests/core/test_task_map.py::test_map_allows_retries_2[mthread]",
"tests/core/test_task_map.py::test_reduce_task_honors_trigger_across_all_mapped_states[local]",
"tests/core/test_task_map.py::test_reduce_task_honors_trigger_across_all_mapped_states[sync]",
"tests/core/test_task_map.py::test_reduce_task_honors_trigger_across_all_mapped_states[mproc]",
"tests/core/test_task_map.py::test_reduce_task_honors_trigger_across_all_mapped_states[mthread]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states[local]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states[sync]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states[mproc]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states[mthread]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states_for_deep_pipelines[local]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states_for_deep_pipelines[sync]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states_for_deep_pipelines[mproc]",
"tests/core/test_task_map.py::test_reduce_task_properly_applies_trigger_across_all_mapped_states_for_deep_pipelines[mthread]",
"tests/core/test_task_map.py::test_task_map_downstreams_handle_single_failures[local]",
"tests/core/test_task_map.py::test_task_map_downstreams_handle_single_failures[sync]",
"tests/core/test_task_map.py::test_task_map_downstreams_handle_single_failures[mproc]",
"tests/core/test_task_map.py::test_task_map_downstreams_handle_single_failures[mthread]",
"tests/core/test_task_map.py::test_task_map_can_be_passed_to_upstream_with_and_without_map[local]",
"tests/core/test_task_map.py::test_task_map_can_be_passed_to_upstream_with_and_without_map[sync]",
"tests/core/test_task_map.py::test_task_map_can_be_passed_to_upstream_with_and_without_map[mproc]",
"tests/core/test_task_map.py::test_task_map_can_be_passed_to_upstream_with_and_without_map[mthread]",
"tests/core/test_task_map.py::test_task_map_doesnt_assume_purity_of_functions[local]",
"tests/core/test_task_map.py::test_task_map_doesnt_assume_purity_of_functions[sync]",
"tests/core/test_task_map.py::test_task_map_doesnt_assume_purity_of_functions[mproc]",
"tests/core/test_task_map.py::test_task_map_doesnt_assume_purity_of_functions[mthread]",
"tests/core/test_task_map.py::test_map_reduce[local]",
"tests/core/test_task_map.py::test_map_reduce[sync]",
"tests/core/test_task_map.py::test_map_reduce[mproc]",
"tests/core/test_task_map.py::test_map_reduce[mthread]",
"tests/core/test_task_map.py::test_map_over_map_and_unmapped[local]",
"tests/core/test_task_map.py::test_map_over_map_and_unmapped[sync]",
"tests/core/test_task_map.py::test_map_over_map_and_unmapped[mproc]",
"tests/core/test_task_map.py::test_map_over_map_and_unmapped[mthread]",
"tests/core/test_task_map.py::test_task_map_with_all_inputs_unmapped[1-2-3]",
"tests/core/test_task_map.py::test_task_map_with_all_inputs_unmapped[x1-y1-out1]",
"tests/core/test_task_map.py::test_task_map_with_no_upstream_results_and_a_mapped_state[local]",
"tests/core/test_task_map.py::test_task_map_with_no_upstream_results_and_a_mapped_state[sync]",
"tests/core/test_task_map.py::test_task_map_with_no_upstream_results_and_a_mapped_state[mproc]",
"tests/core/test_task_map.py::test_task_map_with_no_upstream_results_and_a_mapped_state[mthread]",
"tests/core/test_task_map.py::test_unmapped_on_mapped[local]",
"tests/core/test_task_map.py::test_unmapped_on_mapped[sync]",
"tests/core/test_task_map.py::test_unmapped_on_mapped[mproc]",
"tests/core/test_task_map.py::test_unmapped_on_mapped[mthread]",
"tests/core/test_task_map.py::test_all_tasks_only_called_once[local]",
"tests/core/test_task_map.py::test_all_tasks_only_called_once[sync]",
"tests/core/test_task_map.py::test_all_tasks_only_called_once[mproc]",
"tests/core/test_task_map.py::test_all_tasks_only_called_once[mthread]",
"tests/core/test_task_map.py::test_mapping_over_constants",
"tests/core/test_task_map.py::TestLooping::test_looping_works_with_mapping",
"tests/core/test_task_map.py::TestLooping::test_looping_works_with_mapping_and_individual_retries",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_constant[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_constant[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_constant[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_constant[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_task_result[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_task_result[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_task_result[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_task_result[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_mapped_result[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_mapped_result[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_mapped_result[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_mapped_result[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_flatmapped_result[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_flatmapped_result[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_flatmapped_result[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_flatmapped_result[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_reduced_result[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_reduced_result[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_reduced_result[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_reduced_result[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_unnested_input[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_unnested_input[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_unnested_input[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_unnested_input[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unnested_input[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unnested_input[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unnested_input[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unnested_input[mthread]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unmappable_input[local]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unmappable_input[sync]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unmappable_input[mproc]",
"tests/core/test_task_map.py::TestFlatMap::test_flatmap_one_unmappable_input[mthread]",
"tests/engine/test_task_runner.py::test_task_runner_has_logger",
"tests/engine/test_task_runner.py::test_task_that_succeeds_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_raises_success_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_has_an_error_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_raises_fail_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_fails_gets_retried_up_to_max_retry_time",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_has_start_time_recognized",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_with_naive_datetime_is_assumed_UTC",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_gets_retried_even_if_max_retries_is_set",
"tests/engine/test_task_runner.py::test_task_that_raises_skip_gets_skipped",
"tests/engine/test_task_runner.py::test_task_that_has_upstream_skip_gets_skipped_with_informative_message",
"tests/engine/test_task_runner.py::test_task_that_is_running_doesnt_run",
"tests/engine/test_task_runner.py::test_running_task_that_already_has_finished_state_doesnt_run",
"tests/engine/test_task_runner.py::test_task_runner_preserves_error_type",
"tests/engine/test_task_runner.py::test_task_runner_raise_on_exception_when_task_errors",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_when_task_signals",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_mapping",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state0]",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state1]",
"tests/engine/test_task_runner.py::test_task_runner_accepts_dictionary_of_edges",
"tests/engine/test_task_runner.py::test_timeout_actually_stops_execution",
"tests/engine/test_task_runner.py::test_task_runner_can_handle_timeouts_by_default",
"tests/engine/test_task_runner.py::test_task_runner_handles_secrets",
"tests/engine/test_task_runner.py::test_task_that_starts_failed_doesnt_get_retried",
"tests/engine/test_task_runner.py::test_runner_checks_hashed_inputs_correctly",
"tests/engine/test_task_runner.py::TestContext::test_task_runner_inits_with_current_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_resume_in_context_if_state_is_resume",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_checkpointing_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_tags_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_submitted_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_queued_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_nested_meta_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_two_finished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_with_one_unfinished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_if_mapped_upstream_retrying",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_doesnt_raise_if_mapped_upstream_complete",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_unskipped_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_with_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_with_skipped_and_flag_set",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_if_single_mapped_upstream_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_if_single_mapped_upstream_skipped_and_edge_is_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_passes_when_context_is_resume",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_function_raise",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_returns_false",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state4]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state4]",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_empty_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_unkeyed_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_with_non_key_edges",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_failed",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_same_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_different_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration_fail",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_state_kwarg_is_prioritized_over_context_caches",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_with_cache_key_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_all_of_run_context_is_available_to_custom_cache_validators",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_inputs_are_cached[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state1]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state2]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_running_state",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state0]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state1]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state2]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_success_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_fail_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal_with_custom_message",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_skip_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_pause_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_run_with_error",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_invalid_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_hydrated_result_obj",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_correct_result_type",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_without_checkpoint",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_checkpointing",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_custom_formatter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs_inputs_take_precedence",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_input_named_value",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_for_parameter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_and_checkpointing_disabled",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state0]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state1]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state2]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state3]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state4]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_zero_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry_second_run",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_retry_caches_inputs",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_run_count_greater_than_max_retries",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_with_start_time",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_state_has_explicit_run_count_set",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state4]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_inputs]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_parameters]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[duration_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_inputs_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_parameters_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_cache_for",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_calls_state_handlers",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists_multiple_checks",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_uses_callable",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_callable_uses_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_respects_multiple_flow_runs",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_run_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_task_inputs",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state3]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state4]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_none_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state1]",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_not_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_trigger_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_can_return_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_failure",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_respect_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_handle_retry_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_errors_are_logged",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_triggerfailed",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_mapped_parent",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_raises_signal_is_trapped",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_has_error_is_trapped",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_failed_if_no_success_upstream",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_sets_n_map_states",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_handles_upstream_mapped_states",
"tests/engine/test_task_runner.py::test_task_runner_skips_upstream_check_for_parent_mapped_task",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_manual_only_triggers",
"tests/engine/test_task_runner.py::test_task_runner_passes_manual_only_trigger_when_resume_state_is_passed",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_internally_raised_pauses",
"tests/engine/test_task_runner.py::test_task_runner_bypasses_pause_when_requested",
"tests/engine/test_task_runner.py::test_mapped_tasks_parents_and_children_respond_to_individual_triggers",
"tests/engine/test_task_runner.py::test_mapped_tasks_parent_regenerates_child_pipeline",
"tests/engine/test_task_runner.py::test_retry_has_updated_metadata",
"tests/engine/test_task_runner.py::test_pending_raised_from_endrun_has_updated_metadata",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_task_runner_provides_logger",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works",
"tests/engine/test_task_runner.py::TestLooping::test_looping_calls_state_handlers_appropriately",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_starting",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_finished",
"tests/engine/test_task_runner.py::TestLooping::test_looping_accumulates",
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[True]",
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[None]",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works_with_retries",
"tests/engine/test_task_runner.py::TestLooping::test_loop_results_work_with_retries",
"tests/engine/test_task_runner.py::test_task_tags_are_attached_to_all_states",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout_disabled",
"tests/engine/test_task_runner.py::test_task_runner_logs_map_index_for_mapped_tasks",
"tests/test_configuration.py::test_keys",
"tests/test_configuration.py::test_dicts_are_created",
"tests/test_configuration.py::test_getattr_missing",
"tests/test_configuration.py::test_debug",
"tests/test_configuration.py::test_general",
"tests/test_configuration.py::test_general_nested",
"tests/test_configuration.py::test_interpolation",
"tests/test_configuration.py::test_env_var_interpolation",
"tests/test_configuration.py::test_string_to_type_function",
"tests/test_configuration.py::test_env_var_interpolation_with_type_assignment",
"tests/test_configuration.py::test_env_var_interpolation_with_type_interpolation",
"tests/test_configuration.py::test_env_var_interpolation_doesnt_match_internal_dollar_sign",
"tests/test_configuration.py::test_env_var_interpolation_with_nonexistant_key",
"tests/test_configuration.py::test_env_var_overrides_new_key",
"tests/test_configuration.py::test_env_var_creates_nested_keys",
"tests/test_configuration.py::test_env_var_escaped",
"tests/test_configuration.py::test_copy_leaves_values_mutable",
"tests/test_configuration.py::test_copy_doesnt_make_keys_mutable",
"tests/test_configuration.py::TestUserConfig::test_load_user_config",
"tests/test_configuration.py::TestProcessTaskDefaults::test_process_task_defaults_called_on_prefect_config",
"tests/test_configuration.py::TestProcessTaskDefaults::test_max_retries_is_0_if_not_set",
"tests/test_configuration.py::TestProcessTaskDefaults::test_max_retries_is_0_if_false",
"tests/test_configuration.py::TestProcessTaskDefaults::test_max_retries_is_0_if_none",
"tests/test_configuration.py::TestProcessTaskDefaults::test_max_retries_is_0_if_0",
"tests/test_configuration.py::TestProcessTaskDefaults::test_max_retries_ignored_if_set",
"tests/test_configuration.py::TestProcessTaskDefaults::test_retry_delay_is_none_if_not_set",
"tests/test_configuration.py::TestProcessTaskDefaults::test_retry_delay_is_none_if_false",
"tests/test_configuration.py::TestProcessTaskDefaults::test_retry_delay_is_none_if_none",
"tests/test_configuration.py::TestProcessTaskDefaults::test_retry_delay_is_timedelta_if_int",
"tests/test_configuration.py::TestProcessTaskDefaults::test_retry_delay_is_timedelta_if_timedelta",
"tests/test_configuration.py::TestProcessTaskDefaults::test_timeout_is_none_if_not_set",
"tests/test_configuration.py::TestProcessTaskDefaults::test_timeout_is_none_if_false",
"tests/test_configuration.py::TestProcessTaskDefaults::test_timeout_is_none_if_none",
"tests/test_configuration.py::TestProcessTaskDefaults::test_timeout_is_timedelta_if_int",
"tests/test_configuration.py::TestProcessTaskDefaults::test_timeout_is_timedelta_if_timedelta",
"tests/test_configuration.py::TestConfigValidation::test_invalid_keys_raise_error",
"tests/test_configuration.py::TestConfigValidation::test_invalid_env_var_raises_error",
"tests/test_configuration.py::TestConfigValidation::test_mixed_case_keys_are_ok",
"tests/test_configuration.py::TestConfigValidation::test_env_vars_are_interpolated_as_lower_case",
"tests/test_configuration.py::TestConfigValidation::test_env_vars_for_secrets_alone_are_not_lower_cased"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-11T04:47:09Z" | apache-2.0 |
|
PrefectHQ__prefect-3654 | diff --git a/.circleci/config.yml b/.circleci/config.yml
index d6aa7c4fe7..2246d1b5a9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -31,7 +31,7 @@ jobs:
check_static_analysis:
docker:
- - image: python:3.7
+ - image: python:3.8
auth:
username: $DOCKER_HUB_USER
password: $DOCKER_HUB_PW
@@ -97,7 +97,7 @@ jobs:
command: pytest docs -vvrfEsx
# test a standard install of prefect
- # with all requriements pinned to their lowest allowed versions
+ # with all requirements pinned to their lowest allowed versions
# to ensure our requirements.txt file is accurate
test_lower_prefect:
docker:
@@ -271,6 +271,38 @@ jobs:
name: Run tests
command: pytest tests -vvrfEsx
+ test_39:
+ docker:
+ - image: python:3.9.0
+ auth:
+ username: $DOCKER_HUB_USER
+ password: $DOCKER_HUB_PW
+ steps:
+ - checkout
+ - setup_remote_docker
+ - run:
+ name: Install zsh for tests
+ command: apt-get update && apt-get install -y zsh
+
+ - run:
+ name: Install graphviz
+ command: apt-get update && apt-get install -y graphviz
+
+ - run:
+ name: Upgrade pip
+ command: pip install -U pip
+
+ - run:
+ name: Install Prefect
+ # All extras cannot be tested because they do not support 3.9 yet, until then
+ # we will just guarantee that we pass the core test suite
+ # See https://github.com/PrefectHQ/prefect/pull/3441#issuecomment-708419324
+ command: pip install ".[test]"
+
+ - run:
+ name: Run tests
+ command: pytest tests -vvrfEsx
+
upload_coverage:
docker:
- image: python:3.7
@@ -283,7 +315,6 @@ jobs:
name: Upload Coverage
command: bash <(curl -s https://codecov.io/bash) -cF python -s "/tmp/workspace/coverage/"
-
build_docker_image:
docker:
- image: docker
@@ -525,13 +556,14 @@ workflows:
- test_36
- test_37
- test_38
+ - test_39
- test_lower_prefect
- test_vanilla_prefect
- upload_coverage:
requires:
- test_36
- test_37
- - test_38
+ - test_39
- test_vanilla_prefect
'Check code style and docs':
diff --git a/changes/issue3655.yaml b/changes/issue3655.yaml
new file mode 100644
index 0000000000..dd6f835024
--- /dev/null
+++ b/changes/issue3655.yaml
@@ -0,0 +1,23 @@
+# An example changelog entry
+#
+# 1. Choose one (or more if a PR encompasses multiple changes) of the following headers:
+# - feature
+# - enhancement
+# - task
+# - fix
+# - deprecation
+# - breaking (for breaking changes)
+#
+# 2. Fill in one (or more) bullet points under the heading, describing the change.
+# Markdown syntax may be used.
+#
+# 3. If you would like to be credited as helping with this release, add a
+# contributor section with your name and github username.
+#
+# Here's an example of a PR that adds an enhancement
+
+fix:
+ - "Fixes Flow.replace freezing reference tasks - [#3655](https://github.com/PrefectHQ/prefect/issues/3655)"
+
+contributor:
+ - "[Ben Fogelson](https://github.com/benfogelson)"
diff --git a/changes/pr3411.yaml b/changes/pr3411.yaml
new file mode 100644
index 0000000000..8bc8021ea4
--- /dev/null
+++ b/changes/pr3411.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Experimental support for Python 3.9 - [#3411](https://github.com/PrefectHQ/prefect/pull/3411)"
diff --git a/changes/pr3654.yaml b/changes/pr3654.yaml
new file mode 100644
index 0000000000..b5c8ed15d2
--- /dev/null
+++ b/changes/pr3654.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fixed bug where `flow.serialized_hash()` could return inconsistent values across new python instances - [#3654](https://github.com/PrefectHQ/prefect/pull/3654)"
diff --git a/docs/core/getting_started/installation.md b/docs/core/getting_started/installation.md
index c79bb445f8..7eebf46a55 100644
--- a/docs/core/getting_started/installation.md
+++ b/docs/core/getting_started/installation.md
@@ -52,6 +52,10 @@ Examples of extra packages include:
- `spacy`: tools for building NLP pipelines using Spacy
- `redis`: tools for interacting with a Redis database
+:::warning Python 3.9
+Prefect support for Python 3.9 is experimental and extras are not expected to work yet as we wait for required packages to be updated.
+:::
+
## Running the local server and UI
Prefect includes an open-source server and UI for orchestrating and managing flows. The local server stores flow metadata in a Postgres database and exposes a GraphQL API. The local server requires [Docker](https://www.docker.com/) and [Docker Compose](https://docs.docker.com/compose/install/) to be installed. If you have [Docker Desktop](https://www.docker.com/products/docker-desktop) on your machine, you've got both of these.
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py
index 641617f220..c2afb76bee 100644
--- a/src/prefect/core/flow.py
+++ b/src/prefect/core/flow.py
@@ -333,12 +333,13 @@ class Flow:
validate=False,
)
- # update auxiliary task collections
- ref_tasks = self.reference_tasks()
- new_refs = [t for t in ref_tasks if t != old] + (
- [new] if old in ref_tasks else []
- )
- self.set_reference_tasks(new_refs)
+ if self._reference_tasks:
+ # update auxiliary task collections
+ ref_tasks = self.reference_tasks()
+ new_refs = [t for t in ref_tasks if t != old] + (
+ [new] if old in ref_tasks else []
+ )
+ self.set_reference_tasks(new_refs)
if validate:
self.validate()
@@ -1478,7 +1479,9 @@ class Flow:
Returns:
- str: the hash of the serialized flow
"""
- return hashlib.sha256(json.dumps(self.serialize(build)).encode()).hexdigest()
+ return hashlib.sha256(
+ json.dumps(self.serialize(build), sort_keys=True).encode()
+ ).hexdigest()
# Diagnostics ----------------------------------------------------------------
diff --git a/src/prefect/engine/executors/dask.py b/src/prefect/engine/executors/dask.py
index 6f3f563034..50f770b122 100644
--- a/src/prefect/engine/executors/dask.py
+++ b/src/prefect/engine/executors/dask.py
@@ -1,6 +1,7 @@
import asyncio
import logging
import uuid
+import sys
import warnings
import weakref
from contextlib import contextmanager
@@ -245,6 +246,9 @@ class DaskExecutor(Executor):
Creates a `dask.distributed.Client` and yields it.
"""
+ if sys.platform != "win32":
+ # Fix for https://github.com/dask/distributed/issues/4168
+ import multiprocessing.popen_spawn_posix # noqa
from distributed import Client
try:
| PrefectHQ/prefect | 92ec574d018f3759ea64c25f5283f0a6aedb6376 | diff --git a/tests/conftest.py b/tests/conftest.py
index 736b1f2d99..78cfaedc7d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,8 +1,13 @@
import os
import tempfile
+import sys
from unittest.mock import MagicMock
import pytest
+
+if sys.platform != "win32":
+ # Fix for https://github.com/dask/distributed/issues/4168
+ import multiprocessing.popen_spawn_posix # noqa
from distributed import Client
import prefect
diff --git a/tests/core/test_flow.py b/tests/core/test_flow.py
index 075f47912f..c5ee9de9cc 100644
--- a/tests/core/test_flow.py
+++ b/tests/core/test_flow.py
@@ -7,6 +7,8 @@ import random
import sys
import tempfile
import time
+import subprocess
+import textwrap
from unittest.mock import MagicMock, patch
import cloudpickle
@@ -1649,6 +1651,16 @@ class TestReplace:
with pytest.raises(ValueError):
f.edges_to(t1)
+ def test_replace_leaves_unset_reference_tasks_alone(self):
+ with Flow(name="test") as f:
+ t1 = Task(name="t1")()
+ t2 = Task(name="t2")(upstream_tasks=[t1])
+ t3 = Task(name="t3")
+ f.replace(t1, t3)
+ t4 = Task(name="t4")
+ f.add_task(t4)
+ assert f.reference_tasks() == {t2, t4}
+
def test_replace_update_slugs(self):
flow = Flow("test")
p1, p2 = Parameter("p"), Parameter("p")
@@ -1859,6 +1871,35 @@ class TestSerializedHash:
def test_is_different_with_different_flow_name(self):
assert Flow("foo").serialized_hash() != Flow("bar").serialized_hash()
+ def test_is_same_in_new_python_instance(self, tmpdir):
+ contents = textwrap.dedent(
+ """
+ from prefect import task, Flow
+
+ @task
+ def dummy_task():
+ return "nothing interesting"
+
+ with Flow("example-flow") as flow:
+ dummy_task()
+
+ if __name__ == "__main__":
+ print(flow.serialized_hash())
+ """
+ )
+ script = tmpdir.join("flow.py")
+ script.write_text(contents, encoding="utf-8")
+
+ hashes = []
+ for _ in range(2):
+ result = subprocess.run(
+ [sys.executable, script], stdout=subprocess.PIPE, check=True
+ )
+ hashes.append(result.stdout)
+
+ assert hashes[0] # Ensure we don't have an empty string or None
+ assert len(set(hashes)) == 1
+
def test_is_different_with_modified_flow_name(self):
f1 = Flow("foo")
f2 = f1.copy()
| Same flow generates different hashes on repeated runs
## Description
`flow.serialized_hash()` generates different values on different script runs. Based on the original feature description this isn't what I expected.
## Expected Behavior
The exact same flow will always generate the same hash.
## Reproduction
Run this **twice** as a module executed as a script (`python -m`) and observe different hashes between runs:
```python
from prefect import task, Flow
@task
def dummy_task():
return "nothing interesting"
with Flow("example-flow") as flow:
dummy_task()
def main():
print(flow.serialized_hash())
print(flow.serialized_hash())
if __name__ == "__main__":
main()
```
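For reference, the accompanying patch makes the hash stable by sorting keys when the serialized flow is dumped to JSON before hashing. A minimal, standalone sketch of that idea (not Prefect-specific):

```python
import hashlib
import json

def stable_hash(payload: dict) -> str:
    # sort_keys=True makes the JSON string, and therefore the digest,
    # independent of dict key ordering across interpreter runs.
    return hashlib.sha256(json.dumps(payload, sort_keys=True).encode()).hexdigest()

assert stable_hash({"a": 1, "b": 2}) == stable_hash({"b": 2, "a": 1})
```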
## Environment
```
{
"config_overrides": {
"backend": true,
"server": {
"host": true,
"telemetry": {
"enabled": true
},
"ui": {
"apollo_url": true,
"host": true
}
}
},
"env_vars": [],
"system_information": {
"platform": "Windows-10-10.0.19041-SP0",
"prefect_backend": "server",
"prefect_version": "0.13.14",
"python_version": "3.8.5"
}
}
```
and also with
```
{
"config_overrides": {
"server": {
"host": true,
"telemetry": {
"enabled": true
},
"ui": {
"apollo_url": true
}
}
},
"env_vars": [],
"system_information": {
"platform": "Linux-4.15.0-118-generic-x86_64-with-debian-buster-sid",
"prefect_backend": "cloud",
"prefect_version": "0.13.15",
"python_version": "3.7.7"
}
}
```
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/core/test_flow.py::TestReplace::test_replace_leaves_unset_reference_tasks_alone"
] | [
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_args",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_none",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_empty_string",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_false",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_edges",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_schedule",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_without_state_handler",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_on_failure",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers0]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers1]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_illegal_handler",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger_with_informative_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage_and_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_environment",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_has_default_environment",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_auto_generates_tasks",
"tests/core/test_flow.py::test_add_task_to_flow",
"tests/core/test_flow.py::test_add_task_returns_task",
"tests/core/test_flow.py::test_add_task_raise_an_error_if_the_task_is_not_a_task_class",
"tests/core/test_flow.py::test_set_dependencies_adds_all_arguments_to_flow",
"tests/core/test_flow.py::test_set_dependencies_converts_unkeyed_arguments_to_tasks",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val0]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val1]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val2]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val3]",
"tests/core/test_flow.py::test_set_dependencies_creates_mapped_edges",
"tests/core/test_flow.py::test_set_dependencies_respects_unmapped",
"tests/core/test_flow.py::test_binding_a_task_in_context_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_no_with_flow_raises_error",
"tests/core/test_flow.py::test_adding_a_task_to_a_flow_twice_is_ok",
"tests/core/test_flow.py::test_binding_a_task_to_two_different_flows_is_ok",
"tests/core/test_flow.py::test_binding_a_task_with_var_kwargs_expands_the_kwargs",
"tests/core/test_flow.py::test_calling_a_task_without_context_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_task_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_slugged_task_in_different_flows_is_ok",
"tests/core/test_flow.py::test_context_manager_is_properly_applied_to_tasks",
"tests/core/test_flow.py::test_that_flow_adds_and_removes_itself_from_prefect_context",
"tests/core/test_flow.py::test_add_edge",
"tests/core/test_flow.py::test_add_edge_raise_error_for_downstream_parameter",
"tests/core/test_flow.py::test_add_edge_raise_error_for_duplicate_key_if_validate",
"tests/core/test_flow.py::test_add_edge_returns_edge",
"tests/core/test_flow.py::test_add_edge_from_contant",
"tests/core/test_flow.py::test_chain",
"tests/core/test_flow.py::test_splatting_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_iter",
"tests/core/test_flow.py::test_detect_cycle",
"tests/core/test_flow.py::test_eager_cycle_detection_defaults_false",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_1",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_2",
"tests/core/test_flow.py::test_eager_validation_is_off_by_default",
"tests/core/test_flow.py::test_eager_cycle_detection_works",
"tests/core/test_flow.py::test_copy_handles_constants",
"tests/core/test_flow.py::test_copy",
"tests/core/test_flow.py::test_infer_root_tasks",
"tests/core/test_flow.py::test_infer_terminal_tasks",
"tests/core/test_flow.py::test_reference_tasks_are_terminal_tasks_by_default",
"tests/core/test_flow.py::test_set_reference_tasks",
"tests/core/test_flow.py::test_set_reference_tasks_at_init_with_empty_flow_raises_error",
"tests/core/test_flow.py::test_set_reference_tasks_at_init",
"tests/core/test_flow.py::test_reset_reference_tasks_to_terminal_tasks",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_part_of_flow",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_iterable",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_created_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_copied_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_for_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_warning_raised_for_lambda_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_context_is_scoped_to_flow_context",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_tasks",
"tests/core/test_flow.py::TestEquality::test_object_inequality",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_edges",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_name",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_reference_tasks",
"tests/core/test_flow.py::test_update",
"tests/core/test_flow.py::test_update_with_constants",
"tests/core/test_flow.py::test_update_with_mapped_edges",
"tests/core/test_flow.py::test_update_with_parameter_merge",
"tests/core/test_flow.py::test_upstream_and_downstream_error_msgs_when_task_is_not_in_flow",
"tests/core/test_flow.py::test_sorted_tasks",
"tests/core/test_flow.py::test_sorted_tasks_with_ambiguous_sort",
"tests/core/test_flow.py::test_sorted_tasks_with_start_task",
"tests/core/test_flow.py::test_sorted_tasks_with_invalid_start_task",
"tests/core/test_flow.py::test_flow_raises_for_irrelevant_user_provided_parameters",
"tests/core/test_flow.py::test_flow_raises_for_missing_required_parameters",
"tests/core/test_flow.py::test_flow_doesnt_raises_for_missing_nonrequired_parameters",
"tests/core/test_flow.py::test_flow_accepts_unserializeable_parameters",
"tests/core/test_flow.py::test_parameters_can_not_be_downstream_dependencies",
"tests/core/test_flow.py::test_validate_cycles",
"tests/core/test_flow.py::test_validate_missing_edge_downstream_tasks",
"tests/core/test_flow.py::test_validate_missing_edge_upstream_tasks",
"tests/core/test_flow.py::test_validate_missing_reference_tasks",
"tests/core/test_flow.py::test_validate_edges_kwarg",
"tests/core/test_flow.py::test_validate_edges",
"tests/core/test_flow.py::test_skip_validate_edges",
"tests/core/test_flow.py::test_skip_validation_in_init_with_kwarg",
"tests/core/test_flow.py::TestCache::test_cache_created",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks_with_args",
"tests/core/test_flow.py::TestCache::test_cache_root_tasks",
"tests/core/test_flow.py::TestCache::test_cache_terminal_tasks",
"tests/core/test_flow.py::TestCache::test_cache_all_upstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_all_downstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_survives_pickling",
"tests/core/test_flow.py::TestCache::test_adding_task_clears_cache",
"tests/core/test_flow.py::TestCache::test_adding_edge_clears_cache",
"tests/core/test_flow.py::TestCache::test_setting_reference_tasks_clears_cache",
"tests/core/test_flow.py::TestReplace::test_replace_replaces_all_the_things",
"tests/core/test_flow.py::TestReplace::test_replace_update_slugs",
"tests/core/test_flow.py::TestReplace::test_replace_complains_about_tasks_not_in_flow",
"tests/core/test_flow.py::TestReplace::test_replace_runs_smoothly",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_to_task",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_collections_to_tasks",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_return_everything",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_name",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_takes_intersection",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_accepts_tags_and_requires_all_tags",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_can_check_types",
"tests/core/test_flow.py::TestSerialize::test_serialization",
"tests/core/test_flow.py::TestSerialize::test_deserialization",
"tests/core/test_flow.py::TestSerialize::test_serialize_validates_invalid_flows",
"tests/core/test_flow.py::TestSerialize::test_serialize_includes_storage",
"tests/core/test_flow.py::TestSerialize::test_serialize_adds_flow_to_storage_if_build",
"tests/core/test_flow.py::TestSerialize::test_serialize_can_be_called_twice",
"tests/core/test_flow.py::TestSerialize::test_serialize_fails_with_no_storage",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_with_same_flow",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_with_copied_flow",
"tests/core/test_flow.py::TestSerializedHash::test_is_consistent_after_storage_build",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_before_and_after_storage_build",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_different_flow_name",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_in_new_python_instance",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_modified_flow_name",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_modified_flow_storage",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_different_flow_tasks",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_runs_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_with_paused_states_hangs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_passes_scheduled_parameters",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_persist_stale_scheduled_params",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_run_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_returns_tasks_when_running_off_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_responds_to_config",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_stops_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_schedule_continues_on_executor_failure",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_without_schedule_can_run_cached_tasks",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs_with_always_run_trigger",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_differing_lengths",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_non_cached",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_mapped_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_run_accepts_state_kwarg",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_sets_scheduled_start_time",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_does_not_set_scheduled_start_time_globally",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_persists_scheduled_start_time_across_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_updates_the_scheduled_start_time_of_each_scheduled_run",
"tests/core/test_flow.py::TestFlowDiagnostics::test_flow_diagnostics",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.environments.storage.Docker]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.environments.storage.Local]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_passes_kwargs_to_storage",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage0-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage0-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage1-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage1-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage2-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage2-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage3-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage3-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage0]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage1]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage2]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage3]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_override_custom_set_result",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_environment_with_storage_labels",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_overwrite_labels_if_local_storage_is_used",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_errors_if_in_flow_context",
"tests/core/test_flow.py::test_bad_flow_runner_code_still_returns_state_obj",
"tests/core/test_flow.py::test_flow_run_raises_informative_error_for_certain_kwargs",
"tests/core/test_flow.py::test_flow_run_raises_if_no_more_scheduled_runs",
"tests/core/test_flow.py::test_flow_run_respects_state_kwarg",
"tests/core/test_flow.py::test_flow_run_respects_task_state_kwarg",
"tests/core/test_flow.py::test_flow_run_handles_error_states_when_initial_state_is_provided",
"tests/core/test_flow.py::test_looping_works_in_a_flow",
"tests/core/test_flow.py::test_pause_resume_works_with_retries",
"tests/core/test_flow.py::test_looping_with_retries_works_in_a_flow",
"tests/core/test_flow.py::test_looping_with_retries_resets_run_count",
"tests/core/test_flow.py::test_starting_at_arbitrary_loop_index",
"tests/core/test_flow.py::test_flow_run_name_as_run_param",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_and_load_loads",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_has_a_default",
"tests/core/test_flow.py::TestSaveLoad::test_load_accepts_name_and_sluggified_name",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[local]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[sync]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mthread]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc_local]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc]",
"tests/core/test_flow.py::test_results_write_to_formatted_locations",
"tests/core/test_flow.py::test_results_write_to_custom_formatters",
"tests/core/test_flow.py::test_run_agent_passes_flow_labels[environment]",
"tests/core/test_flow.py::test_run_agent_passes_flow_labels[run_config]",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_are_stable",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_incorporate_tags_and_order"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-12T16:38:14Z" | apache-2.0 |
|
PrefectHQ__prefect-3681 | diff --git a/changes/pr3681.yaml b/changes/pr3681.yaml
new file mode 100644
index 0000000000..e707ece5c9
--- /dev/null
+++ b/changes/pr3681.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Add `task_definition_arn` to `ECSRun` run-config - [#3681](https://github.com/PrefectHQ/prefect/pull/3681)"
diff --git a/src/prefect/agent/ecs/agent.py b/src/prefect/agent/ecs/agent.py
index 7d9df496a4..1052add1dc 100644
--- a/src/prefect/agent/ecs/agent.py
+++ b/src/prefect/agent/ecs/agent.py
@@ -310,8 +310,7 @@ class ECSAgent(Agent):
)
raise ValueError("Flow is missing a `run_config`")
- # Check if a task definition already exists
- taskdef_arn = self.lookup_task_definition_arn(flow_run)
+ taskdef_arn = self.get_task_definition_arn(flow_run, run_config)
if taskdef_arn is None:
# Register a new task definition
self.logger.debug(
@@ -359,16 +358,22 @@ class ECSAgent(Agent):
"prefect:flow-version": str(flow_run.flow.version),
}
- def lookup_task_definition_arn(self, flow_run: GraphQLResult) -> Optional[str]:
- """Lookup an existing task definition ARN for a flow run.
+ def get_task_definition_arn(
+ self, flow_run: GraphQLResult, run_config: ECSRun
+ ) -> Optional[str]:
+ """Get an existing task definition ARN for a flow run.
Args:
- flow_run (GraphQLResult): the flow run
+ - run_config (ECSRun): The flow's run config
Returns:
- Optional[str]: the task definition ARN. Returns `None` if no
existing definition is found.
"""
+ if run_config.task_definition_arn is not None:
+ return run_config.task_definition_arn
+
tags = self.get_task_definition_tags(flow_run)
from botocore.exceptions import ClientError
diff --git a/src/prefect/run_configs/ecs.py b/src/prefect/run_configs/ecs.py
index 6e2f3d9183..f75f55c80f 100644
--- a/src/prefect/run_configs/ecs.py
+++ b/src/prefect/run_configs/ecs.py
@@ -11,11 +11,11 @@ class ECSRun(RunConfig):
ECS Tasks are composed of task definitions and runtime parameters.
- Task definitions can be configured using either the `task_definition` or
- `task_definition_path` parameters. If neither is specified, the default
- configured on the agent will be used. At runtime this task definition will
- be registered once per flow version - subsequent runs of the same flow
- version will reuse the existing definition.
+ Task definitions can be configured using either the `task_definition`,
+ `task_definition_path`, or `task_definition_arn` parameters. If neither is
+ specified, the default configured on the agent will be used. At runtime
+ this task definition will be registered once per flow version - subsequent
+ runs of the same flow version will reuse the existing definition.
Runtime parameters can be specified via `run_task_kwargs`. These will be
merged with any runtime parameters configured on the agent when starting
@@ -34,6 +34,9 @@ class ECSRun(RunConfig):
Otherwise the task definition will be loaded at runtime on the
agent. Supported runtime file schemes include (`s3`, `gcs`, and
`agent` (for paths local to the runtime agent)).
+ - task_definition_arn (str, optional): A pre-registered task definition
+ ARN to use (either `family`, `family:version`, or a full task
+ definition ARN).
- image (str, optional): The image to use for this task. If not
provided, will be either inferred from the flow's storage (if using
`Docker` storage), or use the default configured on the agent.
@@ -99,6 +102,7 @@ ecs.html#ECS.Client.run_task
*,
task_definition: dict = None,
task_definition_path: str = None,
+ task_definition_arn: str = None,
image: str = None,
env: dict = None,
cpu: Union[int, str] = None,
@@ -109,9 +113,19 @@ ecs.html#ECS.Client.run_task
) -> None:
super().__init__(labels=labels)
- if task_definition is not None and task_definition_path is not None:
+ if (
+ sum(
+ [
+ task_definition is not None,
+ task_definition_path is not None,
+ task_definition_arn is not None,
+ ]
+ )
+ > 1
+ ):
raise ValueError(
- "Cannot provide both `task_definition` and `task_definition_path`"
+ "Can only provide one of `task_definition`, `task_definition_path`, "
+ "or `task_definition_arn`"
)
if task_definition_path is not None:
parsed = parse_path(task_definition_path)
@@ -127,6 +141,7 @@ ecs.html#ECS.Client.run_task
self.task_definition = task_definition
self.task_definition_path = task_definition_path
+ self.task_definition_arn = task_definition_arn
self.image = image
self.env = env
self.cpu = cpu
diff --git a/src/prefect/serialization/run_config.py b/src/prefect/serialization/run_config.py
index 96129353cf..ca94f386ee 100644
--- a/src/prefect/serialization/run_config.py
+++ b/src/prefect/serialization/run_config.py
@@ -28,6 +28,7 @@ class ECSRunSchema(RunConfigSchemaBase):
task_definition_path = fields.String(allow_none=True)
task_definition = JSONCompatible(allow_none=True)
+ task_definition_arn = fields.String(allow_none=True)
image = fields.String(allow_none=True)
env = fields.Dict(keys=fields.String(), allow_none=True)
cpu = fields.String(allow_none=True)
| PrefectHQ/prefect | 5715d587e9f30c9e5fced891566a3d7d6c693d4a | diff --git a/tests/agent/test_ecs_agent.py b/tests/agent/test_ecs_agent.py
index f6de39f5d0..79925e4d43 100644
--- a/tests/agent/test_ecs_agent.py
+++ b/tests/agent/test_ecs_agent.py
@@ -506,7 +506,7 @@ class TestGetRunTaskKwargs:
@pytest.mark.parametrize("kind", ["exists", "missing", "error"])
-def test_lookup_task_definition_arn(aws, kind):
+def test_get_task_definition_arn(aws, kind):
if kind == "exists":
aws.resourcegroupstaggingapi.get_resources.return_value = {
"ResourceTagMappingList": [{"ResourceARN": "my-taskdef-arn"}]
@@ -525,10 +525,11 @@ def test_lookup_task_definition_arn(aws, kind):
)
expected = None
+ run_config = ECSRun()
flow_run = GraphQLResult({"flow": GraphQLResult({"id": "flow-id", "version": 1})})
agent = ECSAgent()
- res = agent.lookup_task_definition_arn(flow_run)
+ res = agent.get_task_definition_arn(flow_run, run_config)
assert res == expected
kwargs = aws.resourcegroupstaggingapi.get_resources.call_args[1]
assert sorted(kwargs["TagFilters"], key=lambda x: x["Key"]) == [
@@ -538,6 +539,15 @@ def test_lookup_task_definition_arn(aws, kind):
assert kwargs["ResourceTypeFilters"] == ["ecs:task-definition"]
+def test_get_task_definition_arn_provided_task_definition_arn():
+ run_config = ECSRun(task_definition_arn="my-taskdef-arn")
+ flow_run = GraphQLResult({"flow": GraphQLResult({"id": "flow-id", "version": 1})})
+ agent = ECSAgent()
+
+ res = agent.get_task_definition_arn(flow_run, run_config)
+ assert res == "my-taskdef-arn"
+
+
class TestDeployFlow:
def deploy_flow(self, run_config, **kwargs):
agent = ECSAgent(**kwargs)
@@ -599,6 +609,15 @@ class TestDeployFlow:
assert aws.ecs.run_task.call_args[1]["enableECSManagedTags"] is True
assert "my-task-arn" in res
+ def test_deploy_flow_uses_provided_task_definition_arn(self, aws):
+ aws.ecs.run_task.return_value = {"tasks": [{"taskArn": "my-task-arn"}]}
+
+ res = self.deploy_flow(ECSRun(task_definition_arn="my-taskdef-arn"))
+ assert not aws.ecs.register_task_definition.called
+ assert aws.ecs.run_task.called
+ assert aws.ecs.run_task.call_args[1]["taskDefinition"] == "my-taskdef-arn"
+ assert "my-task-arn" in res
+
def test_deploy_flow_run_task_fails(self, aws):
aws.resourcegroupstaggingapi.get_resources.return_value = {
"ResourceTagMappingList": [{"ResourceARN": "my-taskdef-arn"}]
diff --git a/tests/run_configs/test_ecs.py b/tests/run_configs/test_ecs.py
index 64c04c303b..ffa2251573 100644
--- a/tests/run_configs/test_ecs.py
+++ b/tests/run_configs/test_ecs.py
@@ -10,6 +10,7 @@ def test_no_args():
config = ECSRun()
assert config.task_definition is None
assert config.task_definition_path is None
+ assert config.task_definition_arn is None
assert config.image is None
assert config.env is None
assert config.cpu is None
@@ -45,15 +46,29 @@ def test_labels():
assert config.labels == {"a", "b"}
-def test_cant_specify_both_task_definition_and_task_definition_path():
- with pytest.raises(ValueError, match="Cannot provide both"):
- ECSRun(task_definition={}, task_definition_path="/some/path")
[email protected](
+ "kwargs",
+ [
+ dict(task_definition={}, task_definition_path="/some/path"),
+ dict(task_definition={}, task_definition_arn="some_arn"),
+ dict(task_definition_path="/some/path", task_definition_arn="some_arn"),
+ dict(
+ task_definition={},
+ task_definition_path="/some/path",
+ task_definition_arn="some_arn",
+ ),
+ ],
+)
+def test_can_only_specify_task_definition_one_way(kwargs):
+ with pytest.raises(ValueError, match="Can only provide one of"):
+ ECSRun(**kwargs)
def test_remote_task_definition_path():
config = ECSRun(task_definition_path="s3://bucket/example.yaml")
assert config.task_definition_path == "s3://bucket/example.yaml"
assert config.task_definition is None
+ assert config.task_definition_arn is None
@pytest.mark.parametrize("scheme", ["local", "file", None])
@@ -78,9 +93,17 @@ def test_local_task_definition_path(tmpdir, scheme):
config = ECSRun(task_definition_path=task_definition_path)
assert config.task_definition_path is None
+ assert config.task_definition_arn is None
assert config.task_definition == task_definition
+def test_task_definition_arn():
+ config = ECSRun(task_definition_arn="my-task-definition")
+ assert config.task_definition_arn == "my-task-definition"
+ assert config.task_definition is None
+ assert config.task_definition_path is None
+
+
def test_task_definition():
task_definition = {
"containerDefinitions": [
@@ -90,6 +113,7 @@ def test_task_definition():
config = ECSRun(task_definition=task_definition)
assert config.task_definition_path is None
+ assert config.task_definition_arn is None
assert config.task_definition == task_definition
diff --git a/tests/serialization/test_run_configs.py b/tests/serialization/test_run_configs.py
index f4cb6c00ad..38d9c23c28 100644
--- a/tests/serialization/test_run_configs.py
+++ b/tests/serialization/test_run_configs.py
@@ -109,6 +109,7 @@ def test_serialize_docker_run(config):
]
}
),
+ ECSRun(task_definition_arn="my-task-definition"),
],
)
def test_serialize_ecs_run(config):
@@ -118,6 +119,7 @@ def test_serialize_ecs_run(config):
fields = [
"task_definition",
"task_definition_path",
+ "task_definition_arn",
"image",
"env",
"cpu",
| ECSRun should accept task_definition_arn
## Current behavior
The new ECSRun config offers great flexibility and fine-grained control over how a task is defined and executed, especially when custom values for specific parameters have to be provided to override the default configuration.
However, in a CI/CD environment, you may want to create the task definition outside Prefect and then provide the task_definition_arn as the only parameter.
## Proposed behavior
Extend the ECSRun config to accept a new parameter `task_definition_arn` that, if provided, takes precedence over `task_definition` and `task_definition_path`.
## Example
In a CI/CD environment, you may want to manage the task definition outside Prefect.
Moreover, being able to just pass the `task_definition_arn` would also simplify the Prefect code, because the definition of the infrastructure/environment is completely externalized.
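A rough sketch of the intended usage with the proposed parameter (the ARN and flow name below are placeholders):

```python
from prefect import Flow
from prefect.run_configs import ECSRun

# The task definition is registered outside Prefect (e.g. by a CI/CD pipeline);
# only its ARN (or family / family:revision) is handed to the run config.
run_config = ECSRun(
    task_definition_arn="arn:aws:ecs:us-east-1:123456789012:task-definition/my-flow:1"
)

with Flow("my-flow", run_config=run_config) as flow:
    ...
```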
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/run_configs/test_ecs.py::test_no_args",
"tests/run_configs/test_ecs.py::test_all_args",
"tests/run_configs/test_ecs.py::test_labels",
"tests/run_configs/test_ecs.py::test_can_only_specify_task_definition_one_way[kwargs0]",
"tests/run_configs/test_ecs.py::test_can_only_specify_task_definition_one_way[kwargs1]",
"tests/run_configs/test_ecs.py::test_can_only_specify_task_definition_one_way[kwargs2]",
"tests/run_configs/test_ecs.py::test_can_only_specify_task_definition_one_way[kwargs3]",
"tests/run_configs/test_ecs.py::test_remote_task_definition_path",
"tests/run_configs/test_ecs.py::test_local_task_definition_path[local]",
"tests/run_configs/test_ecs.py::test_local_task_definition_path[file]",
"tests/run_configs/test_ecs.py::test_local_task_definition_path[None]",
"tests/run_configs/test_ecs.py::test_task_definition_arn",
"tests/run_configs/test_ecs.py::test_task_definition",
"tests/run_configs/test_ecs.py::test_cpu_and_memory_acceptable_types",
"tests/serialization/test_run_configs.py::test_serialize_kubernetes_run[config0]",
"tests/serialization/test_run_configs.py::test_serialize_kubernetes_run[config1]",
"tests/serialization/test_run_configs.py::test_serialize_kubernetes_run[config2]",
"tests/serialization/test_run_configs.py::test_serialize_local_run[config0]",
"tests/serialization/test_run_configs.py::test_serialize_local_run[config1]",
"tests/serialization/test_run_configs.py::test_serialize_docker_run[config0]",
"tests/serialization/test_run_configs.py::test_serialize_docker_run[config1]",
"tests/serialization/test_run_configs.py::test_serialize_ecs_run[config0]",
"tests/serialization/test_run_configs.py::test_serialize_ecs_run[config1]",
"tests/serialization/test_run_configs.py::test_serialize_ecs_run[config2]",
"tests/serialization/test_run_configs.py::test_serialize_ecs_run[config3]"
] | [] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-18T20:27:36Z" | apache-2.0 |
|
PrefectHQ__prefect-3869 | diff --git a/changes/pr3869.yaml b/changes/pr3869.yaml
new file mode 100644
index 0000000000..14678395dd
--- /dev/null
+++ b/changes/pr3869.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Make `setup` method optional for `resource_manager` tasks - [#3869](https://github.com/PrefectHQ/prefect/pull/3869)"
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py
index 951f5b1822..7fb07fb504 100644
--- a/src/prefect/core/flow.py
+++ b/src/prefect/core/flow.py
@@ -401,10 +401,13 @@ class Flow:
@cache
def _default_reference_tasks(self) -> Set[Task]:
- from prefect.tasks.core.resource_manager import ResourceCleanupTask
+ from prefect.tasks.core.resource_manager import (
+ ResourceInitTask,
+ ResourceCleanupTask,
+ )
- # Select all tasks that aren't ResourceCleanupTasks and have no
- # downstream dependencies that aren't ResourceCleanupTasks
+ # Select all tasks that aren't a ResourceInitTask/ResourceCleanupTask
+ # and have no downstream dependencies that aren't ResourceCleanupTasks
#
# Note: this feels a bit gross, since it special cases a certain
# subclass inside the flow runner. If this behavior expands to other
@@ -413,7 +416,7 @@ class Flow:
return {
t
for t in self.tasks
- if not isinstance(t, ResourceCleanupTask)
+ if not isinstance(t, (ResourceInitTask, ResourceCleanupTask))
and not any(
t
for t in self.downstream_tasks(t)
diff --git a/src/prefect/tasks/core/resource_manager.py b/src/prefect/tasks/core/resource_manager.py
index de463baea4..33008cf0bf 100644
--- a/src/prefect/tasks/core/resource_manager.py
+++ b/src/prefect/tasks/core/resource_manager.py
@@ -1,4 +1,4 @@
-from typing import Any, Callable, Dict, Union, Set, overload
+from typing import Any, Callable, Dict, Union, Set, Optional, overload
import prefect
from prefect import Task, Flow
@@ -28,7 +28,7 @@ class ResourceSetupTask(Task):
class ResourceCleanupTask(Task):
"""Cleanup a resource with its resource manager"""
- def run(self, mgr: Any, resource: Any) -> None:
+ def run(self, mgr: Any, resource: Any = None) -> None:
mgr.cleanup(resource)
@@ -67,7 +67,11 @@ class ResourceContext:
"""
def __init__(
- self, init_task: Task, setup_task: Task, cleanup_task: Task, flow: Flow
+ self,
+ init_task: Task,
+ setup_task: Optional[Task],
+ cleanup_task: Task,
+ flow: Flow,
):
self.init_task = init_task
self.setup_task = setup_task
@@ -88,7 +92,7 @@ class ResourceContext:
)
self._tasks.add(task)
- def __enter__(self) -> Task:
+ def __enter__(self) -> Optional[Task]:
self.__prev_resource = prefect.context.get("resource")
prefect.context.update(resource=self)
return self.setup_task
@@ -106,13 +110,15 @@ class ResourceContext:
# the resource cleanup should be set as a downstream task.
upstream = self._flow.upstream_tasks(child)
if (
- not self._tasks.intersection(upstream)
+ self.setup_task is not None
+ and not self._tasks.intersection(upstream)
and self.setup_task not in upstream
):
child.set_upstream(self.setup_task, flow=self._flow)
downstream = self._flow.downstream_tasks(child)
if (
- not self._tasks.intersection(downstream)
+ self.cleanup_task is not None
+ and not self._tasks.intersection(downstream)
and self.cleanup_task not in downstream
):
child.set_downstream(self.cleanup_task, flow=self._flow)
@@ -212,11 +218,18 @@ class ResourceManager:
*args, flow=flow, **kwargs
)
- setup_task = ResourceSetupTask(**self.setup_task_kwargs)(init_task, flow=flow)
-
- cleanup_task = ResourceCleanupTask(**self.cleanup_task_kwargs)(
- init_task, setup_task, flow=flow
- )
+ if hasattr(self.resource_class, "setup"):
+ setup_task = ResourceSetupTask(**self.setup_task_kwargs)(
+ init_task, flow=flow
+ )
+ cleanup_task = ResourceCleanupTask(**self.cleanup_task_kwargs)(
+ init_task, setup_task, flow=flow
+ )
+ else:
+ setup_task = None
+ cleanup_task = ResourceCleanupTask(**self.cleanup_task_kwargs)(
+ init_task, flow=flow
+ )
return ResourceContext(init_task, setup_task, cleanup_task, flow)
@@ -257,18 +270,19 @@ def resource_manager(
"""A decorator for creating a `ResourceManager` object.
Used as a context manager, `ResourceManager` objects create tasks to setup
- and cleanup temporary objects used within a block of tasks. Examples might
- include temporary Dask/Spark clusters, Docker containers, etc...
+ and/or cleanup temporary objects used within a block of tasks. Examples
+ might include temporary Dask/Spark clusters, Docker containers, etc...
- Through usage a ResourceManager object adds three tasks to the graph:
+ Through usage a ResourceManager object adds up to three tasks to the graph:
- A `init` task, which returns an object that meets the `ResourceManager`
- protocol. This protocol requires two methods:
+ protocol. This protocol contains two methods:
* `setup(self) -> resource`: A method for creating the resource.
- The return value from this will available to user tasks.
+ The return value from this will available to user tasks. If no
+ setup is required, the `setup` method may be left undefined.
* `cleanup(self, resource) -> None`: A method for cleaning up the
- resource. This takes the return value from `setup` and
- shouldn't return anything.
- - A `setup` task, which calls the `setup` method on the `ResourceManager`
+ resource. This takes the return value from `setup` (or `None`
+ if no `setup` method) and shouldn't return anything.
+ - A `setup` task, which calls the optional `setup` method on the `ResourceManager`
- A `cleanup` task, which calls the `cleanup` method on the `ResourceManager`.
Args:
@@ -278,7 +292,7 @@ def resource_manager(
- init_task_kwargs (dict, optional): keyword arguments that will be
passed to the `Task` constructor for the `init` task.
- setup_task_kwargs (dict, optional): keyword arguments that will be
- passed to the `Task` constructor for the `setup` task.
+ passed to the `Task` constructor for the optional `setup` task.
- cleanup_task_kwargs (dict, optional): keyword arguments that will be
passed to the `Task` constructor for the `cleanup` task.
| PrefectHQ/prefect | c85e6e3b3658fe9ca6276d9af2bb38038078df1b | diff --git a/tests/tasks/core/test_resource_manager.py b/tests/tasks/core/test_resource_manager.py
index 73795a06e6..f1b7847878 100644
--- a/tests/tasks/core/test_resource_manager.py
+++ b/tests/tasks/core/test_resource_manager.py
@@ -186,6 +186,56 @@ def test_resource_manager_generated_flow_structure(api):
}
+def test_resource_manager_generated_flow_structure_no_setup():
+ @resource_manager
+ class MyResource:
+ def __init__(self, a):
+ self.a = a
+
+ def cleanup(self, val):
+ pass
+
+ with Flow("test") as flow:
+ a = inc(1)
+ context = MyResource(a)
+ with context as resource:
+ b = add(resource, a)
+ c = inc(b)
+ d = inc(2)
+ e = inc(d)
+ f = inc(3)
+ g = inc(f)
+
+ # task kwargs successfully forwarded to tasks
+ assert context.init_task.name == "MyResource"
+ assert context.setup_task is None
+ assert resource is None
+ assert context.cleanup_task.name == "MyResource.cleanup"
+ assert not context.cleanup_task.skip_on_upstream_skip
+
+ # Reference tasks setup properly
+ assert flow.reference_tasks() == {c, e, g}
+
+ # Check that:
+ # - Tasks with no downstream dependency in the resource context have
+ # the cleanup task set as a downstream dependency
+ # - All other tasks only have explicit dependencies
+ assert flow.upstream_tasks(a) == set()
+ assert flow.upstream_tasks(context.init_task) == {a}
+ assert flow.upstream_tasks(b) == {a}
+ assert flow.upstream_tasks(c) == {b}
+ assert flow.upstream_tasks(d) == set()
+ assert flow.upstream_tasks(e) == {d}
+ assert flow.upstream_tasks(f) == set()
+ assert flow.upstream_tasks(g) == {f}
+ assert flow.upstream_tasks(context.cleanup_task) == {
+ context.init_task,
+ c,
+ e,
+ f,
+ }
+
+
def test_resource_manager_execution_success():
on_setup = MagicMock(return_value=100)
on_cleanup = MagicMock()
@@ -206,6 +256,29 @@ def test_resource_manager_execution_success():
assert r.is_successful()
+def test_resource_manager_execution_success_no_setup():
+ @resource_manager
+ class MyResource:
+ def __init__(self, on_cleanup):
+ self.on_cleanup = on_cleanup
+
+ def cleanup(self, val):
+ self.on_cleanup(val)
+
+ on_cleanup = MagicMock()
+
+ with Flow("test") as flow:
+ context = MyResource(on_cleanup)
+ with context:
+ inc(inc(1))
+
+ state = flow.run()
+ assert on_cleanup.call_args == ((None,), {})
+ assert state.is_successful()
+ for r in state.result.values():
+ assert r.is_successful()
+
+
@pytest.mark.parametrize("kind", ["init", "setup", "cleanup"])
def test_resource_manager_execution_with_failure_in_manager(kind):
on_setup = MagicMock(return_value=100)
| Support `resource_manager` tasks without a setup/cleanup step
The current `prefect.resource_manager` design requires both a `setup` and a `cleanup` method to be defined. Sometimes you want to run a cleanup step without an explicit setup step (or the other way around, although that's less useful). It'd be good to modify `resource_manager` to skip creating the setup/cleanup task when the corresponding `setup`/`cleanup` method is missing from the class.
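A minimal sketch of the desired usage, assuming the decorator form (the class and task names here are illustrative, not taken from an existing example):
```python
from prefect import Flow, resource_manager, task

@resource_manager
class ScratchSpace:
    """Illustrative resource that only needs a cleanup step; no setup() is defined."""

    def __init__(self, path):
        self.path = path

    def cleanup(self, resource):
        # With no setup() defined, `resource` would simply be None.
        print(f"cleaning up {self.path}")

@task
def work(x):
    return x + 1

with Flow("cleanup-only") as flow:
    with ScratchSpace("/tmp/scratch"):
        work(1)
```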
See #3836 for a motivator for this: the `setup` method is unneeded there; we only want to create a `cleanup` task that always runs after exiting that block. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/tasks/core/test_resource_manager.py::test_resource_manager_generated_flow_structure_no_setup",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_execution_success_no_setup"
] | [
"tests/tasks/core/test_resource_manager.py::test_resource_manager_default_init",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_init_overrides",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_decorator_init",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_sets_and_clears_context",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_errors_no_flow_in_context",
"tests/tasks/core/test_resource_manager.py::test_resource_cannot_be_used_with_multiple_flows",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_generated_flow_structure[functional]",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_generated_flow_structure[imperative]",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_execution_success",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_execution_with_failure_in_manager[init]",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_execution_with_failure_in_manager[setup]",
"tests/tasks/core/test_resource_manager.py::test_resource_manager_execution_with_failure_in_manager[cleanup]",
"tests/tasks/core/test_resource_manager.py::test_resource_tasks_always_rerun_on_flow_restart",
"tests/tasks/core/test_resource_manager.py::test_resource_cleanup_trigger",
"tests/tasks/core/test_resource_manager.py::test_resource_cleanup_reference_tasks"
] | {
"failed_lite_validators": [
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-12-17T17:18:12Z" | apache-2.0 |
|
PrefectHQ__prefect-3923 | diff --git a/changes/pr3923.yaml b/changes/pr3923.yaml
new file mode 100644
index 0000000000..a17d5f3adb
--- /dev/null
+++ b/changes/pr3923.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Support storing multiple local flows with the same name when using `Local` storage - [#3923](https://github.com/PrefectHQ/prefect/pull/3923)"
diff --git a/src/prefect/storage/local.py b/src/prefect/storage/local.py
index 6f9ef87b44..25f6c3b620 100644
--- a/src/prefect/storage/local.py
+++ b/src/prefect/storage/local.py
@@ -2,6 +2,8 @@ import os
import socket
from typing import TYPE_CHECKING, Any, Dict, List
+import cloudpickle
+import pendulum
from slugify import slugify
import prefect
@@ -99,7 +101,8 @@ class Local(Storage):
if self.stored_as_script:
return extract_flow_from_file(file_path=flow_location)
else:
- return prefect.core.flow.Flow.load(flow_location)
+ with open(flow_location, "rb") as f:
+ return cloudpickle.load(f)
# otherwise the path is given in the module format
else:
return extract_flow_from_module(module_str=flow_location)
@@ -138,9 +141,13 @@ class Local(Storage):
flow_location = self.path
else:
flow_location = os.path.join(
- self.directory, "{}.prefect".format(slugify(flow.name))
+ self.directory,
+ slugify(flow.name),
+ slugify(pendulum.now("utc").isoformat()),
)
- flow_location = flow.save(flow_location)
+ os.makedirs(os.path.dirname(flow_location), exist_ok=True)
+ with open(flow_location, "wb") as f:
+ cloudpickle.dump(flow, f)
self.flows[flow.name] = flow_location
self._flows[flow.name] = flow
| PrefectHQ/prefect | a26cb1b5b6d2dd18eb8987a9e473e05e38dca8d1 | diff --git a/tests/storage/test_local_storage.py b/tests/storage/test_local_storage.py
index a9381ac1f3..ea4173fc3b 100644
--- a/tests/storage/test_local_storage.py
+++ b/tests/storage/test_local_storage.py
@@ -1,6 +1,5 @@
import os
import socket
-import tempfile
import cloudpickle
import pytest
@@ -41,21 +40,22 @@ def test_create_local_storage_without_validation():
assert storage.result.dir == storage.directory
-def test_add_flow_to_storage():
- with tempfile.TemporaryDirectory() as tmpdir:
- storage = Local(directory=tmpdir)
- f = Flow("test")
- assert f.name not in storage
+def test_add_flow_to_storage(tmpdir):
+ storage = Local(directory=str(tmpdir))
+ f = Flow("test")
+ assert f.name not in storage
- res = storage.add_flow(f)
- assert res.endswith("test.prefect")
- assert f.name in storage
+ res = storage.add_flow(f)
- with open(os.path.join(tmpdir, "test.prefect"), "rb") as f:
- wat = f.read()
+ flow_dir = os.path.join(tmpdir, "test")
+ assert os.path.exists(flow_dir)
+ assert len(os.listdir(flow_dir)) == 1
+ assert res.startswith(flow_dir)
- assert isinstance(wat, bytes)
- assert cloudpickle.loads(wat).name == "test"
+ assert f.name in storage
+
+ f2 = storage.get_flow(res)
+ assert f2.name == "test"
def test_add_flow_file_to_storage(tmpdir):
@@ -79,14 +79,13 @@ def test_add_flow_file_to_storage(tmpdir):
assert f.name in storage
-def test_add_flow_raises_if_name_conflict():
- with tempfile.TemporaryDirectory() as tmpdir:
- storage = Local(directory=tmpdir)
- f = Flow("test")
- res = storage.add_flow(f)
- g = Flow("test")
- with pytest.raises(ValueError, match='name "test"'):
- storage.add_flow(g)
+def test_add_flow_raises_if_name_conflict(tmpdir):
+ storage = Local(directory=str(tmpdir))
+ f = Flow("test")
+ res = storage.add_flow(f)
+ g = Flow("test")
+ with pytest.raises(ValueError, match='name "test"'):
+ storage.add_flow(g)
def test_get_env_runner_raises():
@@ -102,13 +101,12 @@ def test_get_flow_raises_if_flow_not_present():
s.get_flow("test")
-def test_get_flow_returns_flow():
- with tempfile.TemporaryDirectory() as tmpdir:
- storage = Local(directory=tmpdir)
- f = Flow("test")
- loc = storage.add_flow(f)
- runner = storage.get_flow(loc)
- assert runner == f
+def test_get_flow_returns_flow(tmpdir):
+ storage = Local(directory=str(tmpdir))
+ f = Flow("test")
+ loc = storage.add_flow(f)
+ runner = storage.get_flow(loc)
+ assert runner == f
def test_get_flow_from_file_returns_flow(tmpdir):
@@ -127,81 +125,73 @@ def test_get_flow_from_file_returns_flow(tmpdir):
assert flow.run()
-def test_containment():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- f = Flow("test")
- s.add_flow(f)
+def test_containment(tmpdir):
+ s = Local(directory=str(tmpdir))
+ f = Flow("test")
+ s.add_flow(f)
- assert True not in s
- assert f not in s
- assert "test" in s
- assert Flow("other") not in s
- assert "other" not in s
+ assert True not in s
+ assert f not in s
+ assert "test" in s
+ assert Flow("other") not in s
+ assert "other" not in s
-def test_build_returns_self():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- assert s.build() is s
+def test_build_returns_self(tmpdir):
+ s = Local(directory=str(tmpdir))
+ assert s.build() is s
- f = Flow("test")
- s.add_flow(f)
- assert s.build() is s
+ f = Flow("test")
+ s.add_flow(f)
+ assert s.build() is s
-def test_multiple_flows_in_storage():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- f = Flow("test")
- g = Flow("other")
- z = Flow("not")
- f_loc = s.add_flow(f)
- g_loc = s.add_flow(g)
+def test_multiple_flows_in_storage(tmpdir):
+ s = Local(directory=str(tmpdir))
+ f = Flow("test")
+ g = Flow("other")
+ z = Flow("not")
+ f_loc = s.add_flow(f)
+ g_loc = s.add_flow(g)
- assert "test" in s
- assert "other" in s
- assert "not" not in s
+ assert "test" in s
+ assert "other" in s
+ assert "not" not in s
- assert s.get_flow(f_loc) == f
- assert s.get_flow(g_loc) == g
+ assert s.get_flow(f_loc) == f
+ assert s.get_flow(g_loc) == g
- assert s.flows["test"] == f_loc
- assert s.flows["other"] == g_loc
+ assert s.flows["test"] == f_loc
+ assert s.flows["other"] == g_loc
-def test_add_flow_with_weird_name_is_cleaned():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- flow = Flow("WELL what do you know?!~? looks like a test!!!!")
- loc = s.add_flow(flow)
- assert "?" not in loc
- assert "!" not in loc
- assert " " not in loc
- assert "~" not in loc
+def test_add_flow_with_weird_name_is_cleaned(tmpdir):
+ s = Local(directory=str(tmpdir))
+ flow = Flow("WELL what do you know?!~? looks like a test!!!!")
+ loc = s.add_flow(flow)
+ assert "?" not in loc
+ assert "!" not in loc
+ assert " " not in loc
+ assert "~" not in loc
-def test_build_healthchecks():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- flow = Flow("TestFlow")
- s.add_flow(flow)
- assert s.build()
+def test_build_healthchecks(tmpdir):
+ s = Local(directory=str(tmpdir))
+ flow = Flow("TestFlow")
+ s.add_flow(flow)
+ assert s.build()
-def test_build_healthcheck_returns_on_no_flows():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- assert s.build()
+def test_build_healthcheck_returns_on_no_flows(tmpdir):
+ s = Local(directory=str(tmpdir))
+ assert s.build()
-def test_labels_includes_hostname():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir)
- assert socket.gethostname() in s.labels
+def test_labels_includes_hostname(tmpdir):
+ s = Local(directory=str(tmpdir))
+ assert socket.gethostname() in s.labels
-def test_opt_out_of_hostname_label():
- with tempfile.TemporaryDirectory() as tmpdir:
- s = Local(directory=tmpdir, add_default_labels=False)
- assert socket.gethostname() not in s.labels
+def test_opt_out_of_hostname_label(tmpdir):
+ s = Local(directory=str(tmpdir), add_default_labels=False)
+ assert socket.gethostname() not in s.labels
| Prefect serializes flows based on flow name ignoring project
Hi,
I searched the slack channel and did not find anything that pertained directly to this. We use Prefect for two departments that have virtually the same flows, but different sources and different processing rules. What we have found is that flows with the same name overwrite each other regardless of the project. I am not sure if that is intended or if there is a way to work around this issue.
This is a minimal example that I confirmed in a fresh install of Prefect 0.14.0, running the server backend in Docker with a local agent, all on the same machine.
```
jaykae@DT-JKNICKERBOCKER: ~: ls -1 prefect_test prefect_test/proj*
prefect_test:
proj1
proj2
prefect_test/proj1:
WriteHello.py
prefect_test/proj2:
WriteHello.py
```
Just a folder with two subfolders, proj1 and proj2; each folder holds a file with nearly the same content. Note that the file paths are different and the project names are different as well.
**proj1/WriteHello.py**
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj1/WriteHello.py
import prefect
from prefect import task, Flow
@task
def write_hello():
with open("proj1/hello.txt","w") as hello_file:
hello_file.write("Hello Project 1!!!")
with Flow("WriteHello") as flow:
write_it = write_hello()
flow.register(project_name="Project1")
```
**proj2/WriteHello.py**
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj2/WriteHello.py
import prefect
from prefect import task, Flow
@task
def write_hello():
with open("proj2/hello.txt","w") as hello_file:
hello_file.write("Hello Project 2!!!")
with Flow("WriteHello") as flow:
write_it = write_hello()
flow.register(project_name="Project2")
```
I register proj1/WriteHello.py and then proj2/WriteHello.py. I would expect that registering two flows with different file paths, as well as different project names, would result in a text file being written in the matching directory for each flow when it is called. However, when I run the flow registered from proj1/WriteHello.py, the file is written in the proj2 directory and says Hello Project 2!!!.
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*
proj1:
WriteHello.py
proj2:
WriteHello.py
jaykae@DT-JKNICKERBOCKER: ~/prefect_test prefect run flow --name WriteHello --project Project1
Flow Run: http://localhost:8080/default/flow-run/e05b8f99-945d-4954-ab0d-7e5e89df0965
jaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*
proj1:
WriteHello.py
proj2:
WriteHello.py hello.txt
```
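For reference, a rough sketch of why the two registrations can collide on disk with `Local` storage (the path construction is paraphrased and the directory shown is only illustrative):
```python
# Rough sketch: the default Local storage appears to build the stored-flow path
# from the slugified flow name only, so the project is never part of the path.
import os
from slugify import slugify  # python-slugify

directory = os.path.expanduser("~/.prefect/flows")  # illustrative default location
flow_name = "WriteHello"

path = os.path.join(directory, "{}.prefect".format(slugify(flow_name)))
print(path)  # same path for a "WriteHello" flow in Project1 and in Project2
```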
I tried changing the .py file name and that did not help at all, so I assume it has to do with the flow name. Is there a way to have two flows with the same name, but different content, in separate projects and peacefully coexist? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/storage/test_local_storage.py::test_add_flow_to_storage"
] | [
"tests/storage/test_local_storage.py::test_create_local_storage",
"tests/storage/test_local_storage.py::test_create_local_storage_with_custom_dir",
"tests/storage/test_local_storage.py::test_create_local_storage_without_validation",
"tests/storage/test_local_storage.py::test_add_flow_file_to_storage",
"tests/storage/test_local_storage.py::test_add_flow_raises_if_name_conflict",
"tests/storage/test_local_storage.py::test_get_env_runner_raises",
"tests/storage/test_local_storage.py::test_get_flow_raises_if_flow_not_present",
"tests/storage/test_local_storage.py::test_get_flow_returns_flow",
"tests/storage/test_local_storage.py::test_get_flow_from_file_returns_flow",
"tests/storage/test_local_storage.py::test_containment",
"tests/storage/test_local_storage.py::test_build_returns_self",
"tests/storage/test_local_storage.py::test_multiple_flows_in_storage",
"tests/storage/test_local_storage.py::test_add_flow_with_weird_name_is_cleaned",
"tests/storage/test_local_storage.py::test_build_healthchecks",
"tests/storage/test_local_storage.py::test_build_healthcheck_returns_on_no_flows",
"tests/storage/test_local_storage.py::test_labels_includes_hostname",
"tests/storage/test_local_storage.py::test_opt_out_of_hostname_label"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-05T18:56:39Z" | apache-2.0 |
|
PrefectHQ__prefect-3924 | diff --git a/changes/pr3924.yaml b/changes/pr3924.yaml
new file mode 100644
index 0000000000..de3d6b199d
--- /dev/null
+++ b/changes/pr3924.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix bug in `prefect.context` contextmanager that resulted in context fields reverting to their initially configured values - [#3924](https://github.com/PrefectHQ/prefect/pull/3924)"
diff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py
index d2bde4720e..27f4544bc6 100644
--- a/src/prefect/utilities/context.py
+++ b/src/prefect/utilities/context.py
@@ -82,10 +82,14 @@ class Context(DotDict, threading.local):
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
- if "context" in config:
- self.update(config.context)
- self["config"] = merge_dicts(config, self.get("config", {})) # order matters
+ init = {}
+ # Initialize with config context
+ init.update(config.get("context", {}))
+ # Overwrite with explicit args
+ init.update(dict(*args, **kwargs))
+ # Merge in config (with explicit args overwriting)
+ init["config"] = merge_dicts(config, init.get("config", {}))
+ super().__init__(init)
def __getstate__(self) -> None:
"""
@@ -111,7 +115,8 @@ class Context(DotDict, threading.local):
with prefect.context(dict(a=1, b=2), c=3):
print(prefect.context.a) # 1
"""
- previous_context = self.copy()
+ # Avoid creating new `Context` object, copy as `dict` instead.
+ previous_context = self.__dict__.copy()
try:
new_context = dict(*args, **kwargs)
if "config" in new_context:
| PrefectHQ/prefect | 8f6792ba31cb17b4a9a0cd1bc32c4160559057b9 | diff --git a/tests/test_context.py b/tests/test_context.py
index 79f7a2c0f6..b17a2a3fc0 100644
--- a/tests/test_context.py
+++ b/tests/test_context.py
@@ -120,11 +120,13 @@ def test_modify_context_by_calling_update_inside_contextmanager():
def test_context_loads_values_from_config(monkeypatch):
subsection = Config(password="1234")
- config = Config(context=Config(subsection=subsection, my_key="my_value"))
+ config = Config(context=Config(subsection=subsection, key1="val1", key2="val2"))
monkeypatch.setattr(prefect.utilities.context, "config", config)
- fresh_context = Context()
+
+ fresh_context = Context(key2="new")
assert "subsection" in fresh_context
- assert fresh_context.my_key == "my_value"
+ assert fresh_context.key1 == "val1"
+ assert fresh_context.key2 == "new" # overridden by constructor
assert fresh_context.subsection == subsection
@@ -137,6 +139,27 @@ def test_context_loads_secrets_from_config(monkeypatch):
assert fresh_context.secrets == secrets_dict
+def test_context_contextmanager_prioritizes_new_keys_even_on_context_exit(monkeypatch):
+ """Previously exiting a context block would reload from the config,
+ overwriting any explicitly set values in a nested context. This was due to
+ the `Context` constructor being implicitly called when stashing the old
+ context, and the constructor prioritizing `config.context` over explicit
+ values."""
+ config = Config(context=Config(my_key="fizz"))
+ monkeypatch.setattr(prefect.utilities.context, "config", config)
+
+ context = Context()
+ assert context.my_key == "fizz"
+
+ with context(my_key="buzz"):
+ assert context.my_key == "buzz"
+ with context({"config": {"logging": {"log_to_cloud": "FOO"}}}):
+ assert context.config.logging.log_to_cloud == "FOO"
+ assert context.my_key == "buzz"
+ assert context.my_key == "buzz"
+ assert context.my_key == "fizz"
+
+
def test_context_contextmanager_prioritizes_new_config_keys():
with prefect.context({"config": {"logging": {"log_to_cloud": "FOO"}}}):
assert prefect.context.config.logging.log_to_cloud == "FOO"
| Prefect context manager overwrites global contexts only once
## Description
<!-- A clear description of the bug -->
The [document](https://docs.prefect.io/core/concepts/execution.html#modifying-context-at-runtime) states we can modify global context keys by using a context manager:
> ### Modifying context at runtime
> Modifying context, even globally set context keys, at specific times is possible using a provided context manager:
However, in the attached script, the context key `foo` is overwritten only once.
## Expected Behavior
<!-- What did you expect to happen instead? -->
Modified context values do not change during a flow run.
## Reproduction
<!-- A minimal example that exhibits the behavior. -->
### context-test.py
```python
import prefect
from prefect import task, Flow
@task
def print_contexts():
print("foo = " + prefect.context.foo)
with Flow('test') as flow:
t1 = print_contexts()
t2 = print_contexts()
t1.set_downstream(t2)
with prefect.context(foo='foo'):
flow.run()
```
### ~/.prefect/config.toml
```
[context]
foo = "global"
```
### Execution result
```
% python context-test.py
[2020-12-14 16:12:46+0900] INFO - prefect.FlowRunner | Beginning Flow run for 'test'
[2020-12-14 16:12:46+0900] INFO - prefect.TaskRunner | Task 'print_contexts': Starting task run...
foo = foo
[2020-12-14 16:12:46+0900] INFO - prefect.TaskRunner | Task 'print_contexts': Finished task run for task with final state: 'Success'
[2020-12-14 16:12:46+0900] INFO - prefect.TaskRunner | Task 'print_contexts': Starting task run...
foo = global <-- UNEXPECTED!!
[2020-12-14 16:12:46+0900] INFO - prefect.TaskRunner | Task 'print_contexts': Finished task run for task with final state: 'Success'
[2020-12-14 16:12:46+0900] INFO - prefect.FlowRunner | Flow run SUCCESS: all reference tasks succeeded
```
## Environment
<!-- Any additional information about your environment
Optionally run `prefect diagnostics` from the command line and paste the information here. -->
```
% prefect diagnostics
{
"config_overrides": {
"context": {
"foo": true
}
},
"env_vars": [],
"system_information": {
"platform": "macOS-10.16-x86_64-i386-64bit",
"prefect_backend": "cloud",
"prefect_version": "0.13.19",
"python_version": "3.8.5"
}
}
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_context.py::test_context_loads_values_from_config",
"tests/test_context.py::test_context_contextmanager_prioritizes_new_keys_even_on_context_exit"
] | [
"tests/test_context.py::test_context_sets_variables_inside_context_manager",
"tests/test_context.py::test_setting_context_with_keywords",
"tests/test_context.py::test_setting_context_with_dict",
"tests/test_context.py::test_call_function_inside_context_can_access_context",
"tests/test_context.py::test_nested_contexts_properly_restore_parent_context_when_closed",
"tests/test_context.py::test_context_setdefault_method",
"tests/test_context.py::test_modify_context_by_assigning_attributes_inside_contextmanager",
"tests/test_context.py::test_context_doesnt_overwrite_all_config_keys",
"tests/test_context.py::test_context_respects_the_dot_nature_of_config",
"tests/test_context.py::test_context_respects_the_dict_nature_of_non_config_keys",
"tests/test_context.py::test_modify_context_by_calling_update_inside_contextmanager",
"tests/test_context.py::test_context_loads_secrets_from_config",
"tests/test_context.py::test_context_contextmanager_prioritizes_new_config_keys",
"tests/test_context.py::test_context_init_prioritizes_new_config_keys",
"tests/test_context.py::test_context_init_prioritizes_new_config_keys_when_passed_a_dict",
"tests/test_context.py::test_contexts_are_thread_safe",
"tests/test_context.py::test_context_raises_informative_error_if_pickled"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-05T22:53:46Z" | apache-2.0 |
|
PrefectHQ__prefect-3964 | diff --git a/changes/pr3964.yaml b/changes/pr3964.yaml
new file mode 100644
index 0000000000..2c4023eb50
--- /dev/null
+++ b/changes/pr3964.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Properly handle `NotImplementedError` exceptions raised by a result's serializer - [#3964](https://github.com/PrefectHQ/prefect/pull/3964)"
diff --git a/src/prefect/engine/result/base.py b/src/prefect/engine/result/base.py
index 8e6139a37d..05a6888424 100644
--- a/src/prefect/engine/result/base.py
+++ b/src/prefect/engine/result/base.py
@@ -29,6 +29,12 @@ from prefect.engine.serializers import PickleSerializer, Serializer
from prefect.utilities import logging
+# Subclass of `NotImplementedError` to make it easier to distinguish this error
+# in consuming code
+class ResultNotImplementedError(NotImplementedError):
+ """Indicates a Result feature isn't implemented"""
+
+
class Result:
"""
A representation of the result of a Prefect task; this class contains
@@ -182,7 +188,7 @@ class Result:
Returns:
- bool: whether or not the target result exists.
"""
- raise NotImplementedError(
+ raise ResultNotImplementedError(
"Not implemented on the base Result class - if you are seeing this error you "
"might be trying to use features that require choosing a Result subclass; "
"see https://docs.prefect.io/core/concepts/results.html"
@@ -198,7 +204,7 @@ class Result:
Returns:
- Any: The value saved to the result.
"""
- raise NotImplementedError(
+ raise ResultNotImplementedError(
"Not implemented on the base Result class - if you are seeing this error you "
"might be trying to use features that require choosing a Result subclass; "
"see https://docs.prefect.io/core/concepts/results.html"
@@ -217,7 +223,7 @@ class Result:
Returns:
- Result: a new result object with the appropriately formatted location destination
"""
- raise NotImplementedError(
+ raise ResultNotImplementedError(
"Not implemented on the base Result class - if you are seeing this error you "
"might be trying to use features that require choosing a Result subclass; "
"see https://docs.prefect.io/core/concepts/results.html"
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py
index 501a00cbec..c1642076d6 100644
--- a/src/prefect/engine/task_runner.py
+++ b/src/prefect/engine/task_runner.py
@@ -18,7 +18,7 @@ import prefect
from prefect import config
from prefect.core import Edge, Task
from prefect.engine import signals
-from prefect.engine.result import Result
+from prefect.engine.result.base import Result, ResultNotImplementedError
from prefect.engine.runner import ENDRUN, Runner, call_state_handlers
from prefect.engine.state import (
Cached,
@@ -889,7 +889,7 @@ class TaskRunner(Runner):
**raw_inputs,
}
result = self.result.write(value, **formatting_kwargs)
- except NotImplementedError:
+ except ResultNotImplementedError:
result = self.result.from_value(value=value)
else:
result = self.result.from_value(value=value)
@@ -978,7 +978,7 @@ class TaskRunner(Runner):
loop_result = self.result.write(
loop_result.value, **formatting_kwargs
)
- except NotImplementedError:
+ except ResultNotImplementedError:
pass
state_context = {"_loop_count": prefect.context["task_loop_count"]}
| PrefectHQ/prefect | babc7631582b43dce9f41d584719dcc2ed6279c9 | diff --git a/tests/engine/result/test_base.py b/tests/engine/result/test_base.py
index dce845dca3..aaa2ddff18 100644
--- a/tests/engine/result/test_base.py
+++ b/tests/engine/result/test_base.py
@@ -7,6 +7,7 @@ from unittest.mock import MagicMock
import prefect
from prefect.engine.result import Result, NoResultType
+from prefect.engine.result.base import ResultNotImplementedError
class TestInitialization:
@@ -31,12 +32,12 @@ class TestInitialization:
def test_has_abstract_interfaces(abstract_interface: str):
"""
Tests to make sure that calling the abstract interfaces directly
- on the base `Result` class results in `NotImplementedError`s.
+ on the base `Result` class results in `ResultNotImplementedError`s.
"""
r = Result(value=3)
func = getattr(r, abstract_interface)
- with pytest.raises(NotImplementedError):
+ with pytest.raises(ResultNotImplementedError):
func(None)
| Try/Catch accidentally suppresses errors in serializers
## Description
<!-- A clear description of the bug -->
https://github.com/PrefectHQ/prefect/blob/master/src/prefect/engine/task_runner.py#L892
A Flow using the PandasSerializer and the S3Result failed to serialize a `datetime.timedelta` in a column. But, that returned an `ArrowNotImplementedError`. the serializer didn't catch it, the result didn't catch it, and it fell through to the line pointed at which allowed the Task to Succeed in a case where it should have failed.
## Expected Behavior
<!-- What did you expect to happen instead? -->
Task should have failed
## Reproduction
<!-- A minimal example that exhibits the behavior. -->
## Environment
<!-- Any additional information about your environment
Optionally run `prefect diagnostics` from the command line and paste the information here. -->
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/engine/result/test_base.py::TestInitialization::test_result_does_not_require_a_value",
"tests/engine/result/test_base.py::TestInitialization::test_result_inits_with_value",
"tests/engine/result/test_base.py::test_has_abstract_interfaces[exists]",
"tests/engine/result/test_base.py::test_has_abstract_interfaces[read]",
"tests/engine/result/test_base.py::test_has_abstract_interfaces[write]",
"tests/engine/result/test_base.py::test_basic_result_repr",
"tests/engine/result/test_base.py::TestResultEquality::test_boring_results_are_the_same_if_values_are[1]",
"tests/engine/result/test_base.py::TestResultEquality::test_boring_results_are_the_same_if_values_are[2]",
"tests/engine/result/test_base.py::TestResultEquality::test_boring_results_are_the_same_if_values_are[object]",
"tests/engine/result/test_base.py::TestResultEquality::test_boring_results_are_the_same_if_values_are[<lambda>]",
"tests/engine/result/test_base.py::TestResultEquality::test_boring_results_are_different_if_one_has_location",
"tests/engine/result/test_base.py::TestResultEquality::test_no_results_are_equal",
"tests/engine/result/test_base.py::TestResultEquality::test_no_results_are_not_equal_to_results",
"tests/engine/result/test_base.py::test_everything_is_pickleable_after_init[obj0]",
"tests/engine/result/test_base.py::test_everything_is_pickleable_after_init[obj1]",
"tests/engine/result/test_base.py::test_result_format_template_from_context",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_calls_validate_functions_from_attribute",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_returns_false_on_any_invalid",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_returns_true_on_none_invalid",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_ok_when_none_provided[r0]",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_ok_when_none_provided[r1]",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_raises_exceptions",
"tests/engine/result/test_base.py::TestResultValidate::test_result_validate_warns_when_run_without_run_validators_flag"
] | [] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-14T21:54:02Z" | apache-2.0 |
|
PrefectHQ__prefect-3975 | diff --git a/changes/pr3975.yaml b/changes/pr3975.yaml
new file mode 100644
index 0000000000..b4b643e41b
--- /dev/null
+++ b/changes/pr3975.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix regression in `apply_map` which prevented use in `case`/`resource_manager` blocks - [#3975](https://github.com/PrefectHQ/prefect/pull/3975)"
diff --git a/src/prefect/utilities/tasks.py b/src/prefect/utilities/tasks.py
index 2915aaf251..19d4aad7cb 100644
--- a/src/prefect/utilities/tasks.py
+++ b/src/prefect/utilities/tasks.py
@@ -97,16 +97,20 @@ def apply_map(func: Callable, *args: Any, flow: "Flow" = None, **kwargs: Any) ->
# Preprocess inputs to `apply_map`:
# - Extract information about each argument (is unmapped, is constant, ...)
# - Convert all arguments to instances of `Task`
- # - Add all non-constant arguments to the flow. Constant arguments are
- # added later as needed.
+ # - Add all non-constant arguments to the flow and subflow. Constant arguments
+ # are added later as needed.
def preprocess(a: Any) -> "prefect.Task":
- a2 = as_task(a, flow=flow2)
- is_mapped = not isinstance(a, prefect.utilities.edges.unmapped)
- is_constant = isinstance(a2, Constant)
+ # Clear external case/resource when adding tasks to flow2
+ with prefect.context(case=None, resource=None):
+ a2 = as_task(a, flow=flow2)
+ is_mapped = not isinstance(a, prefect.utilities.edges.unmapped)
+ is_constant = isinstance(a2, Constant)
+ if not is_constant:
+ flow2.add_task(a2) # type: ignore
+
arg_info[a2] = (is_mapped, is_constant)
if not is_constant:
flow.add_task(a2) # type: ignore
- flow2.add_task(a2) # type: ignore
if is_mapped and is_constant:
id_to_const[id(a2.value)] = a2 # type: ignore
return a2
| PrefectHQ/prefect | 07f049055c66193078f4db6a3d3b034acc7145bf | diff --git a/tests/utilities/test_tasks.py b/tests/utilities/test_tasks.py
index 62ebe61107..d8a8700e23 100644
--- a/tests/utilities/test_tasks.py
+++ b/tests/utilities/test_tasks.py
@@ -1,6 +1,6 @@
import pytest
-from prefect import Flow, Task, case, Parameter
+from prefect import Flow, Task, case, Parameter, resource_manager
from prefect.engine.flow_runner import FlowRunner
from prefect.engine.state import Paused, Resume
from prefect.utilities import tasks, edges
@@ -302,24 +302,50 @@ class TestApplyMap:
assert res == sol
def test_apply_map_inside_case_statement_works(self):
- def func(x):
- return add(x, 1), add(x, 2)
+ def func(x, a):
+ return add(x, 1), add(x, a)
with Flow("test") as flow:
branch = Parameter("branch")
with case(branch, True):
- a, b = apply_map(func, range(4))
- c = add.map(a, b)
+ a = inc(1)
+ b, c = apply_map(func, range(4), edges.unmapped(a))
+ d = add.map(b, c)
state = flow.run(branch=True)
- assert state.result[a].result == [1, 2, 3, 4]
- assert state.result[b].result == [2, 3, 4, 5]
- assert state.result[c].result == [3, 5, 7, 9]
+ assert state.result[a].result == 2
+ assert state.result[b].result == [1, 2, 3, 4]
+ assert state.result[c].result == [2, 3, 4, 5]
+ assert state.result[d].result == [3, 5, 7, 9]
state = flow.run(branch=False)
assert state.result[a].is_skipped()
assert state.result[b].is_skipped()
assert state.result[c].is_skipped()
+ assert state.result[d].is_skipped()
+
+ def test_apply_map_inside_resource_manager_works(self):
+ @resource_manager
+ class MyResource:
+ def setup(self):
+ return 1
+
+ def cleanup(self, _):
+ pass
+
+ def func(x, a):
+ return add(x, a), add(x, 2)
+
+ with Flow("test") as flow:
+ with MyResource() as a:
+ b, c = apply_map(func, range(4), edges.unmapped(a))
+ d = add.map(b, c)
+
+ state = flow.run()
+ assert state.result[a].result == 1
+ assert state.result[b].result == [1, 2, 3, 4]
+ assert state.result[c].result == [2, 3, 4, 5]
+ assert state.result[d].result == [3, 5, 7, 9]
def test_apply_map_inputs_added_to_subflow_before_calling_func(self):
"""We need to ensure all args to `appy_map` are added to the temporary
@@ -380,7 +406,7 @@ class TestAsTask:
assert res.result[val].result == obj
def test_as_task_toggles_constants(self):
- with Flow("test") as f:
+ with Flow("test"):
t = tasks.as_task(4)
assert isinstance(t, Task)
@@ -409,7 +435,7 @@ class TestAsTask:
],
)
def test_nested_collections_of_mixed_constants_are_not_constants(self, val):
- with Flow("test") as f:
+ with Flow("test"):
task = tasks.as_task(val)
assert not isinstance(task, Constant)
| Cannot use apply_map in a ResourceManager context
## Description
<!-- A clear description of the bug -->
Prefect version 0.14.2 introduced a bugfix #3920 which disallows the use of apply_map
within a ResourceManager context. We heavily rely on both ResourceManager and apply_map, e.g. we want to
scale a presto/trino cluster using the setup/cleanup of the ResourceManager, and add tasks to the flow using apply_map.
## Expected Behavior
<!-- What did you expect to happen instead? -->
The following unittest must passed with latest prefect versions.
```
import pytest
from prefect import Flow, apply_map, task
from prefect.tasks.core.resource_manager import resource_manager
@resource_manager
class DummyResource:
def cleanup(self, resource):
pass
@task
def adder(x):
return x + 1
def func(x):
return adder(x)
@pytest.mark.parametrize("inputs, expected", [
([1], [2]),
([2, 3], [3, 4])
])
def test_apply_map_within_resource_manager(inputs, expected):
with Flow(name='test') as flow:
with DummyResource():
y = task(fn=lambda x: x)(inputs)
result = apply_map(func, y)
assert flow.run().result[result].result == expected
```
## Reproduction
<!-- A minimal example that exhibits the behavior. -->
Test passes with prefect version 0.14.1. Test fails with exception `ValueError: Multiple flows cannot be used with the same resource block` with prefect version 0.14.2 and 0.14.3.
## Environment
<!-- Any additional information about your environment
Optionally run `prefect diagnostics` from the command line and paste the information here. -->
Unittest works with
`pip install prefect==0.14.1`
Flow doesn't work with
`pip install prefect==0.14.2` or
`pip install prefect==0.14.3`
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_inside_case_statement_works",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_inside_resource_manager_works"
] | [
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_can_be_used_without_calling",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_generates_new_tasks_upon_subsequent_calls",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_with_args_must_be_called_in_flow_context",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_with_no_args_must_be_called_inside_flow_context",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_with_default_args_must_be_called_inside_flow_context",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_with_required_args_must_be_called_with_args",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_returns_task_instance",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_validates_run_signature_against_varargs",
"tests/utilities/test_tasks.py::TestTaskDecorator::test_task_decorator_validates_run_signature_against_upstream_tasks_kwarg",
"tests/utilities/test_tasks.py::TestApplyMap::test_raises_no_flow_found",
"tests/utilities/test_tasks.py::TestApplyMap::test_raises_non_sequence_args",
"tests/utilities/test_tasks.py::TestApplyMap::test_raises_use_map_in_mapped_function[map]",
"tests/utilities/test_tasks.py::TestApplyMap::test_raises_use_map_in_mapped_function[set_dependencies]",
"tests/utilities/test_tasks.py::TestApplyMap::test_raises_use_map_in_mapped_function[add_edge]",
"tests/utilities/test_tasks.py::TestApplyMap::test_imperative_args_are_added_to_flow_before_mapping",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_simple[functional]",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_simple[imperative]",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_return_multiple",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_disparate_length_args",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_control_flow[functional]",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_control_flow[imperative]",
"tests/utilities/test_tasks.py::TestApplyMap::test_tasks_have_all_non_unmapped_constant_args_as_transitive_upstream_deps",
"tests/utilities/test_tasks.py::TestApplyMap::test_apply_map_inputs_added_to_subflow_before_calling_func",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[1]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[obj1]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[obj2]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[string]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[obj4]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[NoneType]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[<lambda>]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_with_basic_python_objs[obj7]",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_toggles_constants",
"tests/utilities/test_tasks.py::TestAsTask::test_as_task_doesnt_label_tasks_as_auto_generated",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_constants_are_constants[val0]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_constants_are_constants[val1]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_constants_are_constants[val2]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_constants_are_constants[val3]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_constants_are_constants[val4]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_mixed_constants_are_not_constants[val0]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_mixed_constants_are_not_constants[val1]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_mixed_constants_are_not_constants[val2]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections_of_mixed_constants_are_not_constants[val3]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections[val0]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections[val1]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections[val2]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections[val3]",
"tests/utilities/test_tasks.py::TestAsTask::test_nested_collections[val4]",
"tests/utilities/test_tasks.py::TestAsTask::test_ordered_collections",
"tests/utilities/test_tasks.py::test_tag_contextmanager_works_with_task_decorator",
"tests/utilities/test_tasks.py::test_copying_then_setting_tags_doesnt_leak_backwards",
"tests/utilities/test_tasks.py::test_setting_tags_then_calling_copies_tags",
"tests/utilities/test_tasks.py::test_context_manager_for_setting_tags",
"tests/utilities/test_tasks.py::TestPauseTask::test_pause_task_pauses",
"tests/utilities/test_tasks.py::TestPauseTask::test_pause_task_doesnt_pause_sometimes",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_pulls_from_attr_if_not_provided_at_runtime",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_runtime_takes_precedence",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_even_none_at_runtime_takes_precedence",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_doc_is_unaffected",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_args_not_listed_are_unaffected",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_works_with_multiple_args",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_works_with_mutiple_attrs",
"tests/utilities/test_tasks.py::TestDefaultFromAttrs::test_raises_if_attr_wasnt_set_at_init"
] | {
"failed_lite_validators": [
"has_issue_reference",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-19T15:38:50Z" | apache-2.0 |
|
PrefectHQ__prefect-4043 | diff --git a/changes/issue4031.yaml b/changes/issue4031.yaml
new file mode 100644
index 0000000000..f521836e2c
--- /dev/null
+++ b/changes/issue4031.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix issue with fixed duration Paused states not resuming properly - [#4031](https://github.com/PrefectHQ/prefect/issues/4031)"
diff --git a/src/prefect/engine/task_runner.py b/src/prefect/engine/task_runner.py
index c1642076d6..66950ccf4a 100644
--- a/src/prefect/engine/task_runner.py
+++ b/src/prefect/engine/task_runner.py
@@ -25,6 +25,7 @@ from prefect.engine.state import (
Failed,
Looped,
Mapped,
+ Paused,
Pending,
Resume,
Retrying,
@@ -141,7 +142,13 @@ class TaskRunner(Runner):
else:
run_count = state.context.get("task_run_count", 1)
- if isinstance(state, Resume):
+ # detect if currently Paused with a recent start_time
+ should_resume = (
+ isinstance(state, Paused)
+ and state.start_time
+ and state.start_time <= pendulum.now("utc") # type: ignore
+ )
+ if isinstance(state, Resume) or should_resume:
context.update(resume=True)
if "_loop_count" in state.context:
| PrefectHQ/prefect | 974c4a2267cdf28272b349875b58e9bfd13389c0 | diff --git a/tests/engine/test_task_runner.py b/tests/engine/test_task_runner.py
index 534537c29f..9be34ec8fd 100644
--- a/tests/engine/test_task_runner.py
+++ b/tests/engine/test_task_runner.py
@@ -429,6 +429,23 @@ class TestInitializeRun:
result = TaskRunner(Task()).initialize_run(state=Resume(), context=ctx)
assert result.context.resume is True
+ def test_task_runner_puts_resume_in_context_if_paused_start_time_elapsed(self):
+ with prefect.context() as ctx:
+ assert "resume" not in ctx
+ result = TaskRunner(Task()).initialize_run(
+ state=Paused(start_time=pendulum.now("utc")), context=ctx
+ )
+ assert result.context.resume is True
+
+ def test_task_runner_ignores_resume_in_context_if_paused_start_time_in_future(self):
+ with prefect.context() as ctx:
+ assert "resume" not in ctx
+ result = TaskRunner(Task()).initialize_run(
+ state=Paused(start_time=pendulum.now("utc").add(seconds=10)),
+ context=ctx,
+ )
+ assert "resume" not in ctx
+
def test_task_runner_puts_checkpointing_in_context(self):
with prefect.context() as ctx:
assert "checkpointing" not in ctx
| duration parameter is not taken into account from task_pause
## Description
<!-- A clear description of the bug -->
`duration` parameter in `prefect.utilities.tasks.pause_task` seems to be ignored. Based on documentation, if `duration` is specified, then, the task should be resumed after a period of time
## Expected Behavior
<!-- What did you expect to happen instead? -->
Resume task run after specified period of time.
## Reproduction
<!-- A minimal example that exhibits the behavior. -->
```
import datetime
from prefect import Flow, task
from prefect.utilities.tasks import pause_task
@task
def say_hello():
pause_task("wait or approve..", datetime.timedelta(seconds=10))
print('hello')
with Flow("My Flow") as f:
say_hello()
```
When I executed this flow from the UI, I noticed that the task stays paused for ever (well, for 10 years :P )
![image](https://user-images.githubusercontent.com/8322266/105976777-42170600-6099-11eb-8cff-b4bcab172657.png)
**NOTE**: Every time the duration time is reached, the task's state changes like: Paused -> Resumed -> Paused.
## Environment
<!-- Any additional information about your environment
Optionally run `prefect diagnostics` from the command line and paste the information here. -->
```
{
"config_overrides": {},
"env_vars": [
"PREFECT__CONTEXT__SECRETS__XXxx",
"PREFECT__CONTEXT__SECRETS__XXxx"
],
"system_information": {
"platform": "Linux-5.8.0-7630-generic-x86_64-with-debian-bullseye-sid",
"prefect_backend": "server",
"prefect_version": "0.14.4",
"python_version": "3.7.9"
}
}
```
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_resume_in_context_if_paused_start_time_elapsed"
] | [
"tests/engine/test_task_runner.py::test_task_runner_has_logger",
"tests/engine/test_task_runner.py::test_task_that_succeeds_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_raises_success_is_marked_success",
"tests/engine/test_task_runner.py::test_task_that_has_an_error_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_raises_fail_is_marked_fail",
"tests/engine/test_task_runner.py::test_task_that_fails_gets_retried_up_to_max_retry_time",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_has_start_time_recognized",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_with_naive_datetime_is_assumed_UTC",
"tests/engine/test_task_runner.py::test_task_that_raises_retry_gets_retried_even_if_max_retries_is_set",
"tests/engine/test_task_runner.py::test_task_that_raises_skip_gets_skipped",
"tests/engine/test_task_runner.py::test_task_that_has_upstream_skip_gets_skipped_with_informative_message",
"tests/engine/test_task_runner.py::test_task_that_is_running_doesnt_run",
"tests/engine/test_task_runner.py::test_running_task_that_already_has_finished_state_doesnt_run",
"tests/engine/test_task_runner.py::test_task_runner_preserves_error_type",
"tests/engine/test_task_runner.py::test_task_runner_raise_on_exception_when_task_errors",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_when_task_signals",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_mapping",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state0]",
"tests/engine/test_task_runner.py::test_task_runner_does_not_raise_on_exception_when_endrun_raised_by_state[state1]",
"tests/engine/test_task_runner.py::test_task_runner_accepts_dictionary_of_edges",
"tests/engine/test_task_runner.py::test_timeout_actually_stops_execution",
"tests/engine/test_task_runner.py::test_task_runner_can_handle_timeouts_by_default",
"tests/engine/test_task_runner.py::test_task_runner_handles_secrets",
"tests/engine/test_task_runner.py::test_task_that_starts_failed_doesnt_get_retried",
"tests/engine/test_task_runner.py::test_runner_checks_hashed_inputs_correctly",
"tests/engine/test_task_runner.py::TestContext::test_task_runner_inits_with_current_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_without_run_count[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_states_with_run_count[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_resume_in_context_if_state_is_resume",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_ignores_resume_in_context_if_paused_start_time_in_future",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_checkpointing_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_puts_tags_in_context",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state0]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state1]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state2]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state3]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state4]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_task_runner_doesnt_put_resume_in_context_if_state_is_not_resume[state5]",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_submitted_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_queued_states",
"tests/engine/test_task_runner.py::TestInitializeRun::test_unwrap_nested_meta_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_with_two_finished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_with_one_unfinished",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_raises_if_mapped_upstream_retrying",
"tests/engine/test_task_runner.py::TestCheckUpstreamFinished::test_doesnt_raise_if_mapped_upstream_complete",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_empty",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_unskipped_states",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_with_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_with_skipped_and_flag_set",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_raises_if_single_mapped_upstream_skipped",
"tests/engine/test_task_runner.py::TestCheckUpstreamSkipped::test_doesnt_raise_if_single_mapped_upstream_skipped_and_edge_is_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_successful_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_any_failed_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_pass",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_fail",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_all_finished_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_only_empty",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_manual_passes_when_context_is_resume",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_function_raise",
"tests/engine/test_task_runner.py::TestCheckTaskTrigger::test_custom_trigger_returns_false",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_ready_states[state4]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state3]",
"tests/engine/test_task_runner.py::TestCheckTaskReady::test_not_ready_doesnt_run[state4]",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_empty_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_unkeyed_inputs",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_with_non_key_edges",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_failed",
"tests/engine/test_task_runner.py::TestGetTaskInputs::test_get_inputs_from_upstream_mapped",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_not_cached[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_same_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_different_inputs",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_cached_duration_fail",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_state_kwarg_is_prioritized_over_context_caches",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_reads_result_from_context_with_cache_key_if_cached_valid",
"tests/engine/test_task_runner.py::TestCheckTaskCached::test_all_of_run_context_is_available_to_custom_cache_validators",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_inputs_are_cached[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state0]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state1]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state2]",
"tests/engine/test_task_runner.py::TestSetTaskRunning::test_not_pending[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_running_state",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state0]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state1]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state2]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_not_running_state[state3]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_success_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_fail_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_loop_signal_with_custom_message",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_skip_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_raise_pause_signal",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_run_with_error",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_invalid_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_hydrated_result_obj",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_returns_success_with_correct_result_type",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_without_checkpoint",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_config[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_checkpointing",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_custom_formatter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_templated_inputs_inputs_take_precedence",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_result_formatting_with_input_named_value",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_checkpointing_in_context[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_is_checkpointed_if_result_handler_present[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_for_parameter",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[True]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_results_in_failed_state[None]",
"tests/engine/test_task_runner.py::TestRunTaskStep::test_success_state_with_bad_result_and_checkpointing_disabled",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state0]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state1]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state2]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state3]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_non_failed_states[state4]",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_zero_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_one_max_retry_second_run",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_failed_retry_caches_inputs",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_run_count_greater_than_max_retries",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_with_start_time",
"tests/engine/test_task_runner.py::TestCheckRetryStep::test_retrying_when_state_has_explicit_run_count_set",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states_with_results[state4]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state0]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state1]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state2]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_non_success_states[state3]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_inputs]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[all_parameters]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[duration_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_inputs_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_no_cache_for[partial_parameters_only]",
"tests/engine/test_task_runner.py::TestCacheResultStep::test_success_state_with_cache_for",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_calls_state_handlers",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state0]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state1]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state2]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state3]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_not_exists[state4]",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_exists_multiple_checks",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_uses_callable",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_check_target_callable_uses_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_respects_multiple_flow_runs",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_run_context",
"tests/engine/test_task_runner.py::TestTargetExistsStep::test_target_with_callable_uses_task_inputs",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state3]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state4]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_default_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_none_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state1]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time[state2]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state0]",
"tests/engine/test_task_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time[state1]",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_not_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_on_trigger_failure_is_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_can_return_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_are_called_on_failure",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_respect_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handlers_handle_retry_signals",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_multiple_task_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskStateHandlers::test_task_handler_errors_are_logged",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_retry",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_triggerfailed",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handlers_are_called_on_mapped_parent",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_multiple_task_runner_handlers_are_called_in_sequence",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_runner_handler_that_doesnt_return_state_or_none",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_raises_signal_is_trapped",
"tests/engine/test_task_runner.py::TestTaskRunnerStateHandlers::test_task_handler_that_has_error_is_trapped",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_mapped[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state0]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state1]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_cached_inputs_if_rerun[state2]",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_returns_failed_if_no_success_upstream",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_sets_n_map_states",
"tests/engine/test_task_runner.py::TestCheckTaskReadyToMapStep::test_run_mapped_handles_upstream_mapped_states",
"tests/engine/test_task_runner.py::test_task_runner_skips_upstream_check_for_parent_mapped_task",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_manual_only_triggers",
"tests/engine/test_task_runner.py::test_task_runner_passes_manual_only_trigger_when_resume_state_is_passed",
"tests/engine/test_task_runner.py::test_task_runner_converts_pause_signal_to_paused_state_for_internally_raised_pauses",
"tests/engine/test_task_runner.py::test_task_runner_bypasses_pause_when_requested",
"tests/engine/test_task_runner.py::test_mapped_tasks_parents_and_children_respond_to_individual_triggers",
"tests/engine/test_task_runner.py::test_mapped_tasks_parent_regenerates_child_pipeline",
"tests/engine/test_task_runner.py::test_retry_has_updated_metadata",
"tests/engine/test_task_runner.py::test_pending_raised_from_endrun_has_updated_metadata",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_failures_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[True]",
"tests/engine/test_task_runner.py::test_skips_arent_checkpointed[None]",
"tests/engine/test_task_runner.py::test_task_runner_provides_logger",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works",
"tests/engine/test_task_runner.py::TestLooping::test_looping_calls_state_handlers_appropriately",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_starting",
"tests/engine/test_task_runner.py::TestLooping::test_looping_doesnt_aggressively_log_task_finished",
"tests/engine/test_task_runner.py::TestLooping::test_looping_accumulates",
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[True]",
"tests/engine/test_task_runner.py::TestLooping::test_looping_checkpoints_all_iterations[None]",
"tests/engine/test_task_runner.py::TestLooping::test_looping_works_with_retries",
"tests/engine/test_task_runner.py::TestLooping::test_loop_results_work_with_retries",
"tests/engine/test_task_runner.py::test_task_tags_are_attached_to_all_states",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout",
"tests/engine/test_task_runner.py::test_task_runner_logs_stdout_disabled",
"tests/engine/test_task_runner.py::test_task_runner_logs_map_index_for_mapped_tasks"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-01T19:07:08Z" | apache-2.0 |
|
PrefectHQ__prefect-4189 | diff --git a/changes/pr4189.yaml b/changes/pr4189.yaml
new file mode 100644
index 0000000000..4281dbf640
--- /dev/null
+++ b/changes/pr4189.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Make task slug generation robust to modifying existing task names - [#4189](https://github.com/PrefectHQ/prefect/pull/4189)"
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py
index d790330786..0789f336ab 100644
--- a/src/prefect/core/flow.py
+++ b/src/prefect/core/flow.py
@@ -4,6 +4,7 @@ import copy
import functools
import hashlib
import inspect
+import itertools
import json
import os
import tempfile
@@ -176,7 +177,10 @@ class Flow:
self.tasks = set() # type: Set[Task]
self.edges = set() # type: Set[Edge]
- self.slugs = dict() # type: Dict[Task, str]
+ self._slug_counters = collections.defaultdict(
+ lambda: itertools.count(1)
+ ) # type: Dict[str, Iterator[int]]
+ self.slugs = {} # type: Dict[Task, str]
self.constants = collections.defaultdict(
dict
) # type: Dict[Task, Dict[str, Any]]
@@ -486,12 +490,14 @@ class Flow:
Returns:
- str: the corresponding slug
"""
- slug_bases = []
- for t in self.tasks:
- slug_bases.append(f"{t.name}-" + "-".join(sorted(t.tags)))
- new_slug = f"{task.name}-" + "-".join(sorted(task.tags))
- index = slug_bases.count(new_slug)
- return f"{new_slug}{'' if new_slug.endswith('-') else '-'}{index + 1}"
+ parts = [task.name]
+ parts.extend(sorted(task.tags))
+ prefix = "-".join(parts)
+ while True:
+ ind = next(self._slug_counters[prefix])
+ slug = f"{prefix}-{ind}"
+ if slug not in self.slugs.values():
+ return slug
def add_task(self, task: Task) -> Task:
"""
| PrefectHQ/prefect | 720d06a8b66f4f13bab59dff7658113e93c39d13 | diff --git a/tests/core/test_flow.py b/tests/core/test_flow.py
index f7df8b2f84..2c2144b6e0 100644
--- a/tests/core/test_flow.py
+++ b/tests/core/test_flow.py
@@ -3196,12 +3196,16 @@ def test_run_agent_passes_flow_labels(monkeypatch, kind):
class TestSlugGeneration:
def test_slugs_are_stable(self):
- tasks = [Task(name=str(x)) for x in range(10)]
+ tasks = [Task(name="add") for _ in range(5)]
+ tasks.extend(Task(name="mul") for _ in range(5))
flow_one = Flow("one", tasks=tasks)
flow_two = Flow("two", tasks=tasks)
- assert set(flow_one.slugs.values()) == set([str(x) + "-1" for x in range(10)])
- assert flow_one.slugs == flow_two.slugs
+ sol = {f"add-{i}" for i in range(1, 6)}
+ sol.update(f"mul-{i}" for i in range(1, 6))
+
+ assert set(flow_one.slugs.values()) == sol
+ assert set(flow_two.slugs.values()) == sol
def test_slugs_incorporate_tags_and_order(self):
with Flow("one") as flow_one:
@@ -3224,3 +3228,27 @@ class TestSlugGeneration:
"a-tag1-1",
"b-tag1-tag2-1",
}
+
+ def test_generated_slugs_dont_collide_with_user_provided_slugs(self):
+ with Flow("test") as flow:
+ a3 = Task("a", slug="a-3")
+ flow.add_task(a3)
+ a1 = Task("a")
+ flow.add_task(a1)
+ a2 = Task("a")
+ flow.add_task(a2)
+ a4 = Task("a")
+ flow.add_task(a4)
+
+ assert flow.slugs == {a1: "a-1", a2: "a-2", a3: "a-3", a4: "a-4"}
+
+ def test_slugs_robust_to_task_name_changes(self):
+ "See https://github.com/PrefectHQ/prefect/issues/4185"
+ with Flow("test") as flow:
+ a1 = Task("a")
+ flow.add_task(a1)
+ a1.name = "changed"
+ a2 = Task("a")
+ flow.add_task(a2)
+
+ assert flow.slugs == {a1: "a-1", a2: "a-2"}
| Modifying `task.name` can violate the assumption that slugs are unique
## Description
Dynamically modifying a task's name can result in overlapping slugs. This seems to be because https://github.com/PrefectHQ/prefect/blob/20cd7352705741628ff83789e779b7bdb5315969/src/prefect/core/flow.py#L473-L494 regenerates `slug_bases` from the tasks' current names each time a task is added.
Better practice is to provide the task name when it is added to the flow, but there is no enforcement that `.name` is read only, and the use of `task_args` is less intuitive.
`_generate_task_slug` could be made more robust, or `.name` could be made read-only or raise a warning suggesting the preferred method.
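For illustration, a minimal standalone sketch of the counter-based approach the patch above takes (the `SlugGenerator` class and its methods are illustrative names, not Prefect's API): each name/tag prefix gets its own persistent counter at add time, so renaming a task afterwards cannot cause an already-issued slug to be handed out again, and user-provided slugs are never reused.
```python
import collections
import itertools

class SlugGenerator:
    """Hands out slugs like 'a-1', 'a-2', ..., skipping slugs already taken."""

    def __init__(self):
        self._counters = collections.defaultdict(lambda: itertools.count(1))
        self._taken = set()

    def new_slug(self, prefix: str) -> str:
        while True:
            slug = f"{prefix}-{next(self._counters[prefix])}"
            if slug not in self._taken:
                self._taken.add(slug)
                return slug

gen = SlugGenerator()
gen._taken.add("a-2")  # e.g. a user-provided slug that must not be reused
assert gen.new_slug("a") == "a-1"
assert gen.new_slug("a") == "a-3"  # "a-2" is skipped because it is already taken
```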
## Expected Behavior
Slugs should always be unique.
## Reproduction
```python
from prefect import Flow, Parameter, task, tags

@task
def identity(x):
    return x

with Flow('slug-overlap') as flow:
    with tags('test-tag'):
        a = Parameter('a', default=1)
        # b = identity(a, task_args=dict(name='better-practice'))
        b = identity(a)
        b.name = 'bad-practice'
        c = identity(a)

print(flow.slugs)
```
results in
```python
{<Task: bad-practice>: 'identity-test-tag-1', <Parameter: a>: 'a', <Task: identity>: 'identity-test-tag-1'}
```
## Environment
n/a | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/core/test_flow.py::TestSlugGeneration::test_generated_slugs_dont_collide_with_user_provided_slugs",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_robust_to_task_name_changes"
] | [
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_args",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_no_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_none",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_empty_string",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name_as_false",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_edges",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_schedule",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_without_state_handler",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_on_failure",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers0]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_state_handler[handlers1]",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_illegal_handler",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger",
"tests/core/test_flow.py::TestCreateFlow::test_flow_has_logger_with_informative_name",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_storage_and_result",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_with_environment",
"tests/core/test_flow.py::TestCreateFlow::test_create_flow_auto_generates_tasks",
"tests/core/test_flow.py::test_add_task_to_flow",
"tests/core/test_flow.py::test_add_task_returns_task",
"tests/core/test_flow.py::test_add_task_raise_an_error_if_the_task_is_not_a_task_class",
"tests/core/test_flow.py::test_set_dependencies_adds_all_arguments_to_flow",
"tests/core/test_flow.py::test_set_dependencies_converts_unkeyed_arguments_to_tasks",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val0]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val1]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val2]",
"tests/core/test_flow.py::test_set_dependencies_with_nested_ordered_constants_creates_a_single_constant[val3]",
"tests/core/test_flow.py::test_set_dependencies_creates_mapped_edges",
"tests/core/test_flow.py::test_set_dependencies_respects_unmapped",
"tests/core/test_flow.py::test_binding_a_task_in_context_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_adds_it_to_flow",
"tests/core/test_flow.py::test_binding_a_task_no_with_flow_raises_error",
"tests/core/test_flow.py::test_adding_a_task_to_a_flow_twice_is_ok",
"tests/core/test_flow.py::test_binding_a_task_to_two_different_flows_is_ok",
"tests/core/test_flow.py::test_binding_a_task_with_var_kwargs_expands_the_kwargs",
"tests/core/test_flow.py::test_calling_a_task_without_context_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_task_returns_a_copy",
"tests/core/test_flow.py::test_calling_a_slugged_task_in_different_flows_is_ok",
"tests/core/test_flow.py::test_context_manager_is_properly_applied_to_tasks",
"tests/core/test_flow.py::test_that_flow_adds_and_removes_itself_from_prefect_context",
"tests/core/test_flow.py::test_add_edge",
"tests/core/test_flow.py::test_add_edge_raise_error_for_downstream_parameter",
"tests/core/test_flow.py::test_add_edge_raise_error_for_duplicate_key_if_validate",
"tests/core/test_flow.py::test_add_edge_returns_edge",
"tests/core/test_flow.py::test_add_edge_from_contant",
"tests/core/test_flow.py::test_chain",
"tests/core/test_flow.py::test_splatting_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_chain_works_in_flow_context_without_duplication",
"tests/core/test_flow.py::test_iter",
"tests/core/test_flow.py::test_detect_cycle",
"tests/core/test_flow.py::test_eager_cycle_detection_defaults_false",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_1",
"tests/core/test_flow.py::test_direct_cycles_are_always_detected_2",
"tests/core/test_flow.py::test_eager_validation_is_off_by_default",
"tests/core/test_flow.py::test_eager_cycle_detection_works",
"tests/core/test_flow.py::test_copy_handles_constants",
"tests/core/test_flow.py::test_copy",
"tests/core/test_flow.py::test_infer_root_tasks",
"tests/core/test_flow.py::test_infer_terminal_tasks",
"tests/core/test_flow.py::test_reference_tasks_are_terminal_tasks_by_default",
"tests/core/test_flow.py::test_set_reference_tasks",
"tests/core/test_flow.py::test_set_reference_tasks_at_init_with_empty_flow_raises_error",
"tests/core/test_flow.py::test_set_reference_tasks_at_init",
"tests/core/test_flow.py::test_reset_reference_tasks_to_terminal_tasks",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_part_of_flow",
"tests/core/test_flow.py::test_key_states_raises_error_if_not_iterable",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_created_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_if_tasks_are_copied_but_not_added_to_flow",
"tests/core/test_flow.py::test_warning_raised_for_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_warning_raised_for_lambda_tasks_defined_in_flow_context_and_unused",
"tests/core/test_flow.py::test_context_is_scoped_to_flow_context",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_tasks",
"tests/core/test_flow.py::TestEquality::test_object_inequality",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_edges",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_name",
"tests/core/test_flow.py::TestEquality::test_equality_based_on_reference_tasks",
"tests/core/test_flow.py::test_update",
"tests/core/test_flow.py::test_update_with_constants",
"tests/core/test_flow.py::test_update_with_mapped_edges",
"tests/core/test_flow.py::test_update_with_parameter_merge",
"tests/core/test_flow.py::test_upstream_and_downstream_error_msgs_when_task_is_not_in_flow",
"tests/core/test_flow.py::test_sorted_tasks",
"tests/core/test_flow.py::test_sorted_tasks_with_ambiguous_sort",
"tests/core/test_flow.py::test_sorted_tasks_with_start_task",
"tests/core/test_flow.py::test_sorted_tasks_with_invalid_start_task",
"tests/core/test_flow.py::test_flow_raises_for_irrelevant_user_provided_parameters",
"tests/core/test_flow.py::test_flow_raises_for_missing_required_parameters",
"tests/core/test_flow.py::test_flow_doesnt_raises_for_missing_nonrequired_parameters",
"tests/core/test_flow.py::test_flow_accepts_unserializeable_parameters",
"tests/core/test_flow.py::test_parameters_can_not_be_downstream_dependencies",
"tests/core/test_flow.py::test_validate_cycles",
"tests/core/test_flow.py::test_validate_missing_edge_downstream_tasks",
"tests/core/test_flow.py::test_validate_missing_edge_upstream_tasks",
"tests/core/test_flow.py::test_validate_missing_reference_tasks",
"tests/core/test_flow.py::test_validate_edges_kwarg",
"tests/core/test_flow.py::test_validate_edges",
"tests/core/test_flow.py::test_skip_validate_edges",
"tests/core/test_flow.py::test_skip_validation_in_init_with_kwarg",
"tests/core/test_flow.py::TestCache::test_cache_created",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks",
"tests/core/test_flow.py::TestCache::test_cache_sorted_tasks_with_args",
"tests/core/test_flow.py::TestCache::test_cache_root_tasks",
"tests/core/test_flow.py::TestCache::test_cache_terminal_tasks",
"tests/core/test_flow.py::TestCache::test_cache_all_upstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_all_downstream_edges",
"tests/core/test_flow.py::TestCache::test_cache_survives_pickling",
"tests/core/test_flow.py::TestCache::test_adding_task_clears_cache",
"tests/core/test_flow.py::TestCache::test_adding_edge_clears_cache",
"tests/core/test_flow.py::TestCache::test_setting_reference_tasks_clears_cache",
"tests/core/test_flow.py::TestReplace::test_replace_replaces_all_the_things",
"tests/core/test_flow.py::TestReplace::test_replace_leaves_unset_reference_tasks_alone",
"tests/core/test_flow.py::TestReplace::test_replace_update_slugs",
"tests/core/test_flow.py::TestReplace::test_replace_complains_about_tasks_not_in_flow",
"tests/core/test_flow.py::TestReplace::test_replace_runs_smoothly",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_to_task",
"tests/core/test_flow.py::TestReplace::test_replace_converts_new_collections_to_tasks",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_return_everything",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_defaults_to_name",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_takes_intersection",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_accepts_tags_and_requires_all_tags",
"tests/core/test_flow.py::TestGetTasks::test_get_tasks_can_check_types",
"tests/core/test_flow.py::TestSerialize::test_serialization",
"tests/core/test_flow.py::TestSerialize::test_deserialization",
"tests/core/test_flow.py::TestSerialize::test_serialize_validates_invalid_flows",
"tests/core/test_flow.py::TestSerialize::test_serialize_includes_storage",
"tests/core/test_flow.py::TestSerialize::test_serialize_adds_flow_to_storage_if_build",
"tests/core/test_flow.py::TestSerialize::test_serialize_can_be_called_twice",
"tests/core/test_flow.py::TestSerialize::test_serialize_fails_with_no_storage",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_with_same_flow",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_with_copied_flow",
"tests/core/test_flow.py::TestSerializedHash::test_is_consistent_after_storage_build",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_before_and_after_storage_build",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_different_flow_name",
"tests/core/test_flow.py::TestSerializedHash::test_is_same_in_new_python_instance",
"tests/core/test_flow.py::TestSerializedHash::test_task_order_is_deterministic",
"tests/core/test_flow.py::TestSerializedHash::test_parameter_order_is_deterministic",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_modified_flow_name",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_modified_flow_storage",
"tests/core/test_flow.py::TestSerializedHash::test_is_different_with_different_flow_tasks",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_runs_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_with_paused_states_hangs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_passes_scheduled_parameters",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_persist_stale_scheduled_params",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_doesnt_run_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_returns_tasks_when_running_off_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_responds_to_config",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_stops_on_schedule",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_schedule_continues_on_executor_failure",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_without_schedule_can_run_cached_tasks",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_cached_states_across_runs_with_always_run_trigger",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_across_runs",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_differing_lengths",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_handles_mapped_cached_states_with_non_cached",
"tests/core/test_flow.py::TestFlowRunMethod::test_scheduled_runs_handle_mapped_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_run_accepts_state_kwarg",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_sets_scheduled_start_time",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_does_not_set_scheduled_start_time_globally",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_persists_scheduled_start_time_across_retries",
"tests/core/test_flow.py::TestFlowRunMethod::test_flow_dot_run_updates_the_scheduled_start_time_of_each_scheduled_run",
"tests/core/test_flow.py::TestFlowDiagnostics::test_flow_diagnostics",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.storage.Docker]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_uses_default_storage[prefect.storage.Local]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_passes_kwargs_to_storage",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_sets_universal_run_if_empty",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage0-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage0-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage1-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage1-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage2-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage2-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage3-environment]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_labels_if_labeled_storage_used[storage3-run_config]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage0]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage1]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage2]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_auto_sets_result_if_storage_has_default[storage3]",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_override_custom_set_result",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_doesnt_overwrite_labels_if_local_storage_is_used",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_errors_if_in_flow_context",
"tests/core/test_flow.py::TestFlowRegister::test_flow_register_warns_if_mixing_environment_and_executor",
"tests/core/test_flow.py::test_bad_flow_runner_code_still_returns_state_obj",
"tests/core/test_flow.py::test_flow_run_raises_informative_error_for_certain_kwargs",
"tests/core/test_flow.py::test_flow_run_raises_if_no_more_scheduled_runs",
"tests/core/test_flow.py::test_flow_run_respects_state_kwarg",
"tests/core/test_flow.py::test_flow_run_respects_task_state_kwarg",
"tests/core/test_flow.py::test_flow_run_handles_error_states_when_initial_state_is_provided",
"tests/core/test_flow.py::test_looping_works_in_a_flow",
"tests/core/test_flow.py::test_pause_resume_works_with_retries",
"tests/core/test_flow.py::test_looping_with_retries_works_in_a_flow",
"tests/core/test_flow.py::test_looping_with_retries_resets_run_count",
"tests/core/test_flow.py::test_starting_at_arbitrary_loop_index",
"tests/core/test_flow.py::test_flow_run_name_as_run_param",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_and_load_loads",
"tests/core/test_flow.py::TestSaveLoad::test_save_saves_has_a_default",
"tests/core/test_flow.py::TestSaveLoad::test_load_accepts_name_and_sluggified_name",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[local]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[sync]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mthread]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc_local]",
"tests/core/test_flow.py::test_timeout_actually_stops_execution[mproc]",
"tests/core/test_flow.py::test_results_write_to_formatted_locations",
"tests/core/test_flow.py::test_results_write_to_custom_formatters",
"tests/core/test_flow.py::test_run_agent_passes_flow_labels[environment]",
"tests/core/test_flow.py::test_run_agent_passes_flow_labels[run_config]",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_are_stable",
"tests/core/test_flow.py::TestSlugGeneration::test_slugs_incorporate_tags_and_order"
] | {
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-01T18:01:59Z" | apache-2.0 |
|
PrefectHQ__prefect-4232 | diff --git a/changes/pr4232.yaml b/changes/pr4232.yaml
new file mode 100644
index 0000000000..a9b08be9a5
--- /dev/null
+++ b/changes/pr4232.yaml
@@ -0,0 +1,2 @@
+enhancement:
+ - "Add `--docker-client-timeout` flag to docker agent, for configuring the timeout for all docker API requests - [#4232](https://github.com/PrefectHQ/prefect/pull/4232)"
diff --git a/package-lock.json b/package-lock.json
index ed0115baf8..73659c5821 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -4247,24 +4247,24 @@
"dev": true
},
"elliptic": {
- "version": "6.5.3",
- "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz",
- "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==",
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz",
+ "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==",
"dev": true,
"requires": {
- "bn.js": "^4.4.0",
- "brorand": "^1.0.1",
+ "bn.js": "^4.11.9",
+ "brorand": "^1.1.0",
"hash.js": "^1.0.0",
- "hmac-drbg": "^1.0.0",
- "inherits": "^2.0.1",
- "minimalistic-assert": "^1.0.0",
- "minimalistic-crypto-utils": "^1.0.0"
+ "hmac-drbg": "^1.0.1",
+ "inherits": "^2.0.4",
+ "minimalistic-assert": "^1.0.1",
+ "minimalistic-crypto-utils": "^1.0.1"
},
"dependencies": {
"bn.js": {
- "version": "4.11.9",
- "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz",
- "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==",
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==",
"dev": true
}
}
diff --git a/src/prefect/agent/docker/agent.py b/src/prefect/agent/docker/agent.py
index 218fd74449..766bf32337 100644
--- a/src/prefect/agent/docker/agent.py
+++ b/src/prefect/agent/docker/agent.py
@@ -18,17 +18,18 @@ if TYPE_CHECKING:
import docker
-def _stream_container_logs(base_url: str, container_id: str) -> None:
+def _stream_container_logs(base_url: str, timeout: int, container_id: str) -> None:
"""
Stream container logs back to stdout
Args:
- base_url (str): URL for a Docker daemon server
+ - timeout (int): timeout for docker api requests
- container_id (str): ID of a container to stream logs
"""
import docker
- client = docker.APIClient(base_url=base_url, version="auto")
+ client = docker.APIClient(base_url=base_url, timeout=timeout, version="auto")
for log in client.logs(container=container_id, stream=True, follow=True):
print(str(log, "utf-8").rstrip())
@@ -82,6 +83,8 @@ class DockerAgent(Agent):
some Docker-in-Docker setups that users may be running their agent with.
- reg_allow_list (List[str], optional): Limits Docker Agent to only pull images
from the listed registries.
+ - docker_client_timeout (int, optional): The timeout to use for docker
+ API calls, defaults to 60 seconds.
"""
def __init__(
@@ -101,6 +104,7 @@ class DockerAgent(Agent):
networks: List[str] = None,
docker_interface: bool = True,
reg_allow_list: List[str] = None,
+ docker_client_timeout: int = None,
) -> None:
super().__init__(
agent_config_id=agent_config_id,
@@ -150,6 +154,7 @@ class DockerAgent(Agent):
self.networks = networks
self.logger.debug(f"Docker networks set to {self.networks}")
+ self.docker_client_timeout = docker_client_timeout or 60
self.docker_interface = docker_interface
self.logger.debug(
"Docker interface toggle set to {}".format(self.docker_interface)
@@ -183,7 +188,11 @@ class DockerAgent(Agent):
# the 'import prefect' time low
import docker
- return docker.APIClient(base_url=self.base_url, version="auto")
+ return docker.APIClient(
+ base_url=self.base_url,
+ version="auto",
+ timeout=self.docker_client_timeout,
+ )
def heartbeat(self) -> None:
try:
@@ -483,6 +492,7 @@ class DockerAgent(Agent):
target=_stream_container_logs,
kwargs={
"base_url": self.base_url,
+ "timeout": self.docker_client_timeout,
"container_id": container_id,
},
)
diff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py
index b90fa8b6aa..f26b4f91c5 100644
--- a/src/prefect/cli/agent.py
+++ b/src/prefect/cli/agent.py
@@ -223,6 +223,12 @@ def docker():
"setups that users may be running their agent with."
),
)
[email protected](
+ "--docker-client-timeout",
+ default=None,
+ type=int,
+ help="The timeout to use for docker API calls, defaults to 60 seconds.",
+)
def start(volumes, no_docker_interface, **kwargs):
"""Start a docker agent"""
from prefect.agent.docker import DockerAgent
@@ -524,6 +530,12 @@ _agents = {
help="Disable presence of a Docker interface.",
hidden=True,
)
[email protected](
+ "--docker-client-timeout",
+ default=None,
+ type=int,
+ hidden=True,
+)
@click.pass_context
def start(
ctx,
@@ -549,6 +561,7 @@ def start(
agent_address,
hostname_label,
storage_labels,
+ docker_client_timeout,
):
"""
Start an agent.
@@ -611,6 +624,7 @@ def start(
--no-docker-interface Disable the check of a Docker interface on this machine.
Note: This is mostly relevant for some Docker-in-Docker
setups that users may be running their agent with.
+ --docker-client-timeout Timeout for docker api requests
\b
Kubernetes Agent:
@@ -692,6 +706,7 @@ def start(
volumes=list(volume),
networks=tuple(network),
docker_interface=not no_docker_interface,
+ docker_client_timeout=docker_client_timeout,
).start()
elif agent_option == "fargate":
from_qualified_name(retrieved_agent)(
| PrefectHQ/prefect | d06c5e49be0d1fc98fb337803aeee0286be39ff6 | diff --git a/tests/agent/test_docker_agent.py b/tests/agent/test_docker_agent.py
index 92bdad2372..a671c7dd70 100644
--- a/tests/agent/test_docker_agent.py
+++ b/tests/agent/test_docker_agent.py
@@ -68,11 +68,12 @@ def test_docker_agent_config_options_populated(monkeypatch):
api = MagicMock()
monkeypatch.setattr("docker.APIClient", api)
- agent = DockerAgent(base_url="url", no_pull=True)
+ agent = DockerAgent(base_url="url", no_pull=True, docker_client_timeout=123)
assert agent.client.get_auth_token() == "TEST_TOKEN"
assert agent.logger
assert agent.no_pull
assert api.call_args[1]["base_url"] == "url"
+ assert api.call_args[1]["timeout"] == 123
def test_docker_agent_no_pull(api):
@@ -515,7 +516,11 @@ def test_docker_agent_deploy_flow_show_flow_logs(api, monkeypatch):
process_kwargs = dict(
target=_stream_container_logs,
- kwargs={"base_url": agent.base_url, "container_id": "container_id"},
+ kwargs={
+ "base_url": agent.base_url,
+ "container_id": "container_id",
+ "timeout": 60,
+ },
)
process.assert_called_with(**process_kwargs)
# Check all arguments to `multiprocessing.Process` are pickleable
diff --git a/tests/cli/test_agent.py b/tests/cli/test_agent.py
index d0d42981e2..4a283e6df9 100644
--- a/tests/cli/test_agent.py
+++ b/tests/cli/test_agent.py
@@ -59,7 +59,7 @@ def test_help(cmd):
(
"--base-url testurl --no-pull --show-flow-logs --volume volume1 "
"--volume volume2 --network testnetwork1 --network testnetwork2 "
- "--no-docker-interface"
+ "--no-docker-interface --docker-client-timeout 123"
),
{
"base_url": "testurl",
@@ -68,6 +68,7 @@ def test_help(cmd):
"no_pull": True,
"show_flow_logs": True,
"docker_interface": False,
+ "docker_client_timeout": 123,
},
),
(
| Docker Agent: Passing custom parameters when creating docker.APIClient
## Current behavior
The prefect docker agent is using default parameters to create an instance of docker.APIClient. This makes it impossible to customize the docker client; for example, the docker client timeout cannot be increased.
https://github.com/PrefectHQ/prefect/blob/master/src/prefect/agent/docker/agent.py#L186
## Proposed behavior
We should have a way to pass custom parameters when creating an instance of docker.APIClient, either as a dictionary or as a JSON config file.
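For illustration only, a minimal sketch of what such a knob could look like; `make_docker_client` and its `docker_client_timeout` argument are illustrative names (the latter mirrors what the patch above eventually adds), while `docker.APIClient(base_url=..., version=..., timeout=...)` is docker-py's existing interface:
```python
import docker

def make_docker_client(base_url: str, docker_client_timeout: int = None) -> "docker.APIClient":
    # Fall back to docker-py's usual 60 second default when no timeout is supplied
    timeout = docker_client_timeout or 60
    return docker.APIClient(base_url=base_url, version="auto", timeout=timeout)

# e.g. tolerate slow container creation for up to 120 seconds before timing out
client = make_docker_client("unix://var/run/docker.sock", docker_client_timeout=120)
```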
## Example
When the docker daemon somehow gets overloaded and fails to create a container within 60 seconds, docker.APIClient will return a timeout error similar to https://github.com/docker/compose/issues/3927. In my case, increasing the timeout from 60 to 120 seconds would mitigate this problem. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/agent/test_docker_agent.py::test_docker_agent_config_options_populated"
] | [
"tests/agent/test_docker_agent.py::test_docker_agent_init",
"tests/agent/test_docker_agent.py::test_docker_agent_config_options[osx-unix://var/run/docker.sock]",
"tests/agent/test_docker_agent.py::test_docker_agent_config_options[win32-npipe:////./pipe/docker_engine]",
"tests/agent/test_docker_agent.py::test_docker_agent_no_pull",
"tests/agent/test_docker_agent.py::test_docker_agent_ping",
"tests/agent/test_docker_agent.py::test_docker_agent_ping_exception",
"tests/agent/test_docker_agent.py::test_populate_env_vars_from_agent_config",
"tests/agent/test_docker_agent.py::test_populate_env_vars[backend0]",
"tests/agent/test_docker_agent.py::test_populate_env_vars[backend1]",
"tests/agent/test_docker_agent.py::test_populate_env_vars_includes_agent_labels",
"tests/agent/test_docker_agent.py::test_populate_env_vars_sets_log_to_cloud[True]",
"tests/agent/test_docker_agent.py::test_populate_env_vars_sets_log_to_cloud[False]",
"tests/agent/test_docker_agent.py::test_populate_env_vars_from_run_config",
"tests/agent/test_docker_agent.py::test_docker_agent_deploy_flow_unsupported_run_config",
"tests/agent/test_docker_agent.py::test_docker_agent_deploy_flow_storage_raises",
"tests/agent/test_docker_agent.py::test_docker_agent_deploy_flow_reg_allow_list_not_allowed",
"tests/agent/test_docker_agent.py::test_docker_agent_shutdown_terminates_child_processes",
"tests/agent/test_docker_agent.py::test_docker_agent_heartbeat_gocase",
"tests/agent/test_docker_agent.py::test_docker_agent_heartbeat_exits_on_failure",
"tests/agent/test_docker_agent.py::test_docker_agent_heartbeat_logs_reconnect",
"tests/agent/test_docker_agent.py::test_docker_agent_heartbeat_resets_fail_count",
"tests/agent/test_docker_agent.py::test_docker_agent_init_volume_empty_options",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[name-True]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[/some/path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[./some/path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[~/some/path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[../some/path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_unix[\\n../some/path-True]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_win32[name-True]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_win32[C:\\\\\\\\some\\\\path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_win32[c:\\\\\\\\some\\\\path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_win32[\\\\\\\\some\\\\path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_is_named_volume_win32[\\\\\\\\\\\\some\\\\path-False]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate0-named_volumes0-container_mount_paths0-host_spec0]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate1-named_volumes1-container_mount_paths1-host_spec1]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate2-named_volumes2-container_mount_paths2-host_spec2]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate3-named_volumes3-container_mount_paths3-host_spec3]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate4-named_volumes4-container_mount_paths4-host_spec4]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_unix[candidate5-named_volumes5-container_mount_paths5-host_spec5]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_win[candidate0-named_volumes0-container_mount_paths0-host_spec0]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_win[candidate1-named_volumes1-container_mount_paths1-host_spec1]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_win[candidate2-named_volumes2-container_mount_paths2-host_spec2]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_win[candidate3-named_volumes3-container_mount_paths3-host_spec3]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_win[candidate4-named_volumes4-container_mount_paths4-host_spec4]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_raises_on_invalid_spec[some-name:/ctr/path:ro-ValueError]",
"tests/agent/test_docker_agent.py::test_docker_agent_parse_volume_spec_raises_on_invalid_spec[/some/path:/ctr/path:rw:something-else-ValueError]",
"tests/agent/test_docker_agent.py::test_docker_agent_start_max_polls[0]",
"tests/agent/test_docker_agent.py::test_docker_agent_start_max_polls[1]",
"tests/agent/test_docker_agent.py::test_docker_agent_start_max_polls[2]",
"tests/agent/test_docker_agent.py::test_docker_agent_network_network_and_networks",
"tests/agent/test_docker_agent.py::test_docker_agent_deploy_with_interface_check_linux",
"tests/agent/test_docker_agent.py::test_docker_agent_deploy_with_no_interface_check_linux"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-09T19:16:51Z" | apache-2.0 |
|
PrefectHQ__prefect-4287 | diff --git a/changes/pr4287.yaml b/changes/pr4287.yaml
new file mode 100644
index 0000000000..2f28d5641f
--- /dev/null
+++ b/changes/pr4287.yaml
@@ -0,0 +1,2 @@
+fix:
+ - "Fix bug where sometimes the global `prefect.context` wouldn't be respected during a flow run - [#4287](https://github.com/PrefectHQ/prefect/pull/4287)"
diff --git a/src/prefect/engine/flow_runner.py b/src/prefect/engine/flow_runner.py
index 5150b15fb2..e741366341 100644
--- a/src/prefect/engine/flow_runner.py
+++ b/src/prefect/engine/flow_runner.py
@@ -224,10 +224,13 @@ class FlowRunner(Runner):
self.logger.info("Beginning Flow run for '{}'".format(self.flow.name))
# make copies to avoid modifying user inputs
+ parameters = dict(parameters or {})
task_states = dict(task_states or {})
- context = dict(context or {})
task_contexts = dict(task_contexts or {})
- parameters = dict(parameters or {})
+ # Default to global context, with provided context as override
+ run_context = dict(prefect.context)
+ run_context.update(context or {})
+
if executor is None:
# Use the executor on the flow, if configured
executor = getattr(self.flow, "executor", None)
@@ -237,15 +240,15 @@ class FlowRunner(Runner):
self.logger.debug("Using executor type %s", type(executor).__name__)
try:
- state, task_states, context, task_contexts = self.initialize_run(
+ state, task_states, run_context, task_contexts = self.initialize_run(
state=state,
task_states=task_states,
- context=context,
+ context=run_context,
task_contexts=task_contexts,
parameters=parameters,
)
- with prefect.context(context):
+ with prefect.context(run_context):
state = self.check_flow_is_pending_or_running(state)
state = self.check_flow_reached_start_time(state)
state = self.set_flow_to_running(state)
@@ -266,7 +269,7 @@ class FlowRunner(Runner):
self.logger.exception(
"Unexpected error while running flow: {}".format(repr(exc))
)
- if prefect.context.get("raise_on_exception"):
+ if run_context.get("raise_on_exception"):
raise exc
new_state = Failed(
message="Unexpected error while running flow: {}".format(repr(exc)),
| PrefectHQ/prefect | 4f09895840ff0bc5187ae18ef493ad3021dab0cc | diff --git a/tests/engine/test_flow_runner.py b/tests/engine/test_flow_runner.py
index 47749cd250..c07cc95fbf 100644
--- a/tests/engine/test_flow_runner.py
+++ b/tests/engine/test_flow_runner.py
@@ -1323,18 +1323,28 @@ class TestContext:
output = res.result[return_ctx_key].result
assert isinstance(output, datetime.datetime)
- def test_user_provided_context_is_prioritized(self):
+ @pytest.mark.parametrize(
+ "outer_context, inner_context, sol",
+ [
+ ({"date": "outer"}, {"date": "inner"}, "inner"),
+ ({"date": "outer"}, {}, "outer"),
+ ],
+ )
+ def test_user_provided_context_is_prioritized(
+ self, outer_context, inner_context, sol
+ ):
@prefect.task
def return_ctx_key():
return prefect.context.get("date")
f = Flow(name="test", tasks=[return_ctx_key])
- res = f.run(context={"date": "42"})
+ with prefect.context(**outer_context):
+ res = f.run(context=inner_context)
assert res.is_successful()
output = res.result[return_ctx_key].result
- assert output == "42"
+ assert output == sol
@pytest.mark.parametrize(
| FlowRunner won't preserve all items from prefect.context
## Description
<!-- A clear description of the bug -->
FlowRunner does not preserve all items from prefect.context (e.g. dates: today, yesterday, etc.), because the `initialize_run()` method prepares an empty context if none is specified via the `context` parameter. This could be okay, but it contradicts the [documentation](https://docs.prefect.io/core/concepts/execution.html#modifying-context-at-runtime), which suggests that for "backfills", dates such as today could be overridden.
## Expected Behavior
<!-- What did you expect to happen instead? -->
Items already set in prefect.context should be respected.
## Reproduction
<!-- A minimal example that exhibits the behavior. -->
Both runs should output the same value "YESTERDAAAAAY" for `yesterday`, but the first outputs the actual date (2021-03-18 as of this writing).
```python
import prefect
from prefect import task, Flow

CTX = dict(test_variable="TEST_VALUE", yesterday="YESTERDAAAAAY")

@task(log_stdout=True)
def print_env():
    print('TEST_VARIABLE: %s' % prefect.context.get('test_variable', 'MISSING'))
    print('YESTERDAY: %s' % prefect.context.get('yesterday', 'MISSING'))

with Flow("Context override test") as flow:
    print_env()

## Run 1
with prefect.context(**CTX):
    flow.run()

## Run 2
flow.run(context=CTX)
```
## Environment
<!-- Any additional information about your environment
Optionally run `prefect diagnostics` from the command line and paste the information here. -->
```json
{
"config_overrides": {},
"env_vars": [],
"system_information": {
"platform": "Linux-4.18.0-240.10.1.el8_3.x86_64-x86_64-with-centos-8.3.2011",
"prefect_backend": "server",
"prefect_version": "0.14.10",
"python_version": "3.6.8"
}
}
```
## Suggested Resolution
A single line added to FlowRunner could solve that.
```diff
--- prefect/engine/flow_runner.py.orig 2021-02-16 16:08:28.204208267 +0100
+++ prefect/engine/flow_runner.py 2021-03-19 16:36:56.278923592 +0100
@@ -152,6 +152,7 @@
for param, value in (parameters or {}).items():
context_params[param] = value
+ context.update(prefect.context)
context.update(flow_name=self.flow.name)
context.setdefault("scheduled_start_time", pendulum.now("utc"))
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/engine/test_flow_runner.py::TestContext::test_user_provided_context_is_prioritized[outer_context1-inner_context1-outer]"
] | [
"tests/engine/test_flow_runner.py::test_flow_runner_has_logger",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_1_task",
"tests/engine/test_flow_runner.py::test_flow_runner_with_no_return_tasks",
"tests/engine/test_flow_runner.py::test_flow_runner_with_invalid_return_tasks",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_2_independent_tasks",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_2_dependent_tasks",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_base_task_class",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_2_dependent_tasks_and_first_task_fails",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_flow_with_2_dependent_tasks_and_first_task_fails_and_second_has_trigger",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_2_dependent_tasks_and_first_task_fails_with_FAIL",
"tests/engine/test_flow_runner.py::test_flow_runner_runs_basic_flow_with_2_dependent_tasks_and_second_task_fails",
"tests/engine/test_flow_runner.py::test_flow_runner_does_not_return_task_states_when_it_doesnt_run",
"tests/engine/test_flow_runner.py::test_flow_run_method_returns_task_states_even_if_it_doesnt_run",
"tests/engine/test_flow_runner.py::test_flow_runner_remains_running_if_tasks_are_retrying",
"tests/engine/test_flow_runner.py::test_secrets_dynamically_pull_from_context",
"tests/engine/test_flow_runner.py::test_secrets_are_rerun_on_restart",
"tests/engine/test_flow_runner.py::test_flow_runner_doesnt_return_by_default",
"tests/engine/test_flow_runner.py::test_flow_runner_does_return_tasks_when_requested",
"tests/engine/test_flow_runner.py::test_required_parameters_must_be_provided",
"tests/engine/test_flow_runner.py::test_parameters_are_placed_into_context",
"tests/engine/test_flow_runner.py::test_parameters_are_placed_into_context_including_defaults",
"tests/engine/test_flow_runner.py::test_parameters_are_placed_into_context_and_override_current_context",
"tests/engine/test_flow_runner.py::test_flow_run_state_determined_by_reference_tasks",
"tests/engine/test_flow_runner.py::test_flow_run_state_not_determined_by_reference_tasks_if_terminal_tasks_are_not_finished",
"tests/engine/test_flow_runner.py::test_flow_with_multiple_retry_tasks_doesnt_run_them_early",
"tests/engine/test_flow_runner.py::test_flow_runner_makes_copy_of_task_results_dict",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_pending_or_running_are_ok[state0]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_pending_or_running_are_ok[state1]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_pending_or_running_are_ok[state2]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_pending_or_running_are_ok[state3]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_not_pending_or_running_raise_endrun[state0]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_not_pending_or_running_raise_endrun[state1]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_not_pending_or_running_raise_endrun[state2]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_not_pending_or_running_raise_endrun[state3]",
"tests/engine/test_flow_runner.py::TestCheckFlowPendingOrRunning::test_not_pending_or_running_raise_endrun[state4]",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state0]",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state1]",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state2]",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_non_scheduled_states[state3]",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_scheduled_states_without_start_time",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_scheduled_states_with_future_start_time",
"tests/engine/test_flow_runner.py::TestCheckScheduledStep::test_scheduled_states_with_past_start_time",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_pending_becomes_running[state0]",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_pending_becomes_running[state1]",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_running_stays_running",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_other_states_raise_endrun[state0]",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_other_states_raise_endrun[state1]",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_other_states_raise_endrun[state2]",
"tests/engine/test_flow_runner.py::TestSetFlowToRunning::test_other_states_raise_endrun[state3]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_running_state_finishes",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state0]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state1]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state2]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state3]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state4]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_other_states_raise_endrun[state5]",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_determine_final_state_has_final_say",
"tests/engine/test_flow_runner.py::TestRunFlowStep::test_determine_final_state_preserves_running_states_when_tasks_still_running",
"tests/engine/test_flow_runner.py::TestOutputCaching::test_providing_cachedstate_with_simple_example[local]",
"tests/engine/test_flow_runner.py::TestOutputCaching::test_providing_cachedstate_with_simple_example[sync]",
"tests/engine/test_flow_runner.py::TestOutputCaching::test_providing_cachedstate_with_simple_example[mproc]",
"tests/engine/test_flow_runner.py::TestOutputCaching::test_providing_cachedstate_with_simple_example[mthread]",
"tests/engine/test_flow_runner.py::TestCachingFromContext::test_caches_do_not_persist_across_flow_runner_runs",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_initialize_sets_none_to_pending",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_initialize_returns_state_if_provided[state0]",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_initialize_returns_state_if_provided[state1]",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_initialize_sets_task_contexts",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_initialize_puts_parameters_in_context",
"tests/engine/test_flow_runner.py::TestInitializeRun::test_parameter_precedance",
"tests/engine/test_flow_runner.py::TestRunCount::test_run_count_updates_after_each_retry",
"tests/engine/test_flow_runner.py::TestRunCount::test_run_count_tracked_via_retry_states",
"tests/engine/test_flow_runner.py::test_flow_runner_uses_default_executor_on_flow_if_present",
"tests/engine/test_flow_runner.py::test_flow_runner_uses_user_provided_executor",
"tests/engine/test_flow_runner.py::test_flow_runner_properly_provides_context_to_task_runners[local]",
"tests/engine/test_flow_runner.py::test_flow_runner_properly_provides_context_to_task_runners[mproc]",
"tests/engine/test_flow_runner.py::test_flow_runner_properly_provides_context_to_task_runners[mthread]",
"tests/engine/test_flow_runner.py::test_flow_runner_properly_provides_context_to_task_runners[sync]",
"tests/engine/test_flow_runner.py::test_flow_runner_handles_timeouts[local]",
"tests/engine/test_flow_runner.py::test_flow_runner_handles_timeouts[mthread]",
"tests/engine/test_flow_runner.py::test_flow_runner_handles_timeouts[sync]",
"tests/engine/test_flow_runner.py::test_flow_runner_handles_timeout_error_with_mproc",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_flow_handlers_are_called",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_flow_handlers_are_called_even_when_initialize_run_fails",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_flow_handlers_can_return_none",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_flow_on_failure_is_not_called",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_task_on_failure_is_called",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_multiple_flow_handlers_are_called",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_multiple_flow_handlers_are_called_in_sequence",
"tests/engine/test_flow_runner.py::TestFlowStateHandlers::test_task_handler_that_doesnt_return_state_or_none",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_task_runner_handlers_are_called",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_multiple_task_runner_handlers_are_called",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_multiple_task_runner_handlers_are_called_in_sequence",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_task_runner_handler_that_doesnt_return_state_or_none",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_task_handler_that_raises_signal_is_trapped",
"tests/engine/test_flow_runner.py::TestFlowRunnerStateHandlers::test_task_handler_that_has_error_is_trapped",
"tests/engine/test_flow_runner.py::test_improper_use_of_unmapped_fails_gracefully",
"tests/engine/test_flow_runner.py::test_all_pipeline_method_steps_are_called",
"tests/engine/test_flow_runner.py::test_endrun_raised_in_initialize_is_caught_correctly",
"tests/engine/test_flow_runner.py::test_task_runner_cls_uses_default_function_if_none",
"tests/engine/test_flow_runner.py::test_flow_run_uses_default_flow_runner",
"tests/engine/test_flow_runner.py::test_parameters_can_be_set_in_context_if_none_passed",
"tests/engine/test_flow_runner.py::test_parameters_overwrite_context",
"tests/engine/test_flow_runner.py::test_parameters_overwrite_context_only_if_key_matches",
"tests/engine/test_flow_runner.py::TestMapping::test_terminal_mapped_states_are_used_for_flow_state[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_terminal_mapped_states_are_used_for_flow_state[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_terminal_mapped_states_are_used_for_flow_state[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_terminal_mapped_states_are_used_for_flow_state[sync]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_existing_map_states_if_available[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_existing_map_states_if_available[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_existing_map_states_if_available[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_existing_map_states_if_available[sync]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_partial_existing_map_states_if_available[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_partial_existing_map_states_if_available[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_partial_existing_map_states_if_available[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_will_use_partial_existing_map_states_if_available[sync]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_tasks_dont_run_if_upstream_pending[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_tasks_dont_run_if_upstream_pending[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_tasks_dont_run_if_upstream_pending[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_tasks_dont_run_if_upstream_pending[sync]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled[sync]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled_for_future[local]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled_for_future[mthread]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled_for_future[mproc]",
"tests/engine/test_flow_runner.py::TestMapping::test_mapped_task_can_be_scheduled_for_future[sync]",
"tests/engine/test_flow_runner.py::test_task_contexts_are_provided_to_tasks",
"tests/engine/test_flow_runner.py::test_paused_tasks_stay_paused_when_run",
"tests/engine/test_flow_runner.py::TestContext::test_flow_runner_passes_along_its_run_context_to_tasks",
"tests/engine/test_flow_runner.py::TestContext::test_flow_runner_provides_scheduled_start_time",
"tests/engine/test_flow_runner.py::TestContext::test_flow_runner_doesnt_override_scheduled_start_time_when_running_on_schedule[True]",
"tests/engine/test_flow_runner.py::TestContext::test_flow_runner_doesnt_override_scheduled_start_time_when_running_on_schedule[False]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_nodash_date_formats[today_nodash]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_nodash_date_formats[tomorrow_nodash]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_nodash_date_formats[yesterday_nodash]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_date_formats[today]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_date_formats[tomorrow]",
"tests/engine/test_flow_runner.py::TestContext::test_context_contains_date_formats[yesterday]",
"tests/engine/test_flow_runner.py::TestContext::test_context_includes_date",
"tests/engine/test_flow_runner.py::TestContext::test_user_provided_context_is_prioritized[outer_context0-inner_context0-inner]",
"tests/engine/test_flow_runner.py::test_task_logs_survive_if_timeout_is_used[local]",
"tests/engine/test_flow_runner.py::test_task_logs_survive_if_timeout_is_used[sync]",
"tests/engine/test_flow_runner.py::test_task_logs_survive_if_timeout_is_used[mproc]",
"tests/engine/test_flow_runner.py::test_task_logs_survive_if_timeout_is_used[mthread]",
"tests/engine/test_flow_runner.py::test_constant_tasks_arent_submitted",
"tests/engine/test_flow_runner.py::test_constant_tasks_arent_submitted_when_mapped",
"tests/engine/test_flow_runner.py::test_dask_executor_with_flow_runner_sets_task_keys"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-23T22:15:05Z" | apache-2.0 |
|
PrefectHQ__prefect-5237 | diff --git a/changes/pr5237.yaml b/changes/pr5237.yaml
new file mode 100644
index 0000000000..aab6b48e00
--- /dev/null
+++ b/changes/pr5237.yaml
@@ -0,0 +1,5 @@
+enhancement:
+ - "Allow passing of proxies argument to slack_notifier - [#5237](https://github.com/PrefectHQ/prefect/pull/5237)"
+
+contributor:
+  - "[Vincent Chéry](https://github.com/VincentAntoine)"
diff --git a/docs/orchestration/flow-runs/inspection.md b/docs/orchestration/flow-runs/inspection.md
index 8d410cf50b..d42d3b2f8a 100644
--- a/docs/orchestration/flow-runs/inspection.md
+++ b/docs/orchestration/flow-runs/inspection.md
@@ -66,14 +66,28 @@ flow_run.state.message
### Getting flow run logs
-<!-- TODO after CLI merged -->
+Get a List of `FlowRunLog` from the flow run using `.get_logs()`:
+
+```python
+flow_run.get_logs()
+# [
+# FlowRunLog(timestamp=DateTime(1978, 03, 08, 22, 30, 00, 000000, tzinfo=Timezone('+00:00')), level=20, message='Submitted for execution: Task XXXXXXX'),
+# FlowRunLog(timestamp=DateTime(1978, 03, 08, 22, 30, 01, 123456, tzinfo=Timezone('+00:00')), level=20, message="Beginning Flow run for 'radio_show'"),
+# FlowRunLog(timestamp=DateTime(1978, 03, 08, 22, 30, 02, 234567, tzinfo=Timezone('+00:00')), level=20, message="Task 'series_one': Starting task run..."),
+# FlowRunLog(timestamp=DateTime(1978, 03, 08, 22, 42, 42, 424242, tzinfo=Timezone('+00:00')), level=20, message='It feels like I just had my brains smashed out by a slice of lemon wrapped round a large gold brick.'),
+# FlowRunLog(timestamp=DateTime(1978, 04, 12, 22, 59, 59, 987654, tzinfo=Timezone('+00:00')), level=20, message="Task 'series_one': Finished task run for task with final state: 'Success'"),
+# FlowRunLog(timestamp=DateTime(1978, 04, 12, 23, 00, 00, 000000, tzinfo=Timezone('+00:00')), level=20, message='Flow run SUCCESS: all reference tasks succeeded')
+# ]
+```
+
+Each `FlowRunLog` in the list contains a log message, along with the log level and timestamp.
### Getting flow metadata
Metadata about the flow that the flow run was created for is accessible using `.get_flow_metadata()`
```python
-flow_run.get_flow_metdata()
+flow_run.get_flow_metadata()
# FlowView(
# flow_id='8bdcf5b5-7598-49d1-a885-61612ca550de',
# name='hello-world',
diff --git a/src/prefect/utilities/notifications/notifications.py b/src/prefect/utilities/notifications/notifications.py
index d9d39bfeb7..99be913853 100644
--- a/src/prefect/utilities/notifications/notifications.py
+++ b/src/prefect/utilities/notifications/notifications.py
@@ -252,6 +252,7 @@ def slack_notifier(
only_states: list = None,
webhook_secret: str = None,
backend_info: bool = True,
+ proxies: dict = None,
) -> "prefect.engine.state.State":
"""
Slack state change handler; requires having the Prefect slack app installed. Works as a
@@ -274,6 +275,9 @@ def slack_notifier(
webhook URL; defaults to `"SLACK_WEBHOOK_URL"`
- backend_info (bool, optional): Whether to supply slack notification with urls
pointing to backend pages; defaults to True
+ - proxies (dict), optional): `dict` with "http" and/or "https" keys, passed to
+ `requests.post` - for situations where a proxy is required to send requests to the
+ Slack webhook
Returns:
- State: the `new_state` object that was provided
@@ -310,7 +314,7 @@ def slack_notifier(
import requests
form_data = slack_message_formatter(tracked_obj, new_state, backend_info)
- r = requests.post(webhook_url, json=form_data)
+ r = requests.post(webhook_url, json=form_data, proxies=proxies)
if not r.ok:
raise ValueError("Slack notification for {} failed".format(tracked_obj))
return new_state
| PrefectHQ/prefect | 80efb2c4f509ba6ee324c6c5c9b214a745eae6a4 | diff --git a/tests/utilities/notifications/test_notifications.py b/tests/utilities/notifications/test_notifications.py
index a2611f6e7b..83e65bff53 100644
--- a/tests/utilities/notifications/test_notifications.py
+++ b/tests/utilities/notifications/test_notifications.py
@@ -260,6 +260,16 @@ def test_slack_notifier_is_curried_and_uses_only_states(monkeypatch, state):
assert ok.called is isinstance(state, TriggerFailed)
+def test_slack_notifier_uses_proxies(monkeypatch):
+ post = MagicMock(ok=True)
+ monkeypatch.setattr(requests, "post", post)
+ state = Failed(message="1", result=0)
+ with set_temporary_config({"cloud.use_local_secrets": True}):
+ with prefect.context(secrets=dict(SLACK_WEBHOOK_URL="")):
+ slack_notifier(Task(), "", state, proxies={"http": "some.proxy.I.P"})
+ assert post.call_args[1]["proxies"] == {"http": "some.proxy.I.P"}
+
+
def test_gmail_notifier_sends_simple_email(monkeypatch):
smtp = MagicMock()
sendmail = MagicMock()
| Add proxies to slack_notifier
## Current behavior
`prefect.utilities.notifications.slack_notifier` sends requests to the Slack webhook using `requests.post` without allowing `proxies={"http": "0.123.456.789"}` to be passed, which is problematic when Internet access must go through a proxy.
It is possible to work around this by setting the HTTP_PROXY / HTTPS_PROXY environment variables, but that is neither ideal nor always possible when running on private infrastructure which by default does not access the Internet and should not have these variables set.
## Proposed behavior
Add an optional `proxies` argument to `prefect.utilities.notifications.slack_notifier` which gets passed to `requests.post`.
## Example
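For illustration, a rough sketch of how the proposed argument could be used (the `proxies` keyword is the addition proposed here, the proxy address below is only a placeholder, and `only_states` follows the existing handler API):
```python
from prefect import task
from prefect.engine.state import Failed
from prefect.utilities.notifications import slack_notifier

# Hypothetical usage: `proxies` would be forwarded as-is to requests.post
handler = slack_notifier(
    only_states=[Failed],
    proxies={"https": "http://proxy.internal:3128"},  # placeholder proxy address
)

@task(state_handlers=[handler])
def crunch_numbers():
    ...
```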
This would make it possible to use Slack notifications from Prefect running on private infrastructure. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_uses_proxies"
] | [
"tests/utilities/notifications/test_notifications.py::test_callback_factory_generates_pickleable_objs",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Running]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Pending]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Finished]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Failed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Cached]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Success]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Running]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Pending]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Finished]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Failed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Cached]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Success]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_string_message[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Running]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Pending]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Finished]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Failed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Cached]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Success]",
"tests/utilities/notifications/test_notifications.py::test_formatter_formats_states_with_exception_message[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_every_state_gets_a_unique_color",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_returns_new_state_and_old_state_is_ignored",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_pulls_url_from_secret",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_ignores_ignore_states",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Running]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Pending]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Finished]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Failed]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Cached]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Success]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_ignores_ignore_states[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Running]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Pending]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Finished]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Failed]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Cached]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Success]",
"tests/utilities/notifications/test_notifications.py::test_slack_notifier_is_curried_and_uses_only_states[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_sends_simple_email",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Running]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Pending]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Finished]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Failed]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[TriggerFailed]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Cached]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Scheduled]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Retrying]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Success]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_is_curried_and_uses_only_states[Skipped]",
"tests/utilities/notifications/test_notifications.py::test_gmail_notifier_ignores_ignore_states"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-12-15T20:08:18Z" | apache-2.0 |
|
PrincetonUniversity__PsyNeuLink-897 | diff --git a/psyneulink/components/component.py b/psyneulink/components/component.py
index 6fe282529e..2a9f3a19bd 100644
--- a/psyneulink/components/component.py
+++ b/psyneulink/components/component.py
@@ -2649,9 +2649,8 @@ class Component(object):
kwargs_to_instantiate = function.ClassDefaults.values().copy()
if function_params is not None:
kwargs_to_instantiate.update(**function_params)
- # matrix is unexpected at this point
# default_variable should not be in any function_params but sometimes it is
- kwargs_to_remove = [MATRIX, 'default_variable']
+ kwargs_to_remove = ['default_variable']
for arg in kwargs_to_remove:
try:
@@ -2659,6 +2658,14 @@ class Component(object):
except KeyError:
pass
+ # matrix is determined from parameter state based on string value in function_params
+ # update it here if needed
+ if MATRIX in kwargs_to_instantiate:
+ try:
+ kwargs_to_instantiate[MATRIX] = self.parameter_states[MATRIX].instance_defaults.value
+ except (AttributeError, KeyError, TypeError):
+ pass
+
_, kwargs = prune_unused_args(function.__init__, args=[], kwargs=kwargs_to_instantiate)
self.function_object = function(default_variable=function_variable, **kwargs)
else:
diff --git a/psyneulink/components/functions/function.py b/psyneulink/components/functions/function.py
index a3822272a7..e83a9d60fd 100644
--- a/psyneulink/components/functions/function.py
+++ b/psyneulink/components/functions/function.py
@@ -4004,7 +4004,7 @@ class LinearMatrix(TransferFunction): # ---------------------------------------
@tc.typecheck
def __init__(self,
default_variable=None,
- matrix:tc.optional(is_matrix) = None,
+ matrix=None,
params=None,
owner=None,
prefs: is_pref_set = None):
| PrincetonUniversity/PsyNeuLink | c95f8b7f357d90bc5b5e65d2ef3c3932e1175cc5 | diff --git a/tests/projections/test_projections.py b/tests/projections/test_projections.py
new file mode 100644
index 0000000000..482a6ab589
--- /dev/null
+++ b/tests/projections/test_projections.py
@@ -0,0 +1,18 @@
+import numpy as np
+import psyneulink as pnl
+import pytest
+
+
[email protected](
+ 'projection_type, sender_variable, receiver_variable, projection_value, function_value',
+ [
+ (pnl.MappingProjection, [0, 0, 0], [0, 0], np.array([0, 0]), np.array([0, 0]))
+ ]
+)
+def test_value_shapes_with_matrix(projection_type, sender_variable, receiver_variable, projection_value, function_value):
+ A = pnl.TransferMechanism(default_variable=sender_variable)
+ B = pnl.TransferMechanism(default_variable=receiver_variable)
+ P = projection_type(sender=A, receiver=B)
+
+ assert P.instance_defaults.value.shape == projection_value.shape
+ assert P.function_object.instance_defaults.value.shape == function_value.shape
| Inconsistent shape of instance_defaults.value between MappingProjection and its function_object
```
>>> import psyneulink as pnl
>>> A = pnl.components.mechanisms.TransferMechanism(default_variable=[0,0,0])
>>> B = pnl.components.mechanisms.TransferMechanism(default_variable=[0,0])
>>> P = pnl.components.projections.MappingProjection(sender=A, receiver=B)
>>> print(P.instance_defaults.value)
[ 0. 0.]
>>> print(P.function_object.instance_defaults.value)
[ 0. 0. 0.]
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/projections/test_projections.py::test_value_shapes_with_matrix[MappingProjection-sender_variable0-receiver_variable0-projection_value0-function_value0]"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2018-08-09T22:32:59Z" | apache-2.0 |
|
Project-MONAI__MONAI-640 | diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst
index b02db767..1c8b8415 100644
--- a/docs/source/transforms.rst
+++ b/docs/source/transforms.rst
@@ -373,6 +373,12 @@ Vanilla Transforms
:members:
:special-members: __call__
+`LabelToContour`
+~~~~~~~~~~~~~~~~
+.. autoclass:: LabelToContour
+ :members:
+ :special-members: __call__
+
Dictionary-based Transforms
---------------------------
@@ -701,6 +707,12 @@ Dictionary-based Transforms
:members:
:special-members: __call__
+`LabelToContourd`
+~~~~~~~~~~~~~~~~~
+.. autoclass:: LabelToContourd
+ :members:
+ :special-members: __call__
+
`Lambdad`
~~~~~~~~~
.. autoclass:: Lambdad
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index c50833e4..582fb3de 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -16,6 +16,7 @@ https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
from typing import Optional, Callable
import torch
+import torch.nn.functional as F
from monai.transforms.compose import Transform
from monai.networks.utils import one_hot
from monai.transforms.utils import get_largest_connected_component_mask
@@ -296,56 +297,44 @@ class KeepLargestConnectedComponent(Transform):
class LabelToContour(Transform):
"""
- Return the contour flag of objects in mask images that only compose of 0 and 1, with Laplace kernel
- set as default for edge detection.
+ Return the contour of binary input images that only compose of 0 and 1, with Laplace kernel
+ set as default for edge detection. Typical usage is to plot the edge of label or segmentation output.
Args:
- kernel_type: the method applied to do edge detection.
+ kernel_type: the method applied to do edge detection, default is "Laplace".
+
"""
- def __init__(self, kernel_type="Laplace"):
+ def __init__(self, kernel_type: str = "Laplace"):
+ if kernel_type != "Laplace":
+ raise NotImplementedError("currently, LabelToContour only supports Laplace kernel.")
self.kernel_type = kernel_type
- def __find_img_contour(self, img):
- channels = img.shape[1]
- conv = torch.nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False, groups=channels)
- kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
- kernel = kernel.repeat(channels, 1, 1, 1)
- conv.weight = torch.nn.Parameter(kernel, requires_grad=False)
-
- contour_img = conv(img)
- torch.clamp_(contour_img, min=0.0, max=1.0)
- return contour_img
-
def __call__(self, img):
"""
Args:
- img: torch tensor of the img that you want to find the contour of, with shape being
- (batch_size, channels, width, height[, depth])
+ img: torch tensor data to extract the contour, with shape: [batch_size, channels, height, width[, depth]]
Returns:
A torch tensor with the same shape as img, note:
- 1. It's the binary classification result of whether a pixel is edge or not.
- 2. In order to keep the original shape of mask image, we use padding as default.
- 3. The edge detection is just approximate due to
- a) defects inherent to Laplace kernel, ideally the edge should be thin enough, but now it has a thickness.
- b) need to search the optimal/better thresold for classification
- """
- if self.kernel_type != "Laplace":
- raise NotImplementedError
- if img.ndim != 4 and img.ndim != 5:
- raise RuntimeError("img.ndim should be 4 or 5")
- if img.ndim == 4:
- return self.__find_img_contour(img)
+ 1. it's the binary classification result of whether a pixel is edge or not.
+ 2. in order to keep the original shape of mask image, we use padding as default.
+ 3. the edge detection is just approximate because it defects inherent to Laplace kernel,
+ ideally the edge should be thin enough, but now it has a thickness.
+ """
channels = img.shape[1]
+ if img.ndim == 4:
+ kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
+ kernel = kernel.repeat(channels, 1, 1, 1)
+ contour_img = F.conv2d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
+ elif img.ndim == 5:
+ kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
+ kernel[1, 1, 1] = 26
+ kernel = kernel.repeat(channels, 1, 1, 1, 1)
+ contour_img = F.conv3d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
+ else:
+ raise RuntimeError("the dimensions of img should be 4 or 5.")
- conv = torch.nn.Conv3d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False, groups=channels)
- kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
- kernel[1, 1, 1] = 26
- kernel = kernel.repeat(channels, 1, 1, 1, 1)
- conv.weight = torch.nn.Parameter(kernel, requires_grad=False)
-
- contour_img = conv(img)
torch.clamp_(contour_img, min=0.0, max=1.0)
return contour_img
diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py
index dbc4a26a..53c3d3fa 100644
--- a/monai/transforms/post/dictionary.py
+++ b/monai/transforms/post/dictionary.py
@@ -20,7 +20,13 @@ from typing import Optional
from monai.config.type_definitions import KeysCollection
from monai.utils.misc import ensure_tuple_rep
from monai.transforms.compose import MapTransform
-from monai.transforms.post.array import SplitChannel, Activations, AsDiscrete, KeepLargestConnectedComponent
+from monai.transforms.post.array import (
+ SplitChannel,
+ Activations,
+ AsDiscrete,
+ KeepLargestConnectedComponent,
+ LabelToContour,
+)
class SplitChanneld(MapTransform):
@@ -145,7 +151,7 @@ class AsDiscreted(MapTransform):
class KeepLargestConnectedComponentd(MapTransform):
"""
- dictionary-based wrapper of :py:class:monai.transforms.utility.array.KeepLargestConnectedComponent.
+ dictionary-based wrapper of :py:class:monai.transforms.KeepLargestConnectedComponent.
"""
def __init__(
@@ -176,7 +182,30 @@ class KeepLargestConnectedComponentd(MapTransform):
def __call__(self, data):
d = dict(data)
- for idx, key in enumerate(self.keys):
+ for key in self.keys:
+ d[key] = self.converter(d[key])
+ return d
+
+
+class LabelToContourd(MapTransform):
+ """
+ dictionary-based wrapper of :py:class:monai.transforms.LabelToContour.
+ """
+
+ def __init__(self, keys: KeysCollection, kernel_type: str = "Laplace"):
+ """
+ Args:
+ keys: keys of the corresponding items to be transformed.
+ See also: :py:class:`monai.transforms.compose.MapTransform`
+ kernel_type: the method applied to do edge detection, default is "Laplace".
+
+ """
+ super().__init__(keys)
+ self.converter = LabelToContour(kernel_type=kernel_type)
+
+ def __call__(self, data):
+ d = dict(data)
+ for key in self.keys:
d[key] = self.converter(d[key])
return d
@@ -185,3 +214,4 @@ SplitChannelD = SplitChannelDict = SplitChanneld
ActivationsD = ActivationsDict = Activationsd
AsDiscreteD = AsDiscreteDict = AsDiscreted
KeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd
+LabelToContourD = LabelToContourDict = LabelToContourd
| Project-MONAI/MONAI | 39f6df581d72562c43207ddf408195c10bf845e7 | diff --git a/tests/test_label_to_contour.py b/tests/test_label_to_contour.py
index e90dcc73..cd9bf88d 100644
--- a/tests/test_label_to_contour.py
+++ b/tests/test_label_to_contour.py
@@ -171,10 +171,6 @@ class TestContour(unittest.TestCase):
error_input = torch.rand(1, 2, 3, 4, 5, 6)
self.assertRaises(RuntimeError, LabelToContour(**input_param), error_input)
- # check invalid kernel type
- input_param["kernel_type"] = "Sobel"
- self.assertRaises(NotImplementedError, LabelToContour(**input_param), test_cube)
-
if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_label_to_contourd.py b/tests/test_label_to_contourd.py
new file mode 100644
index 00000000..4e184c00
--- /dev/null
+++ b/tests/test_label_to_contourd.py
@@ -0,0 +1,176 @@
+# Copyright 2020 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import torch
+import numpy as np
+from monai.transforms import LabelToContourd
+
+expected_output_for_cube = np.array(
+ [
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ ]
+)
+
+
+def gen_fixed_cube():
+ scale, core_start, core_end = 8, 1, 7
+ cube = torch.zeros(scale, scale, scale)
+ cube[core_start:core_end, core_start:core_end, core_start:core_end] = torch.ones(
+ core_end - core_start, core_end - core_start, core_end - core_start
+ )
+ cube = torch.unsqueeze(cube, 0)
+
+ batch_size, channels = 10, 6
+ cube = cube.repeat(batch_size, channels, 1, 1, 1)
+ return cube, expected_output_for_cube
+
+
+def gen_fixed_img():
+ img = torch.tensor(
+ [
+ [0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1],
+ ],
+ dtype=torch.float32,
+ )
+ batch_size, channels = 10, 6
+ img = img.repeat(batch_size, channels, 1, 1)
+ expected_output_for_img = torch.tensor(
+ [
+ [0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 1, 1, 0, 0, 1],
+ [0, 1, 1, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1, 1, 1],
+ ],
+ dtype=torch.float32,
+ )
+ return img, expected_output_for_img
+
+
+class TestContourd(unittest.TestCase):
+ def test_contour(self):
+ input_param = {"keys": "img", "kernel_type": "Laplace"}
+
+ # check 5-dim input data
+ test_cube, expected_output = gen_fixed_cube()
+ test_result_cube = LabelToContourd(**input_param)({"img": test_cube})
+ self.assertEqual(test_result_cube["img"].shape, test_cube.shape)
+
+ test_result_np = test_result_cube["img"].data.cpu().numpy()
+ batch_size, channels = test_cube.shape[0], test_cube.shape[1]
+ for batch in range(batch_size):
+ for channel in range(channels):
+ np.testing.assert_allclose(test_result_np[batch, channel, ...], expected_output)
+
+ # check 4-dim input data
+ test_img, expected_output = gen_fixed_img()
+ batch_size, channels = test_img.shape[0], test_img.shape[1]
+ test_result_img = LabelToContourd(**input_param)({"img": test_img})
+ self.assertEqual(test_result_img["img"].shape, test_img.shape)
+
+ test_result_np = test_result_img["img"].data.cpu().numpy()
+ for batch in range(batch_size):
+ for channel in range(channels):
+ np.testing.assert_allclose(test_result_img["img"][batch, channel, ...], expected_output)
+
+ # check invalid input data
+ error_input = {"img": torch.rand(1, 2, 3)}
+ self.assertRaises(RuntimeError, LabelToContourd(**input_param), error_input)
+ error_input = {"img": torch.rand(1, 2, 3, 4, 5, 6)}
+ self.assertRaises(RuntimeError, LabelToContourd(**input_param), error_input)
+
+
+if __name__ == "__main__":
+ unittest.main()
| add dictionary level LabelToContourd transform
**Is your feature request related to a problem? Please describe.**
As we already have array level LabelToContour transform, need to update the document and add dictionary level version.
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_label_to_contour.py::TestContour::test_contour",
"tests/test_label_to_contourd.py::TestContourd::test_contour"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-06-28T23:34:41Z" | apache-2.0 |
|
ProjectQ-Framework__ProjectQ-53 | diff --git a/projectq/cengines/_replacer/__init__.py b/projectq/cengines/_replacer/__init__.py
index e55b34f..b35d31e 100755
--- a/projectq/cengines/_replacer/__init__.py
+++ b/projectq/cengines/_replacer/__init__.py
@@ -15,4 +15,3 @@ from ._decomposition_rule_set import DecompositionRuleSet
from ._replacer import (AutoReplacer,
InstructionFilter,
NoGateDecompositionError)
-
diff --git a/projectq/ops/_basics.py b/projectq/ops/_basics.py
index bf0c225..8a37f4f 100755
--- a/projectq/ops/_basics.py
+++ b/projectq/ops/_basics.py
@@ -416,7 +416,8 @@ class BasicMathGate(BasicGate):
gate, given the input to the gate (a tuple of quantum registers).
Args:
- qubits (tuple<Qureg>): Qubits to which the math gate is being applied.
+ qubits (tuple<Qureg>): Qubits to which the math gate is being
+ applied.
Returns:
math_fun (function): Python function describing the action of this
diff --git a/projectq/ops/_metagates.py b/projectq/ops/_metagates.py
index b3841c7..975483a 100755
--- a/projectq/ops/_metagates.py
+++ b/projectq/ops/_metagates.py
@@ -203,9 +203,9 @@ class ControlledGate(BasicGate):
raise ControlQubitError("Wrong number of control qubits. "
"First qureg(s) need to contain exactly "
"the required number of control qubits.")
- cmd = BasicGate.generate_command(self._gate, tuple(gate_quregs))
- cmd.add_control_qubits(ctrl)
- apply_command(cmd)
+ import projectq.meta
+ with projectq.meta.Control(gate_quregs[0][0].engine, ctrl):
+ self._gate | tuple(gate_quregs)
def __eq__(self, other):
""" Compare two ControlledGate objects (return True if equal). """
| ProjectQ-Framework/ProjectQ | 7c1f33ed06a2fdc53b96311c08f068fd40591fc1 | diff --git a/projectq/cengines/_replacer/_replacer_test.py b/projectq/cengines/_replacer/_replacer_test.py
index cd9266d..43bb487 100755
--- a/projectq/cengines/_replacer/_replacer_test.py
+++ b/projectq/cengines/_replacer/_replacer_test.py
@@ -15,7 +15,9 @@
import pytest
from projectq import MainEngine
-from projectq.cengines import DummyEngine, DecompositionRuleSet, DecompositionRule
+from projectq.cengines import (DummyEngine,
+ DecompositionRuleSet,
+ DecompositionRule)
from projectq.ops import H, X, Command, S, Rx, NotInvertible, Ry, BasicGate
from projectq.cengines._replacer import _replacer
@@ -59,14 +61,16 @@ def make_decomposition_rule_set():
return True
result.add_decomposition_rule(
- DecompositionRule(TestGate.__class__, decompose_test1, recognize_test))
+ DecompositionRule(TestGate.__class__, decompose_test1,
+ recognize_test))
def decompose_test2(cmd):
qb = cmd.qubits
H | qb
result.add_decomposition_rule(
- DecompositionRule(TestGate.__class__, decompose_test2, recognize_test))
+ DecompositionRule(TestGate.__class__, decompose_test2,
+ recognize_test))
assert len(result.decompositions[TestGate.__class__.__name__]) == 2
return result
@@ -141,22 +145,28 @@ def test_auto_replacer_use_inverse_decomposition():
# Create test gate and inverse
class NoMagicGate(BasicGate):
pass
+
class MagicGate(BasicGate):
def get_inverse(self):
return NoMagicGate()
+
def decompose_no_magic_gate(cmd):
qb = cmd.qubits
Rx(0.6) | qb
H | qb
+
def recognize_no_magic_gate(cmd):
return True
+
rule_set.add_decomposition_rule(DecompositionRule(NoMagicGate,
decompose_no_magic_gate,
recognize_no_magic_gate))
+
def magic_filter(self, cmd):
if cmd.gate == MagicGate():
return False
return True
+
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend=backend,
engine_list=[_replacer.AutoReplacer(rule_set),
@@ -181,7 +191,7 @@ def test_auto_replacer_adds_tags(fixture_gate_filter):
assert len(rule_set.decompositions[TestGate.__class__.__name__]) == 2
assert len(backend.received_commands) == 0
qb = eng.allocate_qubit()
- cmd = Command(eng, TestGate, (qb,) )
+ cmd = Command(eng, TestGate, (qb,))
cmd.tags = ["AddedTag"]
eng.send([cmd])
eng.flush()
diff --git a/projectq/ops/_metagates_test.py b/projectq/ops/_metagates_test.py
index d8defc4..8fa94fa 100755
--- a/projectq/ops/_metagates_test.py
+++ b/projectq/ops/_metagates_test.py
@@ -21,12 +21,27 @@ from projectq.types import Qubit, Qureg
from projectq import MainEngine
from projectq.cengines import DummyEngine
from projectq.ops import (T, Y, NotInvertible, Entangle, Rx,
- FastForwardingGate, Command,
- ClassicalInstructionGate)
+ FastForwardingGate, Command, C,
+ ClassicalInstructionGate, All)
from projectq.ops import _metagates
+def test_tensored_controlled_gate():
+ saving_backend = DummyEngine(save_commands=True)
+ main_engine = MainEngine(backend=saving_backend,
+ engine_list=[DummyEngine()])
+ gate = Rx(0.6)
+ qubit0 = Qubit(main_engine, 0)
+ qubit1 = Qubit(main_engine, 1)
+ qubit2 = Qubit(main_engine, 2)
+ target_qubits = [qubit1, qubit2]
+ C(All(gate)) | (qubit0, target_qubits)
+
+ assert saving_backend.received_commands[-1].gate == gate
+ assert len(saving_backend.received_commands[-1].control_qubits) == 1
+
+
def test_daggered_gate_init():
# Choose gate which does not have an inverse gate:
not_invertible_gate = T
| Controlled tensors work inconsistently w.r.t. non-controlled tensors
When you apply:
All(X) | targets
The engine receives X commands via the __or__ implementation in Tensor.
But if you apply:
C(All(X)) | (control, targets)
Then the __or__ operation is bypassed and the engine receives a Tensor command instead. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"projectq/ops/_metagates_test.py::test_tensored_controlled_gate"
] | [
"projectq/cengines/_replacer/_replacer_test.py::test_filter_engine",
"projectq/cengines/_replacer/_replacer_test.py::test_auto_replacer_default_chooser",
"projectq/cengines/_replacer/_replacer_test.py::test_auto_replacer_decomposition_chooser",
"projectq/cengines/_replacer/_replacer_test.py::test_auto_replacer_no_rule_found",
"projectq/cengines/_replacer/_replacer_test.py::test_auto_replacer_use_inverse_decomposition",
"projectq/cengines/_replacer/_replacer_test.py::test_auto_replacer_adds_tags",
"projectq/ops/_metagates_test.py::test_daggered_gate_init",
"projectq/ops/_metagates_test.py::test_daggered_gate_str",
"projectq/ops/_metagates_test.py::test_daggered_gate_get_inverse",
"projectq/ops/_metagates_test.py::test_daggered_gate_comparison",
"projectq/ops/_metagates_test.py::test_get_inverse",
"projectq/ops/_metagates_test.py::test_controlled_gate_init",
"projectq/ops/_metagates_test.py::test_controlled_gate_str",
"projectq/ops/_metagates_test.py::test_controlled_gate_get_inverse",
"projectq/ops/_metagates_test.py::test_controlled_gate_or",
"projectq/ops/_metagates_test.py::test_controlled_gate_comparison",
"projectq/ops/_metagates_test.py::test_c",
"projectq/ops/_metagates_test.py::test_tensor_init",
"projectq/ops/_metagates_test.py::test_tensor_str",
"projectq/ops/_metagates_test.py::test_tensor_get_inverse",
"projectq/ops/_metagates_test.py::test_tensor_comparison",
"projectq/ops/_metagates_test.py::test_tensor_or"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2017-04-18T07:51:47Z" | apache-2.0 |
|
PyCQA__flake8-1320 | diff --git a/src/flake8/checker.py b/src/flake8/checker.py
index bfd3f4d..7130df3 100644
--- a/src/flake8/checker.py
+++ b/src/flake8/checker.py
@@ -424,13 +424,23 @@ class FileChecker:
)
@staticmethod
- def _extract_syntax_information(exception):
- token = ()
- if len(exception.args) > 1:
+ def _extract_syntax_information(exception: Exception) -> Tuple[int, int]:
+ if (
+ len(exception.args) > 1
+ and exception.args[1]
+ and len(exception.args[1]) > 2
+ ):
token = exception.args[1]
- if token and len(token) > 2:
- row, column = token[1:3]
+ row, column = token[1:3]
+ elif (
+ isinstance(exception, tokenize.TokenError)
+ and len(exception.args) == 2
+ and len(exception.args[1]) == 2
+ ):
+ token = ()
+ row, column = exception.args[1]
else:
+ token = ()
row, column = (1, 0)
if column > 0 and token and isinstance(exception, SyntaxError):
@@ -463,14 +473,7 @@ class FileChecker:
def run_ast_checks(self) -> None:
"""Run all checks expecting an abstract syntax tree."""
assert self.processor is not None
- try:
- ast = self.processor.build_ast()
- except (ValueError, SyntaxError, TypeError) as e:
- row, column = self._extract_syntax_information(e)
- self.report(
- "E999", row, column, f"{type(e).__name__}: {e.args[0]}"
- )
- return
+ ast = self.processor.build_ast()
for plugin in self.checks["ast_plugins"]:
checker = self.run_check(plugin, tree=ast)
@@ -548,7 +551,6 @@ class FileChecker:
def process_tokens(self):
"""Process tokens and trigger checks.
- This can raise a :class:`flake8.exceptions.InvalidSyntax` exception.
Instead of using this directly, you should use
:meth:`flake8.checker.FileChecker.run_checks`.
"""
@@ -578,15 +580,13 @@ class FileChecker:
"""Run checks against the file."""
assert self.processor is not None
try:
- self.process_tokens()
self.run_ast_checks()
- except exceptions.InvalidSyntax as exc:
- self.report(
- exc.error_code,
- exc.line_number,
- exc.column_number,
- exc.error_message,
- )
+ self.process_tokens()
+ except (SyntaxError, tokenize.TokenError) as e:
+ code = "E902" if isinstance(e, tokenize.TokenError) else "E999"
+ row, column = self._extract_syntax_information(e)
+ self.report(code, row, column, f"{type(e).__name__}: {e.args[0]}")
+ return
logical_lines = self.processor.statistics["logical lines"]
self.statistics["logical lines"] = logical_lines
diff --git a/src/flake8/exceptions.py b/src/flake8/exceptions.py
index 4b0ddd1..45db94d 100644
--- a/src/flake8/exceptions.py
+++ b/src/flake8/exceptions.py
@@ -33,23 +33,6 @@ class FailedToLoadPlugin(Flake8Exception):
}
-class InvalidSyntax(Flake8Exception):
- """Exception raised when tokenizing a file fails."""
-
- def __init__(self, exception: Exception) -> None:
- """Initialize our InvalidSyntax exception."""
- self.original_exception = exception
- self.error_message = f"{type(exception).__name__}: {exception.args[0]}"
- self.error_code = "E902"
- self.line_number = 1
- self.column_number = 0
- super().__init__(exception)
-
- def __str__(self) -> str:
- """Format our exception message."""
- return self.error_message
-
-
class PluginRequestedUnknownParameters(Flake8Exception):
"""The plugin requested unknown parameters."""
diff --git a/src/flake8/processor.py b/src/flake8/processor.py
index 86709c1..fdc47c6 100644
--- a/src/flake8/processor.py
+++ b/src/flake8/processor.py
@@ -13,7 +13,6 @@ from typing import Tuple
import flake8
from flake8 import defaults
-from flake8 import exceptions
from flake8 import utils
LOG = logging.getLogger(__name__)
@@ -125,20 +124,12 @@ class FileProcessor:
@property
def file_tokens(self) -> List[_Token]:
- """Return the complete set of tokens for a file.
-
- Accessing this attribute *may* raise an InvalidSyntax exception.
-
- :raises: flake8.exceptions.InvalidSyntax
- """
+ """Return the complete set of tokens for a file."""
if self._file_tokens is None:
line_iter = iter(self.lines)
- try:
- self._file_tokens = list(
- tokenize.generate_tokens(lambda: next(line_iter))
- )
- except (tokenize.TokenError, SyntaxError) as exc:
- raise exceptions.InvalidSyntax(exception=exc)
+ self._file_tokens = list(
+ tokenize.generate_tokens(lambda: next(line_iter))
+ )
return self._file_tokens
@@ -274,20 +265,12 @@ class FileProcessor:
return arguments
def generate_tokens(self) -> Generator[_Token, None, None]:
- """Tokenize the file and yield the tokens.
-
- :raises flake8.exceptions.InvalidSyntax:
- If a :class:`tokenize.TokenError` is raised while generating
- tokens.
- """
- try:
- for token in tokenize.generate_tokens(self.next_line):
- if token[2][0] > self.total_lines:
- break
- self.tokens.append(token)
- yield token
- except (tokenize.TokenError, SyntaxError) as exc:
- raise exceptions.InvalidSyntax(exception=exc)
+ """Tokenize the file and yield the tokens."""
+ for token in tokenize.generate_tokens(self.next_line):
+ if token[2][0] > self.total_lines:
+ break
+ self.tokens.append(token)
+ yield token
def _noqa_line_range(self, min_line: int, max_line: int) -> Dict[int, str]:
line_range = range(min_line, max_line + 1)
@@ -299,7 +282,7 @@ class FileProcessor:
if self._noqa_line_mapping is None:
try:
file_tokens = self.file_tokens
- except exceptions.InvalidSyntax:
+ except (tokenize.TokenError, SyntaxError):
# if we failed to parse the file tokens, we'll always fail in
# the future, so set this so the code does not try again
self._noqa_line_mapping = {}
| PyCQA/flake8 | 645cd71f571da1cdc42683cf4228b537ddc2685f | diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index 45fe9de..5c99d3c 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -1,6 +1,7 @@
"""Integration tests for the main entrypoint of flake8."""
import json
import os
+import sys
from unittest import mock
import pytest
@@ -186,8 +187,15 @@ def test_tokenization_error_but_not_syntax_error(tmpdir, capsys):
tmpdir.join("t.py").write("b'foo' \\\n")
_call_main(["t.py"], retv=1)
+ if hasattr(sys, "pypy_version_info"): # pragma: no cover (pypy)
+ expected = "t.py:2:1: E999 SyntaxError: end of file (EOF) in multi-line statement\n" # noqa: E501
+ elif sys.version_info < (3, 8): # pragma: no cover (<cp38)
+ expected = "t.py:2:1: E902 TokenError: EOF in multi-line statement\n"
+ else: # pragma: no cover (cp38+)
+ expected = "t.py:1:8: E999 SyntaxError: unexpected EOF while parsing\n"
+
out, err = capsys.readouterr()
- assert out == "t.py:1:1: E902 TokenError: EOF in multi-line statement\n"
+ assert out == expected
assert err == ""
@@ -197,8 +205,12 @@ def test_tokenization_error_is_a_syntax_error(tmpdir, capsys):
tmpdir.join("t.py").write("if True:\n pass\n pass\n")
_call_main(["t.py"], retv=1)
+ if hasattr(sys, "pypy_version_info"): # pragma: no cover (pypy)
+ expected = "t.py:3:2: E999 IndentationError: unindent does not match any outer indentation level\n" # noqa: E501
+ else: # pragma: no cover (cpython)
+ expected = "t.py:3:5: E999 IndentationError: unindent does not match any outer indentation level\n" # noqa: E501
+
out, err = capsys.readouterr()
- expected = "t.py:1:1: E902 IndentationError: unindent does not match any outer indentation level\n" # noqa: E501
assert out == expected
assert err == ""
diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
index e9be495..6be1ebd 100644
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -13,7 +13,6 @@ from flake8 import exceptions
plugin_name="plugin_name",
exception=ValueError("boom!"),
),
- exceptions.InvalidSyntax(exception=ValueError("Unexpected token: $")),
exceptions.PluginRequestedUnknownParameters(
plugin={"plugin_name": "plugin_name"},
exception=ValueError("boom!"),
diff --git a/tests/unit/test_file_checker.py b/tests/unit/test_file_checker.py
index f433ea6..bcc8b32 100644
--- a/tests/unit/test_file_checker.py
+++ b/tests/unit/test_file_checker.py
@@ -7,30 +7,6 @@ import flake8
from flake8 import checker
[email protected]("flake8.processor.FileProcessor")
-def test_run_ast_checks_handles_SyntaxErrors(FileProcessor): # noqa: N802,N803
- """Stress our SyntaxError handling.
-
- Related to: https://github.com/pycqa/flake8/issues/169
- """
- processor = mock.Mock(lines=[])
- FileProcessor.return_value = processor
- processor.build_ast.side_effect = SyntaxError(
- "Failed to build ast", ("", 1, 5, "foo(\n")
- )
- file_checker = checker.FileChecker(__file__, checks={}, options=object())
-
- with mock.patch.object(file_checker, "report") as report:
- file_checker.run_ast_checks()
-
- report.assert_called_once_with(
- "E999",
- 1,
- 3,
- "SyntaxError: Failed to build ast",
- )
-
-
@mock.patch("flake8.checker.FileChecker._make_processor", return_value=None)
def test_repr(*args):
"""Verify we generate a correct repr."""
| Wrong location reported for syntax errors
In GitLab by @scascketta on May 21, 2020, 14:14
*Please describe how you installed Flake8*
```
$ python3.8 -m venv .venv
$ source .venv/bin/activate
(.venv) $ pip install flake8
```
*Please provide the exact, unmodified output of `flake8 --bug-report`*
```
{
"dependencies": [],
"platform": {
"python_implementation": "CPython",
"python_version": "3.8.1",
"system": "Darwin"
},
"plugins": [
{
"is_local": false,
"plugin": "mccabe",
"version": "0.6.1"
},
{
"is_local": false,
"plugin": "pycodestyle",
"version": "2.6.0"
},
{
"is_local": false,
"plugin": "pyflakes",
"version": "2.2.0"
}
],
"version": "3.8.1"
}
```
*Please describe the problem or feature*
Running flake8 on a Python module with a syntax error produces the E902 TokenError with the wrong line and column reported.
*If this is a bug report, please explain with examples (and example code) what you expected to happen and what actually happened.*
The contents of the test file with invalid syntax:
```
(.venv) $ cat blah.py
print("hello, world")
)
```
Python reports the correct location of the syntax error:
```
(.venv) $ python3.8 blah.py
File "blah.py", line 2
)
^
SyntaxError: unmatched ')'
```
Running flake8 on it reports the wrong line and column of the syntax error:
```
(.venv) $ flake8 blah.py
blah.py:1:1: E902 TokenError: EOF in multi-line statement
```
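For context, the more useful position comes from the `SyntaxError` that Python raises while compiling the source — which is what the patch above switches to by running the AST build before tokenization. A minimal sketch (not flake8's actual code), using the same file contents:

```python
# Sketch: the SyntaxError from compile() carries the position Python reports.
src = 'print("hello, world")\n)\n'
try:
    compile(src, "blah.py", "exec")
except SyntaxError as exc:
    # On CPython 3.8 this points at line 2, matching `python blah.py` above.
    print(exc.lineno, exc.offset, exc.msg)
```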
Thank you for reading! | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/integration/test_main.py::test_tokenization_error_but_not_syntax_error",
"tests/integration/test_main.py::test_tokenization_error_is_a_syntax_error"
] | [
"tests/integration/test_main.py::test_diff_option",
"tests/integration/test_main.py::test_form_feed_line_split",
"tests/integration/test_main.py::test_e101_indent_char_does_not_reset",
"tests/integration/test_main.py::test_statistics_option",
"tests/integration/test_main.py::test_show_source_option",
"tests/integration/test_main.py::test_extend_exclude",
"tests/integration/test_main.py::test_malformed_per_file_ignores_error",
"tests/integration/test_main.py::test_bug_report_successful",
"tests/integration/test_main.py::test_specific_noqa_does_not_clobber_pycodestyle_noqa",
"tests/integration/test_main.py::test_specific_noqa_on_line_with_continuation",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline_trailing_ws",
"tests/integration/test_main.py::test_obtaining_args_from_sys_argv_when_not_explicity_provided",
"tests/integration/test_main.py::test_cli_config_option_respected",
"tests/integration/test_main.py::test_cli_isolated_overrides_config_option",
"tests/integration/test_main.py::test_file_not_found",
"tests/integration/test_main.py::test_output_file",
"tests/unit/test_exceptions.py::test_pickleable[err0]",
"tests/unit/test_exceptions.py::test_pickleable[err1]",
"tests/unit/test_exceptions.py::test_pickleable[err2]",
"tests/unit/test_file_checker.py::test_repr",
"tests/unit/test_file_checker.py::test_nonexistent_file",
"tests/unit/test_file_checker.py::test_raises_exception_on_failed_plugin"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-04-18T17:05:57Z" | mit |
|
PyCQA__flake8-1440 | diff --git a/docs/source/user/options.rst b/docs/source/user/options.rst
index faeb17e..9cdb0ee 100644
--- a/docs/source/user/options.rst
+++ b/docs/source/user/options.rst
@@ -40,6 +40,8 @@ Index of Options
- :option:`flake8 --quiet`
+- :option:`flake8 --color`
+
- :option:`flake8 --count`
- :option:`flake8 --diff`
@@ -181,6 +183,35 @@ Options and their Descriptions
quiet = 1
+.. option:: --color
+
+ :ref:`Go back to index <top>`
+
+ Whether to use color in output. Defaults to ``auto``.
+
+ Possible options are ``auto``, ``always``, and ``never``.
+
+ This **can** be specified in config files.
+
+ When color is enabled, the following substitutions are enabled:
+
+ - ``%(bold)s``
+ - ``%(black)s``
+ - ``%(red)s``
+ - ``%(green)s``
+ - ``%(yellow)s``
+ - ``%(blue)s``
+ - ``%(magenta)s``
+ - ``%(cyan)s``
+ - ``%(white)s``
+ - ``%(reset)s``
+
+ Example config file usage:
+
+ .. code-block:: ini
+
+ color = never
+
.. option:: --count
diff --git a/src/flake8/formatting/_windows_color.py b/src/flake8/formatting/_windows_color.py
new file mode 100644
index 0000000..1d2c73f
--- /dev/null
+++ b/src/flake8/formatting/_windows_color.py
@@ -0,0 +1,59 @@
+"""ctypes hackery to enable color processing on windows.
+
+See: https://github.com/pre-commit/pre-commit/blob/cb40e96/pre_commit/color.py
+"""
+import sys
+
+if sys.platform == "win32": # pragma: no cover (windows)
+
+ def _enable() -> None:
+ from ctypes import POINTER
+ from ctypes import windll
+ from ctypes import WinError
+ from ctypes import WINFUNCTYPE
+ from ctypes.wintypes import BOOL
+ from ctypes.wintypes import DWORD
+ from ctypes.wintypes import HANDLE
+
+ STD_ERROR_HANDLE = -12
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
+
+ def bool_errcheck(result, func, args):
+ if not result:
+ raise WinError()
+ return args
+
+ GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(
+ ("GetStdHandle", windll.kernel32),
+ ((1, "nStdHandle"),),
+ )
+
+ GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(
+ ("GetConsoleMode", windll.kernel32),
+ ((1, "hConsoleHandle"), (2, "lpMode")),
+ )
+ GetConsoleMode.errcheck = bool_errcheck
+
+ SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(
+ ("SetConsoleMode", windll.kernel32),
+ ((1, "hConsoleHandle"), (1, "dwMode")),
+ )
+ SetConsoleMode.errcheck = bool_errcheck
+
+ # As of Windows 10, the Windows console supports (some) ANSI escape
+ # sequences, but it needs to be enabled using `SetConsoleMode` first.
+ #
+ # More info on the escape sequences supported:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx
+ stderr = GetStdHandle(STD_ERROR_HANDLE)
+ flags = GetConsoleMode(stderr)
+ SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+
+ try:
+ _enable()
+ except OSError:
+ terminal_supports_color = False
+ else:
+ terminal_supports_color = True
+else: # pragma: win32 no cover
+ terminal_supports_color = True
diff --git a/src/flake8/formatting/base.py b/src/flake8/formatting/base.py
index 7919f92..a17cb44 100644
--- a/src/flake8/formatting/base.py
+++ b/src/flake8/formatting/base.py
@@ -8,6 +8,8 @@ from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
+from flake8.formatting import _windows_color
+
if TYPE_CHECKING:
from flake8.statistics import Statistics
from flake8.style_guide import Violation
@@ -51,6 +53,11 @@ class BaseFormatter:
self.filename = options.output_file
self.output_fd: Optional[IO[str]] = None
self.newline = "\n"
+ self.color = options.color == "always" or (
+ options.color == "auto"
+ and sys.stdout.isatty()
+ and _windows_color.terminal_supports_color
+ )
self.after_init()
def after_init(self) -> None:
diff --git a/src/flake8/formatting/default.py b/src/flake8/formatting/default.py
index 0a8e09d..7c8073e 100644
--- a/src/flake8/formatting/default.py
+++ b/src/flake8/formatting/default.py
@@ -8,6 +8,20 @@ from flake8.formatting import base
if TYPE_CHECKING:
from flake8.style_guide import Violation
+COLORS = {
+ "bold": "\033[1m",
+ "black": "\033[30m",
+ "red": "\033[31m",
+ "green": "\033[32m",
+ "yellow": "\033[33m",
+ "blue": "\033[34m",
+ "magenta": "\033[35m",
+ "cyan": "\033[36m",
+ "white": "\033[37m",
+ "reset": "\033[m",
+}
+COLORS_OFF = {k: "" for k in COLORS}
+
class SimpleFormatter(base.BaseFormatter):
"""Simple abstraction for Default and Pylint formatter commonality.
@@ -39,6 +53,7 @@ class SimpleFormatter(base.BaseFormatter):
"path": error.filename,
"row": error.line_number,
"col": error.column_number,
+ **(COLORS if self.color else COLORS_OFF),
}
@@ -49,7 +64,11 @@ class Default(SimpleFormatter):
format string.
"""
- error_format = "%(path)s:%(row)d:%(col)d: %(code)s %(text)s"
+ error_format = (
+ "%(bold)s%(path)s%(reset)s"
+ "%(cyan)s:%(reset)s%(row)d%(cyan)s:%(reset)s%(col)d%(cyan)s:%(reset)s "
+ "%(bold)s%(red)s%(code)s%(reset)s %(text)s"
+ )
def after_init(self) -> None:
"""Check for a custom format string."""
diff --git a/src/flake8/main/options.py b/src/flake8/main/options.py
index c35dbc6..2a214a7 100644
--- a/src/flake8/main/options.py
+++ b/src/flake8/main/options.py
@@ -91,6 +91,7 @@ def register_default_options(option_manager):
The default options include:
- ``-q``/``--quiet``
+ - ``--color``
- ``--count``
- ``--diff``
- ``--exclude``
@@ -118,7 +119,6 @@ def register_default_options(option_manager):
"""
add_option = option_manager.add_option
- # pep8 options
add_option(
"-q",
"--quiet",
@@ -128,6 +128,13 @@ def register_default_options(option_manager):
help="Report only file names, or nothing. This option is repeatable.",
)
+ add_option(
+ "--color",
+ choices=("auto", "always", "never"),
+ default="auto",
+ help="Whether to use color in output. Defaults to `%(default)s`.",
+ )
+
add_option(
"--count",
action="store_true",
diff --git a/src/flake8/plugins/manager.py b/src/flake8/plugins/manager.py
index 840bf65..d2b9187 100644
--- a/src/flake8/plugins/manager.py
+++ b/src/flake8/plugins/manager.py
@@ -269,6 +269,12 @@ class PluginManager: # pylint: disable=too-few-public-methods
"flake8>=3.7 (which implements per-file-ignores itself)."
)
continue
+ elif entry_point.name == "flake8-colors":
+ LOG.warning(
+ "flake8-colors plugin is incompatible with "
+ "flake8>=4.1 (which implements colors itself)."
+ )
+ continue
self._load_plugin_from_entrypoint(entry_point)
def _load_plugin_from_entrypoint(self, entry_point, local=False):
diff --git a/tox.ini b/tox.ini
index 246f914..5b90a6d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -127,6 +127,8 @@ commands =
# Once Flake8 3.0 is released and in a good state, we can use both and it will
# work well \o/
ignore = D203, W503, E203, N818
+per-file-ignores =
+ src/flake8/formatting/_windows_color.py: N806
exclude =
.tox,
.git,
| PyCQA/flake8 | 05cae7e046d515b8c2dceaa9c897f4c84c7ffb5f | diff --git a/tests/unit/test_base_formatter.py b/tests/unit/test_base_formatter.py
index 8958903..d096457 100644
--- a/tests/unit/test_base_formatter.py
+++ b/tests/unit/test_base_formatter.py
@@ -1,15 +1,18 @@
"""Tests for the BaseFormatter object."""
import argparse
+import sys
from unittest import mock
import pytest
from flake8 import style_guide
+from flake8.formatting import _windows_color
from flake8.formatting import base
def options(**kwargs):
"""Create an argparse.Namespace instance."""
+ kwargs.setdefault("color", "auto")
kwargs.setdefault("output_file", None)
kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
@@ -136,6 +139,49 @@ def test_write_produces_stdout(capsys):
assert capsys.readouterr().out == f"{line}\n{source}\n"
+def test_color_always_is_true():
+ """Verify that color='always' sets it to True."""
+ formatter = base.BaseFormatter(options(color="always"))
+ assert formatter.color is True
+
+
+def _mock_isatty(val):
+ attrs = {"isatty.return_value": val}
+ return mock.patch.object(sys, "stdout", **attrs)
+
+
+def _mock_windows_color(val):
+ return mock.patch.object(_windows_color, "terminal_supports_color", val)
+
+
+def test_color_auto_is_true_for_tty():
+ """Verify that color='auto' sets it to True for a tty."""
+ with _mock_isatty(True), _mock_windows_color(True):
+ formatter = base.BaseFormatter(options(color="auto"))
+ assert formatter.color is True
+
+
+def test_color_auto_is_false_without_tty():
+ """Verify that color='auto' sets it to False without a tty."""
+ with _mock_isatty(False), _mock_windows_color(True):
+ formatter = base.BaseFormatter(options(color="auto"))
+ assert formatter.color is False
+
+
+def test_color_auto_is_false_if_not_supported_on_windows():
+ """Verify that color='auto' is False if not supported on windows."""
+ with _mock_isatty(True), _mock_windows_color(False):
+ formatter = base.BaseFormatter(options(color="auto"))
+ assert formatter.color is False
+
+
+def test_color_never_is_false():
+ """Verify that color='never' sets it to False despite a tty."""
+ with _mock_isatty(True), _mock_windows_color(True):
+ formatter = base.BaseFormatter(options(color="never"))
+ assert formatter.color is False
+
+
class AfterInitFormatter(base.BaseFormatter):
"""Subclass for testing after_init."""
diff --git a/tests/unit/test_filenameonly_formatter.py b/tests/unit/test_filenameonly_formatter.py
index 7dda50b..165ef69 100644
--- a/tests/unit/test_filenameonly_formatter.py
+++ b/tests/unit/test_filenameonly_formatter.py
@@ -7,6 +7,7 @@ from flake8.formatting import default
def options(**kwargs):
"""Create an argparse.Namespace instance."""
+ kwargs.setdefault("color", "auto")
kwargs.setdefault("output_file", None)
kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
diff --git a/tests/unit/test_nothing_formatter.py b/tests/unit/test_nothing_formatter.py
index d7cbea6..c019bdf 100644
--- a/tests/unit/test_nothing_formatter.py
+++ b/tests/unit/test_nothing_formatter.py
@@ -7,6 +7,7 @@ from flake8.formatting import default
def options(**kwargs):
"""Create an argparse.Namespace instance."""
+ kwargs.setdefault("color", "auto")
kwargs.setdefault("output_file", None)
kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
| add color support for formatting
~essentially replace `flake8-colors`
(needs a bit of design before implementation) | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/test_base_formatter.py::test_start[None]",
"tests/unit/test_base_formatter.py::test_start[out.txt]",
"tests/unit/test_base_formatter.py::test_stop",
"tests/unit/test_base_formatter.py::test_format_needs_to_be_implemented",
"tests/unit/test_base_formatter.py::test_show_source_returns_nothing_when_not_showing_source",
"tests/unit/test_base_formatter.py::test_show_source_returns_nothing_when_there_is_source",
"tests/unit/test_base_formatter.py::test_show_source_updates_physical_line_appropriately[x=1\\n-",
"tests/unit/test_base_formatter.py::test_show_source_updates_physical_line_appropriately[",
"tests/unit/test_base_formatter.py::test_show_source_updates_physical_line_appropriately[\\tx\\t=\\ty\\n-\\t",
"tests/unit/test_base_formatter.py::test_write_uses_an_output_file[False]",
"tests/unit/test_base_formatter.py::test_write_uses_an_output_file[True]",
"tests/unit/test_base_formatter.py::test_write_produces_stdout",
"tests/unit/test_base_formatter.py::test_color_always_is_true",
"tests/unit/test_base_formatter.py::test_color_auto_is_true_for_tty",
"tests/unit/test_base_formatter.py::test_color_auto_is_false_without_tty",
"tests/unit/test_base_formatter.py::test_color_auto_is_false_if_not_supported_on_windows",
"tests/unit/test_base_formatter.py::test_color_never_is_false",
"tests/unit/test_base_formatter.py::test_after_init_is_always_called",
"tests/unit/test_base_formatter.py::test_handle_formats_the_error",
"tests/unit/test_filenameonly_formatter.py::test_caches_filenames_already_printed",
"tests/unit/test_filenameonly_formatter.py::test_only_returns_a_string_once_from_format",
"tests/unit/test_filenameonly_formatter.py::test_show_source_returns_nothing",
"tests/unit/test_nothing_formatter.py::test_format_returns_nothing",
"tests/unit/test_nothing_formatter.py::test_show_source_returns_nothing"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-06T00:25:43Z" | mit |
|
PyCQA__flake8-1498 | diff --git a/src/flake8/options/config.py b/src/flake8/options/config.py
index d7519df..7cba936 100644
--- a/src/flake8/options/config.py
+++ b/src/flake8/options/config.py
@@ -8,6 +8,7 @@ from typing import List
from typing import Optional
from typing import Tuple
+from flake8 import exceptions
from flake8.options.manager import OptionManager
LOG = logging.getLogger(__name__)
@@ -60,7 +61,10 @@ def load_config(
cfg = configparser.RawConfigParser()
if config is not None:
- cfg.read(config)
+ if not cfg.read(config):
+ raise exceptions.ExecutionError(
+ f"The specified config file does not exist: {config}"
+ )
cfg_dir = os.path.dirname(config)
else:
cfg_dir = pwd
| PyCQA/flake8 | df64e392f48dca253e80a0d9f6191691df9ec294 | diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index b351e9d..fe254b7 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -386,3 +386,21 @@ def test_early_keyboard_interrupt_does_not_crash(capsys):
out, err = capsys.readouterr()
assert out == "... stopped\n"
assert err == ""
+
+
+def test_config_file_not_found(tmpdir, capsys):
+ """Ensure that an explicitly specified config file which is not found is an
+ error"""
+
+ expected = """\
+There was a critical error during execution of Flake8:
+The specified config file does not exist: missing.cfg
+"""
+
+ with tmpdir.as_cwd():
+ tmpdir.join("t.py").write("print('hello hello world')\n")
+ assert cli.main(["--config", "missing.cfg", "t.py"]) == 1
+
+ out, err = capsys.readouterr()
+ assert out == expected
+ assert err == ""
diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index b288de0..c5d1476 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -2,6 +2,7 @@ import configparser
import pytest
+from flake8 import exceptions
from flake8.main.options import register_default_options
from flake8.options import config
from flake8.options.manager import OptionManager
@@ -164,3 +165,8 @@ def test_parse_config_ignores_unknowns(tmp_path, opt_manager, caplog):
'Option "wat" is not registered. Ignoring.',
)
]
+
+
+def test_load_config_missing_file_raises_exception(capsys):
+ with pytest.raises(exceptions.ExecutionError):
+ config.load_config("foo.cfg", [])
| A missing explicitly specified config file is silently ignored
*Please describe how you installed Flake8*
```console
$ virtualenv venv
$ . venv/bin/activate
$ pip install flake8
```
*Please provide the exact, unmodified output of `flake8 --bug-report`*
```console
$ flake8 --bug-report
{
"dependencies": [],
"platform": {
"python_implementation": "CPython",
"python_version": "3.8.8",
"system": "Linux"
},
"plugins": [
{
"is_local": false,
"plugin": "mccabe",
"version": "0.6.1"
},
{
"is_local": false,
"plugin": "pycodestyle",
"version": "2.8.0"
},
{
"is_local": false,
"plugin": "pyflakes",
"version": "2.4.0"
}
],
"version": "4.0.1"
}
```
*Please describe the problem or feature*
flake8 silently ignores the specified config file if it is missing
```console
$ rm -f foo
$ touch t.py
$ flake8 --config foo t.py
$ echo $?
0
```
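For reference, the underlying `configparser` behaviour is easy to reproduce on its own — `read()` silently skips files it cannot open and just returns the list of files it actually parsed (a small sketch, independent of flake8):

```python
# Sketch: RawConfigParser.read() does not raise for a missing file; it returns
# the (possibly empty) list of files that were successfully read.
import configparser

cfg = configparser.RawConfigParser()
print(cfg.read("missing.cfg"))  # [] -- no exception is raised
```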
*If this is a bug report, please explain with examples (and example code) what you expected to happen and what actually happened.*
An error should be raised if the specified config file is missing.
Looks like the culprit is `configparser.read()`: ["If a file named in filenames cannot be opened, that file will be ignored"](https://docs.python.org/3/library/configparser.html#configparser.ConfigParser.read) | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/integration/test_main.py::test_config_file_not_found",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception"
] | [
"tests/integration/test_main.py::test_diff_option",
"tests/integration/test_main.py::test_form_feed_line_split",
"tests/integration/test_main.py::test_e101_indent_char_does_not_reset",
"tests/integration/test_main.py::test_statistics_option",
"tests/integration/test_main.py::test_show_source_option",
"tests/integration/test_main.py::test_extend_exclude",
"tests/integration/test_main.py::test_malformed_per_file_ignores_error",
"tests/integration/test_main.py::test_tokenization_error_but_not_syntax_error",
"tests/integration/test_main.py::test_tokenization_error_is_a_syntax_error",
"tests/integration/test_main.py::test_bug_report_successful",
"tests/integration/test_main.py::test_benchmark_successful",
"tests/integration/test_main.py::test_specific_noqa_does_not_clobber_pycodestyle_noqa",
"tests/integration/test_main.py::test_specific_noqa_on_line_with_continuation",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline_trailing_ws",
"tests/integration/test_main.py::test_obtaining_args_from_sys_argv_when_not_explicity_provided",
"tests/integration/test_main.py::test_cli_config_option_respected",
"tests/integration/test_main.py::test_cli_isolated_overrides_config_option",
"tests/integration/test_main.py::test_file_not_found",
"tests/integration/test_main.py::test_output_file",
"tests/integration/test_main.py::test_early_keyboard_interrupt_does_not_crash",
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-12-24T15:20:01Z" | mit |
|
PyCQA__flake8-1543 | diff --git a/src/flake8/checker.py b/src/flake8/checker.py
index 2e8117f..28c954c 100644
--- a/src/flake8/checker.py
+++ b/src/flake8/checker.py
@@ -356,7 +356,9 @@ class FileChecker:
exc_info=True,
)
raise exceptions.PluginExecutionFailed(
- plugin_name=plugin.display_name, exception=all_exc
+ filename=self.filename,
+ plugin_name=plugin.display_name,
+ exception=all_exc,
)
@staticmethod
diff --git a/src/flake8/exceptions.py b/src/flake8/exceptions.py
index e2dfd77..8e13cd8 100644
--- a/src/flake8/exceptions.py
+++ b/src/flake8/exceptions.py
@@ -54,17 +54,24 @@ class PluginRequestedUnknownParameters(Flake8Exception):
class PluginExecutionFailed(Flake8Exception):
"""The plugin failed during execution."""
- FORMAT = '"%(name)s" failed during execution due to "%(exc)s"'
-
- def __init__(self, plugin_name: str, exception: Exception) -> None:
+ FORMAT = '{fname}: "{plugin}" failed during execution due to {exc!r}'
+
+ def __init__(
+ self,
+ filename: str,
+ plugin_name: str,
+ exception: Exception,
+ ) -> None:
"""Utilize keyword arguments for message generation."""
+ self.filename = filename
self.plugin_name = plugin_name
self.original_exception = exception
- super().__init__(plugin_name, exception)
+ super().__init__(filename, plugin_name, exception)
def __str__(self) -> str:
"""Format our exception message."""
- return self.FORMAT % {
- "name": self.plugin_name,
- "exc": self.original_exception,
- }
+ return self.FORMAT.format(
+ fname=self.filename,
+ plugin=self.plugin_name,
+ exc=self.original_exception,
+ )
| PyCQA/flake8 | e704ab4d4405fde784aeecde47f563900f5f8dfd | diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
index 06c5179..96d0244 100644
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -18,6 +18,7 @@ from flake8 import exceptions
exception=ValueError("boom!"),
),
exceptions.PluginExecutionFailed(
+ filename="filename.py",
plugin_name="plugin_name",
exception=ValueError("boom!"),
),
diff --git a/tests/unit/test_file_checker.py b/tests/unit/test_file_checker.py
index ee4f745..3fe2e51 100644
--- a/tests/unit/test_file_checker.py
+++ b/tests/unit/test_file_checker.py
@@ -54,5 +54,10 @@ def test_raises_exception_on_failed_plugin(tmp_path, default_options):
plugins=finder.Checkers([], [], []),
options=default_options,
)
- with pytest.raises(flake8.exceptions.PluginExecutionFailed):
+ with pytest.raises(flake8.exceptions.PluginExecutionFailed) as excinfo:
fchecker.run_check(plugin)
+ expected = (
+ f'{fname}: "plugin-name[X]" failed during execution '
+ f"due to ValueError()"
+ )
+ assert str(excinfo.value) == expected
| Improvement suggestion: Add the relevant path as context when encountering exceptions in checker.py
In GitLab by @motin1 on Oct 2, 2018, 03:07
Ran into `UnicodeDecodeError: 'utf8' codec can't decode byte 0xb1 in position 81: invalid start byte` suddenly when running flake8:
```
$ flake8
Traceback (most recent call last):
File "/path/to/project/venv/bin/flake8", line 11, in <module>
sys.exit(main())
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/main/cli.py", line 16, in main
app.run(argv)
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/main/application.py", line 396, in run
self._run(argv)
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/main/application.py", line 384, in _run
self.run_checks()
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/main/application.py", line 310, in run_checks
self.file_checker_manager.run()
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/checker.py", line 319, in run
self.run_parallel()
File "/path/to/project/venv/lib/python2.7/site-packages/flake8/checker.py", line 288, in run_parallel
for ret in pool_map:
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/pool.py", line 287, in <genexpr>
return (item for chunk in result for item in chunk)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/pool.py", line 668, in next
raise value
UnicodeDecodeError: 'utf8' codec can't decode byte 0xb1 in position 81: invalid start byte
```
*Please describe the problem or feature*
Without any context of what path the checker was processing at the time the exception was thrown, it was hard to understand what was causing it.
Eventually, it was discovered that flake8 encountered the utf8 decoding issue within a vendor folder that was not supposed to be scanned by flake8 to begin with. Configuring an exclude for that folder solved the issue for now, but to make this easier for others in the future, it would be great if checker.py caught exceptions and added the relevant path as context.
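A minimal sketch of the general idea (simplified, not flake8's internal API): wrap the per-file plugin call and re-raise with the path included in the message, which is essentially what the patch above does through `PluginExecutionFailed`:

```python
# Sketch: include the file being checked when a plugin raises.
def run_plugin_check(filename, plugin_name, check):
    try:
        return check()
    except Exception as exc:
        raise RuntimeError(
            f'{filename}: "{plugin_name}" failed during execution due to {exc!r}'
        ) from exc
```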
*Please describe how you installed Flake8*
```
git clone https://github.com/project/repo.git
cd repo
virtualenv venv
source venv/bin/activate
pip install flake8
```
*Please provide the exact, unmodified output of `flake8 --bug-report`*
```
$ flake8 --bug-report
{
"dependencies": [
{
"dependency": "setuptools",
"version": "40.4.3"
}
],
"platform": {
"python_implementation": "CPython",
"python_version": "2.7.10",
"system": "Darwin"
},
"plugins": [
{
"is_local": false,
"plugin": "flake8_isort",
"version": "2.3"
},
{
"is_local": false,
"plugin": "mccabe",
"version": "0.6.1"
},
{
"is_local": false,
"plugin": "pycodestyle",
"version": "2.3.1"
},
{
"is_local": false,
"plugin": "pyflakes",
"version": "1.6.0"
}
],
"version": "3.5.0"
}
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/test_exceptions.py::test_pickleable[err0]",
"tests/unit/test_exceptions.py::test_pickleable[err1]",
"tests/unit/test_exceptions.py::test_pickleable[err2]",
"tests/unit/test_file_checker.py::test_repr",
"tests/unit/test_file_checker.py::test_nonexistent_file",
"tests/unit/test_file_checker.py::test_raises_exception_on_failed_plugin"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-01-24T01:42:02Z" | mit |
|
PyCQA__flake8-1624 | diff --git a/src/flake8/main/application.py b/src/flake8/main/application.py
index 13ece4e..5178abb 100644
--- a/src/flake8/main/application.py
+++ b/src/flake8/main/application.py
@@ -132,6 +132,7 @@ class Application:
version=flake8.__version__,
plugin_versions=self.plugins.versions_str(),
parents=[self.prelim_arg_parser],
+ formatter_names=list(self.plugins.reporters),
)
options.register_default_options(self.option_manager)
self.option_manager.register_plugins(self.plugins)
diff --git a/src/flake8/main/options.py b/src/flake8/main/options.py
index 9b374ab..86a6cf8 100644
--- a/src/flake8/main/options.py
+++ b/src/flake8/main/options.py
@@ -220,7 +220,15 @@ def register_default_options(option_manager: OptionManager) -> None:
metavar="format",
default="default",
parse_from_config=True,
- help="Format errors according to the chosen formatter.",
+ help=(
+ f"Format errors according to the chosen formatter "
+ f"({', '.join(sorted(option_manager.formatter_names))}) "
+ f"or a format string containing %%-style "
+ f"mapping keys (code, col, path, row, text). "
+ f"For example, "
+ f"``--format=pylint`` or ``--format='%%(path)s %%(code)s'``. "
+ f"(Default: %(default)s)"
+ ),
)
add_option(
diff --git a/src/flake8/options/manager.py b/src/flake8/options/manager.py
index e333c9e..7c40cb9 100644
--- a/src/flake8/options/manager.py
+++ b/src/flake8/options/manager.py
@@ -317,6 +317,7 @@ class OptionManager:
version: str,
plugin_versions: str,
parents: list[argparse.ArgumentParser],
+ formatter_names: list[str],
) -> None:
"""Initialize an instance of an OptionManager.
@@ -330,6 +331,7 @@ class OptionManager:
A list of ArgumentParser objects whose arguments should also be
included.
"""
+ self.formatter_names = formatter_names
self.parser = argparse.ArgumentParser(
prog="flake8",
usage="%(prog)s [options] file file ...",
| PyCQA/flake8 | fbb33430e6e0b326744e2e703db77c4773de93de | diff --git a/tests/integration/test_aggregator.py b/tests/integration/test_aggregator.py
index a5b39d7..006ac5f 100644
--- a/tests/integration/test_aggregator.py
+++ b/tests/integration/test_aggregator.py
@@ -18,6 +18,7 @@ def optmanager():
version="3.0.0",
plugin_versions="",
parents=[],
+ formatter_names=[],
)
options.register_default_options(option_manager)
return option_manager
diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index e711fb3..dfa0e0b 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -406,3 +406,13 @@ The specified config file does not exist: missing.cfg
out, err = capsys.readouterr()
assert out == expected
assert err == ""
+
+
+def test_format_option_help(capsys):
+ """Test that help displays list of available formatters."""
+ with pytest.raises(SystemExit):
+ cli.main(["--help"])
+
+ out, err = capsys.readouterr()
+ assert "(default, pylint, quiet-filename, quiet-nothing)" in out
+ assert err == ""
diff --git a/tests/integration/test_plugins.py b/tests/integration/test_plugins.py
index 0b4424a..edba048 100644
--- a/tests/integration/test_plugins.py
+++ b/tests/integration/test_plugins.py
@@ -100,6 +100,7 @@ def test_local_plugin_can_add_option(local_config):
version="123",
plugin_versions="",
parents=[stage1_parser],
+ formatter_names=[],
)
register_default_options(option_manager)
option_manager.register_plugins(loaded_plugins)
diff --git a/tests/unit/test_option_manager.py b/tests/unit/test_option_manager.py
index d5b88c3..3d3ddc1 100644
--- a/tests/unit/test_option_manager.py
+++ b/tests/unit/test_option_manager.py
@@ -17,7 +17,10 @@ TEST_VERSION = "3.0.0b1"
def optmanager():
"""Generate a simple OptionManager with default test arguments."""
return manager.OptionManager(
- version=TEST_VERSION, plugin_versions="", parents=[]
+ version=TEST_VERSION,
+ plugin_versions="",
+ parents=[],
+ formatter_names=[],
)
@@ -34,7 +37,10 @@ def test_option_manager_including_parent_options():
# WHEN
optmanager = manager.OptionManager(
- version=TEST_VERSION, plugin_versions="", parents=[parent_parser]
+ version=TEST_VERSION,
+ plugin_versions="",
+ parents=[parent_parser],
+ formatter_names=[],
)
options = optmanager.parse_args(["--parent", "foo"])
diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index 0890ea9..8c8f0cb 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -168,7 +168,9 @@ def test_load_extra_config_utf8(tmpdir):
@pytest.fixture
def opt_manager():
- ret = OptionManager(version="123", plugin_versions="", parents=[])
+ ret = OptionManager(
+ version="123", plugin_versions="", parents=[], formatter_names=[]
+ )
register_default_options(ret)
return ret
| Display list of available formatters with help for --format
In GitLab by @blueyed on Apr 26, 2018, 03:32
`flake8 --help` contains:
> --format=format Format errors according to the chosen formatter.
It would be nice/helpful if the list of available formats would be listed there. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/integration/test_aggregator.py::test_aggregate_options_with_config",
"tests/integration/test_aggregator.py::test_aggregate_options_when_isolated",
"tests/integration/test_main.py::test_format_option_help",
"tests/integration/test_plugins.py::test_local_plugin_can_add_option",
"tests/unit/test_option_manager.py::test_option_manager_creates_option_parser",
"tests/unit/test_option_manager.py::test_option_manager_including_parent_options",
"tests/unit/test_option_manager.py::test_parse_args_forwarding_default_values",
"tests/unit/test_option_manager.py::test_parse_args_forwarding_type_coercion",
"tests/unit/test_option_manager.py::test_add_option_short_option_only",
"tests/unit/test_option_manager.py::test_add_option_long_option_only",
"tests/unit/test_option_manager.py::test_add_short_and_long_option_names",
"tests/unit/test_option_manager.py::test_add_option_with_custom_args",
"tests/unit/test_option_manager.py::test_parse_args_normalize_path",
"tests/unit/test_option_manager.py::test_parse_args_handles_comma_separated_defaults",
"tests/unit/test_option_manager.py::test_parse_args_handles_comma_separated_lists",
"tests/unit/test_option_manager.py::test_parse_args_normalize_paths",
"tests/unit/test_option_manager.py::test_extend_default_ignore",
"tests/unit/test_option_manager.py::test_optparse_normalize_callback_option_legacy",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[int-5-5]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[long-6-6]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[string-foo-foo]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[float-1.5-1.5]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[complex-1+5j-(1+5j)]",
"tests/unit/test_option_manager.py::test_optparse_normalize_types[str-foo-foo]",
"tests/unit/test_option_manager.py::test_optparse_normalize_choice_type",
"tests/unit/test_option_manager.py::test_optparse_normalize_help",
"tests/unit/test_option_manager.py::test_parse_invalid_jobs_argument",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns"
] | [
"tests/integration/test_main.py::test_diff_option",
"tests/integration/test_main.py::test_form_feed_line_split",
"tests/integration/test_main.py::test_e101_indent_char_does_not_reset",
"tests/integration/test_main.py::test_statistics_option",
"tests/integration/test_main.py::test_show_source_option",
"tests/integration/test_main.py::test_extend_exclude",
"tests/integration/test_main.py::test_malformed_per_file_ignores_error",
"tests/integration/test_main.py::test_tokenization_error_but_not_syntax_error",
"tests/integration/test_main.py::test_tokenization_error_is_a_syntax_error",
"tests/integration/test_main.py::test_bug_report_successful",
"tests/integration/test_main.py::test_benchmark_successful",
"tests/integration/test_main.py::test_specific_noqa_does_not_clobber_pycodestyle_noqa",
"tests/integration/test_main.py::test_specific_noqa_on_line_with_continuation",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline_trailing_ws",
"tests/integration/test_main.py::test_obtaining_args_from_sys_argv_when_not_explicity_provided",
"tests/integration/test_main.py::test_cli_config_option_respected",
"tests/integration/test_main.py::test_cli_isolated_overrides_config_option",
"tests/integration/test_main.py::test_file_not_found",
"tests/integration/test_main.py::test_output_file",
"tests/integration/test_main.py::test_early_keyboard_interrupt_does_not_crash",
"tests/integration/test_main.py::test_config_file_not_found",
"tests/integration/test_plugins.py::test_enable_local_plugin_from_config",
"tests/integration/test_plugins.py::test_plugin_gets_enabled_by_default",
"tests/integration/test_plugins.py::test_plugin_off_by_default",
"tests/integration/test_plugins.py::test_physical_line_plugin_multiline_string",
"tests/unit/test_option_manager.py::test_parse_valid_jobs_argument[auto-True--1]",
"tests/unit/test_option_manager.py::test_parse_valid_jobs_argument[4-False-4]",
"tests/unit/test_option_manager.py::test_jobs_argument_str",
"tests/unit/test_option_manager.py::test_jobs_argument_repr",
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_find_config_ignores_homedir",
"tests/unit/test_options_config.py::test_find_config_ignores_unknown_homedir",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_load_auto_config_utf8",
"tests/unit/test_options_config.py::test_load_explicit_config_utf8",
"tests/unit/test_options_config.py::test_load_extra_config_utf8",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-07-28T20:48:41Z" | mit |
|
PyCQA__flake8-1642 | diff --git a/src/flake8/options/config.py b/src/flake8/options/config.py
index f5c93ce..36fe976 100644
--- a/src/flake8/options/config.py
+++ b/src/flake8/options/config.py
@@ -23,7 +23,10 @@ def _stat_key(s: str) -> Tuple[int, int]:
def _find_config_file(path: str) -> Optional[str]:
# on windows if the homedir isn't detected this returns back `~`
home = os.path.expanduser("~")
- home_stat = _stat_key(home) if home != "~" else None
+ try:
+ home_stat = _stat_key(home) if home != "~" else None
+ except OSError: # FileNotFoundError / PermissionError / etc.
+ home_stat = None
dir_stat = _stat_key(path)
cfg = configparser.RawConfigParser()
| PyCQA/flake8 | 446b18d35a5fa0be6b3531d30a0706fc82247313 | diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index 7a7c4f7..bdc2208 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -78,6 +78,13 @@ def test_find_config_ignores_homedir(tmp_path):
assert config._find_config_file(str(subdir)) is None
+def test_find_config_ignores_unknown_homedir(tmp_path):
+ subdir = tmp_path.joinpath("d")
+
+ with mock.patch.object(os.path, "expanduser", return_value=str(subdir)):
+ assert config._find_config_file(str(tmp_path)) is None
+
+
def test_load_config_config_specified_skips_discovery(tmpdir):
tmpdir.join("setup.cfg").write("[flake8]\nindent-size=2\n")
custom_cfg = tmpdir.join("custom.cfg")
| Exception if HOME is not an existing path
### how did you install flake8?
```console
$ pip install flake8
```
### unmodified output of `flake8 --bug-report`
```json
{
"platform": {
"python_implementation": "CPython",
"python_version": "3.9.13",
"system": "Linux"
},
"plugins": [
{
"plugin": "mccabe",
"version": "0.7.0"
},
{
"plugin": "pycodestyle",
"version": "2.9.0"
},
{
"plugin": "pyflakes",
"version": "2.5.0"
}
],
"version": "5.0.1"
}
```
### describe the problem
#### what I expected to happen
flake8 should not raise an exception if $HOME points to an invalid directory
For example, when running as the `nobody` user in a Debian-based Docker image, and $HOME points to `/nonexistent`.
#### commands ran
```console
# sudo -u nobody flake8
Traceback (most recent call last):
File "/usr/local/bin/flake8", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.9/site-packages/flake8/main/cli.py", line 22, in main
app.run(argv)
File "/usr/local/lib/python3.9/site-packages/flake8/main/application.py", line 336, in run
self._run(argv)
File "/usr/local/lib/python3.9/site-packages/flake8/main/application.py", line 324, in _run
self.initialize(argv)
File "/usr/local/lib/python3.9/site-packages/flake8/main/application.py", line 296, in initialize
cfg, cfg_dir = config.load_config(
File "/usr/local/lib/python3.9/site-packages/flake8/options/config.py", line 73, in load_config
config = _find_config_file(pwd)
File "/usr/local/lib/python3.9/site-packages/flake8/options/config.py", line 26, in _find_config_file
home_stat = _stat_key(home) if home != "~" else None
File "/usr/local/lib/python3.9/site-packages/flake8/options/config.py", line 19, in _stat_key
st = os.stat(s)
FileNotFoundError: [Errno 2] No such file or directory: '/nonexistent'
```
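The guard added by the patch above boils down to tolerating an `OSError` when stat-ing the expanded home directory (simplified sketch of the same logic):

```python
# Sketch: fall back to "no home directory config" when $HOME does not exist.
import os

home = os.path.expanduser("~")
try:
    home_stat = os.stat(home) if home != "~" else None
except OSError:  # FileNotFoundError / PermissionError / etc.
    home_stat = None
```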
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/test_options_config.py::test_find_config_ignores_unknown_homedir"
] | [
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_find_config_ignores_homedir",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_load_auto_config_utf8",
"tests/unit/test_options_config.py::test_load_explicit_config_utf8",
"tests/unit/test_options_config.py::test_load_extra_config_utf8",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-08-01T12:52:16Z" | mit |
|
PyCQA__flake8-1648 | diff --git a/src/flake8/options/config.py b/src/flake8/options/config.py
index 36fe976..daf8529 100644
--- a/src/flake8/options/config.py
+++ b/src/flake8/options/config.py
@@ -29,9 +29,9 @@ def _find_config_file(path: str) -> Optional[str]:
home_stat = None
dir_stat = _stat_key(path)
- cfg = configparser.RawConfigParser()
while True:
for candidate in ("setup.cfg", "tox.ini", ".flake8"):
+ cfg = configparser.RawConfigParser()
cfg_path = os.path.join(path, candidate)
try:
cfg.read(cfg_path, encoding="UTF-8")
| PyCQA/flake8 | 70c0b3d27a5626a6ad58293bfaa6308244cb349c | diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index bdc2208..4ad6acd 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -21,7 +21,9 @@ def test_config_file_without_section_is_not_considered(tmp_path):
def test_config_file_with_parse_error_is_not_considered(tmp_path, caplog):
- tmp_path.joinpath("setup.cfg").write_text("[error")
+ # the syntax error here is deliberately to trigger a partial parse
+ # https://github.com/python/cpython/issues/95546
+ tmp_path.joinpath("setup.cfg").write_text("[flake8]\nx = 1\n...")
assert config._find_config_file(str(tmp_path)) is None
Config file .flake8 does not exist when running GitHub Actions
### how did you install flake8?
```console
jobs:
flake8_py3:
runs-on: ubuntu-latest
steps:
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.9.x
architecture: x64
- name: Checkout PyTorch
uses: actions/checkout@master
- name: Install flake8
run: pip install flake8==4.0.1
- name: Run flake8
uses: suo/flake8-github-action@v1
with:
checkName: 'flake8_py3' # NOTE: this needs to be the same as the job name
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
### unmodified output of `flake8 --bug-report`
```json
platform: git hub
```
### describe the problem
# Description of the issue
I have been using flake8 as part of my GitHub Actions workflow; the configuration is the one shown above under "how did you install flake8?".
It worked fine until release 5.0.x came out on the 30th of July.
Starting from this release flake8 always fails, because it tries to read the configuration from a path on the runner that, of course, does not exist.
#### what happened
```
There was a critical error during execution of Flake8:
The specified config file does not exist: /home/runner/work/<my_repository>/.flake8
Error: The process 'flake8' failed with exit code 1
```
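The fix above creates a fresh parser for every candidate file because `configparser` can keep sections it has already read even when the parse ultimately fails, so a reused parser may appear to have found a config that is not really there. A rough sketch of that behaviour, outside flake8 (the exact outcome can vary by Python version):

```python
# Sketch: a parse error can still leave previously-read sections in the parser.
import configparser

cfg = configparser.RawConfigParser()
try:
    cfg.read_string("[flake8]\nx = 1\n...")  # the trailing junk line is invalid
except configparser.ParsingError:
    pass
print(cfg.sections())  # typically ['flake8'] -- stale state if cfg is reused
```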
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered"
] | [
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_find_config_ignores_homedir",
"tests/unit/test_options_config.py::test_find_config_ignores_unknown_homedir",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_load_auto_config_utf8",
"tests/unit/test_options_config.py::test_load_explicit_config_utf8",
"tests/unit/test_options_config.py::test_load_extra_config_utf8",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-08-01T23:12:41Z" | mit |
|
PyCQA__flake8-1713 | diff --git a/src/flake8/defaults.py b/src/flake8/defaults.py
index 4ba0048..e3edf32 100644
--- a/src/flake8/defaults.py
+++ b/src/flake8/defaults.py
@@ -43,3 +43,5 @@ NOQA_INLINE_REGEXP = re.compile(
)
NOQA_FILE = re.compile(r"\s*# flake8[:=]\s*noqa", re.I)
+
+VALID_CODE_PREFIX = re.compile("^[A-Z]{1,3}[0-9]{0,3}$", re.ASCII)
diff --git a/src/flake8/options/config.py b/src/flake8/options/config.py
index e158737..a9ced1b 100644
--- a/src/flake8/options/config.py
+++ b/src/flake8/options/config.py
@@ -7,6 +7,7 @@ import os.path
from typing import Any
from flake8 import exceptions
+from flake8.defaults import VALID_CODE_PREFIX
from flake8.options.manager import OptionManager
LOG = logging.getLogger(__name__)
@@ -120,6 +121,16 @@ def parse_config(
LOG.debug('Option "%s" returned value: %r', option_name, value)
final_value = option.normalize(value, cfg_dir)
+
+ if option_name in {"ignore", "extend-ignore"}:
+ for error_code in final_value:
+ if not VALID_CODE_PREFIX.match(error_code):
+ raise ValueError(
+ f"Error code {error_code!r} "
+ f"supplied to {option_name!r} option "
+ f"does not match {VALID_CODE_PREFIX.pattern!r}"
+ )
+
assert option.config_name is not None
config_dict[option.config_name] = final_value
diff --git a/src/flake8/plugins/finder.py b/src/flake8/plugins/finder.py
index c051488..4a43ccb 100644
--- a/src/flake8/plugins/finder.py
+++ b/src/flake8/plugins/finder.py
@@ -5,7 +5,6 @@ import configparser
import inspect
import itertools
import logging
-import re
import sys
from typing import Any
from typing import Generator
@@ -14,13 +13,12 @@ from typing import NamedTuple
from flake8 import utils
from flake8._compat import importlib_metadata
+from flake8.defaults import VALID_CODE_PREFIX
from flake8.exceptions import ExecutionError
from flake8.exceptions import FailedToLoadPlugin
LOG = logging.getLogger(__name__)
-VALID_CODE = re.compile("^[A-Z]{1,3}[0-9]{0,3}$", re.ASCII)
-
FLAKE8_GROUPS = frozenset(("flake8.extension", "flake8.report"))
BANNED_PLUGINS = {
@@ -337,10 +335,10 @@ def _classify_plugins(
raise NotImplementedError(f"what plugin type? {loaded}")
for loaded in itertools.chain(tree, logical_line, physical_line):
- if not VALID_CODE.match(loaded.entry_name):
+ if not VALID_CODE_PREFIX.match(loaded.entry_name):
raise ExecutionError(
f"plugin code for `{loaded.display_name}` does not match "
- f"{VALID_CODE.pattern}"
+ f"{VALID_CODE_PREFIX.pattern}"
)
return Plugins(
| PyCQA/flake8 | 5eeee3fbc0b64c7de347d1964de3074b9b2bde0b | diff --git a/tests/unit/plugins/finder_test.py b/tests/unit/plugins/finder_test.py
index d526fd1..cd5cf4a 100644
--- a/tests/unit/plugins/finder_test.py
+++ b/tests/unit/plugins/finder_test.py
@@ -31,37 +31,6 @@ def _loaded(plugin=None, obj=None, parameters=None):
return finder.LoadedPlugin(plugin, obj, parameters)
[email protected](
- "s",
- (
- "E",
- "E1",
- "E123",
- "ABC",
- "ABC1",
- "ABC123",
- ),
-)
-def test_valid_plugin_prefixes(s):
- assert finder.VALID_CODE.match(s)
-
-
[email protected](
- "s",
- (
- "",
- "A1234",
- "ABCD",
- "abc",
- "a-b",
- "β",
- "Aπ",
- ),
-)
-def test_invalid_plugin_prefixes(s):
- assert finder.VALID_CODE.match(s) is None
-
-
def test_loaded_plugin_entry_name_vs_display_name():
loaded = _loaded(_plugin(package="package-name", ep=_ep(name="Q")))
assert loaded.entry_name == "Q"
diff --git a/tests/unit/test_defaults.py b/tests/unit/test_defaults.py
new file mode 100644
index 0000000..822b8f0
--- /dev/null
+++ b/tests/unit/test_defaults.py
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+import pytest
+
+from flake8.defaults import VALID_CODE_PREFIX
+
+
[email protected](
+ "s",
+ (
+ "E",
+ "E1",
+ "E123",
+ "ABC",
+ "ABC1",
+ "ABC123",
+ ),
+)
+def test_valid_plugin_prefixes(s):
+ assert VALID_CODE_PREFIX.match(s)
+
+
[email protected](
+ "s",
+ (
+ "",
+ "A1234",
+ "ABCD",
+ "abc",
+ "a-b",
+        "☃",
+        "A𝟗",
+ ),
+)
+def test_invalid_plugin_prefixes(s):
+ assert VALID_CODE_PREFIX.match(s) is None
diff --git a/tests/unit/test_options_config.py b/tests/unit/test_options_config.py
index 8c8f0cb..43d8104 100644
--- a/tests/unit/test_options_config.py
+++ b/tests/unit/test_options_config.py
@@ -220,3 +220,35 @@ def test_parse_config_ignores_unknowns(tmp_path, opt_manager, caplog):
def test_load_config_missing_file_raises_exception(capsys):
with pytest.raises(exceptions.ExecutionError):
config.load_config("foo.cfg", [])
+
+
+def test_invalid_ignore_codes_raise_error(tmpdir, opt_manager):
+ tmpdir.join("setup.cfg").write("[flake8]\nignore = E203, //comment")
+ with tmpdir.as_cwd():
+ cfg, _ = config.load_config("setup.cfg", [], isolated=False)
+
+ with pytest.raises(ValueError) as excinfo:
+ config.parse_config(opt_manager, cfg, tmpdir)
+
+ expected = (
+ "Error code '//comment' supplied to 'ignore' option "
+ "does not match '^[A-Z]{1,3}[0-9]{0,3}$'"
+ )
+ (msg,) = excinfo.value.args
+ assert msg == expected
+
+
+def test_invalid_extend_ignore_codes_raise_error(tmpdir, opt_manager):
+ tmpdir.join("setup.cfg").write("[flake8]\nextend-ignore = E203, //comment")
+ with tmpdir.as_cwd():
+ cfg, _ = config.load_config("setup.cfg", [], isolated=False)
+
+ with pytest.raises(ValueError) as excinfo:
+ config.parse_config(opt_manager, cfg, tmpdir)
+
+ expected = (
+ "Error code '//comment' supplied to 'extend-ignore' option "
+ "does not match '^[A-Z]{1,3}[0-9]{0,3}$'"
+ )
+ (msg,) = excinfo.value.args
+ assert msg == expected
| warn on invalid codes in `ignore` / `extend-ignore`
### describe the request
for example https://github.com/getsentry/sentry-python/blob/b36d84a76bd6f8344c9b0a9694591939296e9c06/.flake8#L2-L13
this disables a code called `//` and a code called `I` (among many others) due to incorrect comment syntax | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/plugins/finder_test.py::test_loaded_plugin_entry_name_vs_display_name",
"tests/unit/plugins/finder_test.py::test_plugins_all_plugins",
"tests/unit/plugins/finder_test.py::test_plugins_versions_str",
"tests/unit/plugins/finder_test.py::test_flake8_plugins",
"tests/unit/plugins/finder_test.py::test_importlib_plugins",
"tests/unit/plugins/finder_test.py::test_duplicate_dists",
"tests/unit/plugins/finder_test.py::test_find_local_plugins_nothing",
"tests/unit/plugins/finder_test.py::test_find_local_plugins",
"tests/unit/plugins/finder_test.py::test_parse_plugin_options_not_specified",
"tests/unit/plugins/finder_test.py::test_parse_enabled_from_commandline",
"tests/unit/plugins/finder_test.py::test_parse_enabled_from_config[enable_extensions]",
"tests/unit/plugins/finder_test.py::test_parse_enabled_from_config[enable-extensions]",
"tests/unit/plugins/finder_test.py::test_parse_plugin_options_local_plugin_paths_missing",
"tests/unit/plugins/finder_test.py::test_parse_plugin_options_local_plugin_paths",
"tests/unit/plugins/finder_test.py::test_find_plugins",
"tests/unit/plugins/finder_test.py::test_find_plugins_plugin_is_present",
"tests/unit/plugins/finder_test.py::test_find_plugins_plugin_is_missing",
"tests/unit/plugins/finder_test.py::test_find_plugins_name_normalization",
"tests/unit/plugins/finder_test.py::test_parameters_for_class_plugin",
"tests/unit/plugins/finder_test.py::test_parameters_for_function_plugin",
"tests/unit/plugins/finder_test.py::test_load_plugin_import_error",
"tests/unit/plugins/finder_test.py::test_load_plugin_not_callable",
"tests/unit/plugins/finder_test.py::test_load_plugin_ok",
"tests/unit/plugins/finder_test.py::test_import_plugins_extends_sys_path",
"tests/unit/plugins/finder_test.py::test_classify_plugins",
"tests/unit/plugins/finder_test.py::test_classify_plugins_enable_a_disabled_plugin",
"tests/unit/plugins/finder_test.py::test_classify_plugins_does_not_error_on_reporter_prefix",
"tests/unit/plugins/finder_test.py::test_classify_plugins_errors_on_incorrect_checker_name",
"tests/unit/plugins/finder_test.py::test_load_plugins",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[E]",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[E1]",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[E123]",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[ABC]",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[ABC1]",
"tests/unit/test_defaults.py::test_valid_plugin_prefixes[ABC123]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[A1234]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[ABCD]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[abc]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[a-b]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[\\u2603]",
"tests/unit/test_defaults.py::test_invalid_plugin_prefixes[A\\U0001d7d7]",
"tests/unit/test_options_config.py::test_config_not_found_returns_none",
"tests/unit/test_options_config.py::test_config_file_without_section_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_parse_error_is_not_considered",
"tests/unit/test_options_config.py::test_config_file_with_encoding_error_is_not_considered",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[setup.cfg]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[tox.ini]",
"tests/unit/test_options_config.py::test_find_config_file_exists_at_path[.flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8]",
"tests/unit/test_options_config.py::test_find_config_either_section[flake8:local-plugins]",
"tests/unit/test_options_config.py::test_find_config_searches_upwards",
"tests/unit/test_options_config.py::test_find_config_ignores_homedir",
"tests/unit/test_options_config.py::test_find_config_ignores_unknown_homedir",
"tests/unit/test_options_config.py::test_load_config_config_specified_skips_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_file_does_discovery",
"tests/unit/test_options_config.py::test_load_config_no_config_found_sets_cfg_dir_to_pwd",
"tests/unit/test_options_config.py::test_load_config_isolated_ignores_configuration",
"tests/unit/test_options_config.py::test_load_config_append_config",
"tests/unit/test_options_config.py::test_load_auto_config_utf8",
"tests/unit/test_options_config.py::test_load_explicit_config_utf8",
"tests/unit/test_options_config.py::test_load_extra_config_utf8",
"tests/unit/test_options_config.py::test_parse_config_no_values",
"tests/unit/test_options_config.py::test_parse_config_typed_values",
"tests/unit/test_options_config.py::test_parse_config_ignores_unknowns",
"tests/unit/test_options_config.py::test_load_config_missing_file_raises_exception",
"tests/unit/test_options_config.py::test_invalid_ignore_codes_raise_error",
"tests/unit/test_options_config.py::test_invalid_extend_ignore_codes_raise_error"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-10-12T23:21:04Z" | mit |
|
PyCQA__flake8-1726 | diff --git a/src/flake8/checker.py b/src/flake8/checker.py
index 2d7bcd9..2cae545 100644
--- a/src/flake8/checker.py
+++ b/src/flake8/checker.py
@@ -6,6 +6,7 @@ import contextlib
import errno
import logging
import multiprocessing.pool
+import operator
import signal
import tokenize
from typing import Any
@@ -180,8 +181,9 @@ class Manager:
A tuple of the total results found and the results reported.
"""
results_reported = results_found = 0
+ self.results.sort(key=operator.itemgetter(0))
for filename, results, _ in self.results:
- results.sort(key=lambda tup: (tup[1], tup[2]))
+ results.sort(key=operator.itemgetter(1, 2))
with self.style_guide.processing_file(filename):
results_reported += self._handle_results(filename, results)
results_found += len(results)
| PyCQA/flake8 | 987a7187872e0abc89f9ed57f1d8012f0737e34c | diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index db60f6b..96d1182 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -98,6 +98,26 @@ t.py:1:1: F401 'os' imported but unused
assert err == ""
+def test_errors_sorted(tmpdir, capsys):
+ with tmpdir.as_cwd():
+ for c in "abcde":
+ tmpdir.join(f"{c}.py").write("import os\n")
+ assert cli.main(["./"]) == 1
+
+ # file traversal was done in inode-order before
+ # this uses a significant number of files such that it's unlikely to pass
+ expected = """\
+./a.py:1:1: F401 'os' imported but unused
+./b.py:1:1: F401 'os' imported but unused
+./c.py:1:1: F401 'os' imported but unused
+./d.py:1:1: F401 'os' imported but unused
+./e.py:1:1: F401 'os' imported but unused
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
+ assert err == ""
+
+
def test_extend_exclude(tmpdir, capsys):
"""Ensure that `flake8 --extend-exclude` works."""
for d in ["project", "vendor", "legacy", ".git", ".tox", ".hg"]:
| Sort FileCheckers alphabetically
Whenever I run *flake8* on a large set of code which has multiple violations, I am presented with an apparently deterministic, but not alphabetical order of the files. This makes it hard to work on them in a coordinated manner, as the file list inside the file manager or IDE usually is sorted alphabetically. Current example output, where I would expect `dummies.py` to be displayed before `sample.py`:
```bash
(.env) stefan@localhost:~/projects/samples> flake8 samples/
samples/sample.py:79:1: E501 line too long (100 > 80 characters)
samples/dummies.py:68:7: E265 block comment should start with '# '
```
Skimming through the available options, the documentation and the source code, this does not seem to be supported for now. Judging from the source code, this seems like some hard-coded behavior of `flake8.checker.Manager.report` and therefore hard to implement (or hacky) to fix from the outside.
This might be fixed by simply replacing the iteration over all the checkers inside the aforementioned method by something like the following snippet, although this will not separate files and directories if this is something which should be considered:
```python3
for checker in sorted(self._all_checkers, key=operator.attrgetter('filename')):
...
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/integration/test_main.py::test_errors_sorted"
] | [
"tests/integration/test_main.py::test_form_feed_line_split",
"tests/integration/test_main.py::test_e101_indent_char_does_not_reset",
"tests/integration/test_main.py::test_statistics_option",
"tests/integration/test_main.py::test_show_source_option",
"tests/integration/test_main.py::test_extend_exclude",
"tests/integration/test_main.py::test_malformed_per_file_ignores_error",
"tests/integration/test_main.py::test_tokenization_error_but_not_syntax_error",
"tests/integration/test_main.py::test_tokenization_error_is_a_syntax_error",
"tests/integration/test_main.py::test_bug_report_successful",
"tests/integration/test_main.py::test_benchmark_successful",
"tests/integration/test_main.py::test_specific_noqa_does_not_clobber_pycodestyle_noqa",
"tests/integration/test_main.py::test_specific_noqa_on_line_with_continuation",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline",
"tests/integration/test_main.py::test_physical_line_file_not_ending_in_newline_trailing_ws",
"tests/integration/test_main.py::test_obtaining_args_from_sys_argv_when_not_explicity_provided",
"tests/integration/test_main.py::test_cli_config_option_respected",
"tests/integration/test_main.py::test_cli_isolated_overrides_config_option",
"tests/integration/test_main.py::test_file_not_found",
"tests/integration/test_main.py::test_output_file",
"tests/integration/test_main.py::test_early_keyboard_interrupt_does_not_crash",
"tests/integration/test_main.py::test_config_file_not_found",
"tests/integration/test_main.py::test_format_option_help"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-10-30T19:12:25Z" | mit |
|
PyCQA__flake8-bugbear-12 | diff --git a/README.rst b/README.rst
index 9f3cf5f..81382f4 100644
--- a/README.rst
+++ b/README.rst
@@ -107,9 +107,10 @@ Users coming from Python 2 may expect the old behavior which might lead
to bugs. Use native ``async def`` coroutines or mark intentional
``return x`` usage with ``# noqa`` on the same line.
-**B902**: Invalid first argument used for method. Use ``self`` for
-instance methods, and `cls` for class methods (which includes `__new__`
-and `__init_subclass__`).
+**B902**: Invalid first argument used for method. Use ``self`` for instance
+methods, and `cls` for class methods (which includes `__new__` and
+`__init_subclass__`) or instance methods of metaclasses. Note that this lint
+can only detect metaclasses if they directly inherit from ``type``.
**B950**: Line too long. This is a pragmatic equivalent of ``pycodestyle``'s
E501: it considers "max-line-length" but only triggers when the value has been
diff --git a/bugbear.py b/bugbear.py
index d9dc7e7..42a9077 100644
--- a/bugbear.py
+++ b/bugbear.py
@@ -319,6 +319,10 @@ class BugBearVisitor(ast.NodeVisitor):
):
expected_first_args = B902.cls
kind = 'class'
+ elif any(getattr(x, 'id', None) == 'type'
+ for x in self.node_stack[-2].bases):
+ expected_first_args = B902.cls
+ kind = 'metaclass instance'
else:
expected_first_args = B902.self
kind = 'instance'
| PyCQA/flake8-bugbear | c91bb7e24891cc3d1e3f1396dd280385c4557b48 | diff --git a/tests/b902.py b/tests/b902.py
index 91e6bc1..7f8ffc1 100644
--- a/tests/b902.py
+++ b/tests/b902.py
@@ -47,3 +47,13 @@ class Warnings:
def invalid_keyword_only(*, self):
...
+
+
+class Meta(type):
+ def __init__(cls, name, bases, d):
+ ...
+
+
+class OtherMeta(type):
+ def __init__(self, name, bases, d):
+ ...
diff --git a/tests/test_bugbear.py b/tests/test_bugbear.py
index f7c89d9..c94a60e 100644
--- a/tests/test_bugbear.py
+++ b/tests/test_bugbear.py
@@ -146,6 +146,7 @@ class BugbearTestCase(unittest.TestCase):
B902(39, 22, vars=("*args", 'instance', 'self')),
B902(45, 30, vars=("**kwargs", 'instance', 'self')),
B902(48, 32, vars=("*, self", 'instance', 'self')),
+ B902(58, 17, vars=("'self'", 'metaclass instance', 'cls')),
)
)
| B902 requires first parameter named self for __init__ of metaclass
I have a metaclass defined like this:
```
class StorageMeta(type):
    def __init__(cls, name, bases, d):
```
Naturally B902 flagged the first parameter of `__init__`, but I'm not sure if it's intended in the case of metaclasses. I can imagine arguments for and against this behavior. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_bugbear.py::BugbearTestCase::test_b902"
] | [
"tests/test_bugbear.py::BugbearTestCase::test_b001",
"tests/test_bugbear.py::BugbearTestCase::test_b002",
"tests/test_bugbear.py::BugbearTestCase::test_b003",
"tests/test_bugbear.py::BugbearTestCase::test_b004",
"tests/test_bugbear.py::BugbearTestCase::test_b005",
"tests/test_bugbear.py::BugbearTestCase::test_b006",
"tests/test_bugbear.py::BugbearTestCase::test_b007",
"tests/test_bugbear.py::BugbearTestCase::test_b301_b302_b305",
"tests/test_bugbear.py::BugbearTestCase::test_b303_b304",
"tests/test_bugbear.py::BugbearTestCase::test_b306",
"tests/test_bugbear.py::BugbearTestCase::test_b901",
"tests/test_bugbear.py::BugbearTestCase::test_b950",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_test_bugbear"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2017-02-25T15:47:25Z" | mit |
|
PyCQA__flake8-bugbear-129 | diff --git a/bugbear.py b/bugbear.py
index e45d059..eb96788 100644
--- a/bugbear.py
+++ b/bugbear.py
@@ -174,6 +174,7 @@ class BugBearVisitor(ast.NodeVisitor):
# (MyError, MyError) # duplicate names
# (MyError, BaseException) # everything derives from the Base
# (Exception, TypeError) # builtins where one subclasses another
+ # (IOError, OSError) # IOError is an alias of OSError since Python3.3
# but note that other cases are impractical to hande from the AST.
# We expect this is mostly useful for users who do not have the
# builtin exception hierarchy memorised, and include a 'shadowed'
@@ -181,6 +182,14 @@ class BugBearVisitor(ast.NodeVisitor):
good = sorted(set(names), key=names.index)
if "BaseException" in good:
good = ["BaseException"]
+ # Find and remove aliases exceptions and only leave the primary alone
+ primaries = filter(
+ lambda primary: primary in good, B014.exception_aliases.keys()
+ )
+ for primary in primaries:
+ aliases = B014.exception_aliases[primary]
+ good = list(filter(lambda e: e not in aliases, good))
+
for name, other in itertools.permutations(tuple(good), 2):
if issubclass(
getattr(builtins, name, type), getattr(builtins, other, ())
@@ -639,6 +648,16 @@ B014 = Error(
"Write `except {2}{1}:`, which catches exactly the same exceptions."
)
)
+B014.exception_aliases = {
+ "OSError": {
+ "IOError",
+ "EnvironmentError",
+ "WindowsError",
+ "mmap.error",
+ "socket.error",
+ "select.error",
+ }
+}
# Those could be false positives but it's more dangerous to let them slip
# through if they're not.
| PyCQA/flake8-bugbear | c18db4fa8bdc15f0194541055dd4cf46dbaf18f1 | diff --git a/tests/b014.py b/tests/b014.py
index 4c26794..a3e64e6 100644
--- a/tests/b014.py
+++ b/tests/b014.py
@@ -1,6 +1,6 @@
"""
Should emit:
-B014 - on lines 10, 16, 27, 41, and 48
+B014 - on lines 10, 16, 27, 41, 48, and 55
"""
import re
@@ -48,3 +48,14 @@ try:
except (re.error, re.error):
# Duplicate exception types as attributes
pass
+
+
+try:
+ pass
+except (IOError, EnvironmentError, OSError):
+ # Detect if a primary exception and any its aliases are present.
+ #
+ # Since Python 3.3, IOError, EnvironmentError, WindowsError, mmap.error,
+ # socket.error and select.error are aliases of OSError. See PEP 3151 for
+ # more info.
+ pass
diff --git a/tests/test_bugbear.py b/tests/test_bugbear.py
index bc4c61a..d6161af 100644
--- a/tests/test_bugbear.py
+++ b/tests/test_bugbear.py
@@ -178,6 +178,7 @@ class BugbearTestCase(unittest.TestCase):
B014(27, 0, vars=("MyError, MyError", "", "MyError")),
B014(41, 0, vars=("MyError, BaseException", " as e", "BaseException")),
B014(48, 0, vars=("re.error, re.error", "", "re.error")),
+ B014(55, 0, vars=("IOError, EnvironmentError, OSError", "", "OSError"),),
)
self.assertEqual(errors, expected)
| B014 recommends `except ():`
In the following code:
```
try:
with open(fn) as f:
return f.read().split('\n')
except (OSError, IOError):
pass
```
The new B014 warning shows this:
> B014 Redundant exception types in `except (OSError, IOError):`. Write `except ():`, which catches exactly the same exceptions.
Doesn't seem to make sense to me. Is this a bug :bear:? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_bugbear.py::BugbearTestCase::test_b014"
] | [
"tests/test_bugbear.py::BugbearTestCase::test_b001",
"tests/test_bugbear.py::BugbearTestCase::test_b002",
"tests/test_bugbear.py::BugbearTestCase::test_b003",
"tests/test_bugbear.py::BugbearTestCase::test_b004",
"tests/test_bugbear.py::BugbearTestCase::test_b005",
"tests/test_bugbear.py::BugbearTestCase::test_b006_b008",
"tests/test_bugbear.py::BugbearTestCase::test_b007",
"tests/test_bugbear.py::BugbearTestCase::test_b009_b010",
"tests/test_bugbear.py::BugbearTestCase::test_b011",
"tests/test_bugbear.py::BugbearTestCase::test_b012",
"tests/test_bugbear.py::BugbearTestCase::test_b013",
"tests/test_bugbear.py::BugbearTestCase::test_b301_b302_b305",
"tests/test_bugbear.py::BugbearTestCase::test_b303_b304",
"tests/test_bugbear.py::BugbearTestCase::test_b306",
"tests/test_bugbear.py::BugbearTestCase::test_b901",
"tests/test_bugbear.py::BugbearTestCase::test_b902",
"tests/test_bugbear.py::BugbearTestCase::test_b903",
"tests/test_bugbear.py::BugbearTestCase::test_b950",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_bugbear",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_test_bugbear",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_any_valid_code",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_site_code"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2020-06-28T00:04:20Z" | mit |
|
PyCQA__flake8-bugbear-161 | diff --git a/bugbear.py b/bugbear.py
index 873fc10..974e5cb 100644
--- a/bugbear.py
+++ b/bugbear.py
@@ -124,8 +124,10 @@ def _to_name_str(node):
# "pkg.mod.error", handling any depth of attribute accesses.
if isinstance(node, ast.Name):
return node.id
- assert isinstance(node, ast.Attribute)
- return _to_name_str(node.value) + "." + node.attr
+ try:
+ return _to_name_str(node.value) + "." + node.attr
+ except AttributeError:
+ return _to_name_str(node.value)
def _typesafe_issubclass(cls, class_or_tuple):
| PyCQA/flake8-bugbear | eb85c48cc32a6a401d22339f675c6310e2e91eef | diff --git a/tests/test_bugbear.py b/tests/test_bugbear.py
index ccce9da..c755fb9 100644
--- a/tests/test_bugbear.py
+++ b/tests/test_bugbear.py
@@ -340,6 +340,20 @@ class TestFuzz(unittest.TestCase):
if f.endswith(".py"):
BugBearChecker(filename=str(Path(dirname) / f))
+ def test_does_not_crash_on_tuple_expansion_in_except_statement(self):
+ # akin to test_does_not_crash_on_any_valid_code
+ # but targets a rare case that's not covered by hypothesmith.from_grammar
+ # see https://github.com/PyCQA/flake8-bugbear/issues/153
+ syntax_tree = ast.parse(
+ "grey_list = (ValueError,)\n"
+ "black_list = (TypeError,)\n"
+ "try:\n"
+ " int('1e3')\n"
+ "except (*grey_list, *black_list):\n"
+ " print('error caught')"
+ )
+ BugBearVisitor(filename="<string>", lines=[]).visit(syntax_tree)
+
if __name__ == "__main__":
unittest.main()
| crash on tuple expansion in try/except block
Running flake8 against the following file produces no report as an exception is raised in bugbear
```python
# dummy.py
grey_list = (ValueError,)
black_list = (TypeError,)
try:
int("1e3")
except (*grey_list, *black_list):
print("error caught")
```
crashes with
```
Traceback (most recent call last):
...
File "/Users/clm/.pyenv/versions/3.8.6/envs/yt_dev/lib/python3.8/site-packages/bugbear.py", line 126, in _to_name_str
assert isinstance(node, ast.Attribute)
AssertionError
```
For information
```
$ flake8 --version
3.8.4 (flake8-bugbear: 20.11.1, mccabe: 0.6.1, pycodestyle: 2.6.0, pyflakes: 2.2.0) CPython 3.8.6 on Darwin
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_tuple_expansion_in_except_statement"
] | [
"tests/test_bugbear.py::BugbearTestCase::test_b001",
"tests/test_bugbear.py::BugbearTestCase::test_b002",
"tests/test_bugbear.py::BugbearTestCase::test_b003",
"tests/test_bugbear.py::BugbearTestCase::test_b004",
"tests/test_bugbear.py::BugbearTestCase::test_b005",
"tests/test_bugbear.py::BugbearTestCase::test_b006_b008",
"tests/test_bugbear.py::BugbearTestCase::test_b007",
"tests/test_bugbear.py::BugbearTestCase::test_b009_b010",
"tests/test_bugbear.py::BugbearTestCase::test_b011",
"tests/test_bugbear.py::BugbearTestCase::test_b012",
"tests/test_bugbear.py::BugbearTestCase::test_b013",
"tests/test_bugbear.py::BugbearTestCase::test_b014",
"tests/test_bugbear.py::BugbearTestCase::test_b015",
"tests/test_bugbear.py::BugbearTestCase::test_b016",
"tests/test_bugbear.py::BugbearTestCase::test_b301_b302_b305",
"tests/test_bugbear.py::BugbearTestCase::test_b303_b304",
"tests/test_bugbear.py::BugbearTestCase::test_b306",
"tests/test_bugbear.py::BugbearTestCase::test_b901",
"tests/test_bugbear.py::BugbearTestCase::test_b902",
"tests/test_bugbear.py::BugbearTestCase::test_b902_py38",
"tests/test_bugbear.py::BugbearTestCase::test_b903",
"tests/test_bugbear.py::BugbearTestCase::test_b950",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_bugbear",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_test_bugbear",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_any_valid_code",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_site_code"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2021-03-05T17:25:15Z" | mit |
|
PyCQA__flake8-bugbear-173 | diff --git a/bugbear.py b/bugbear.py
index b881b8b..40708b9 100644
--- a/bugbear.py
+++ b/bugbear.py
@@ -694,6 +694,13 @@ B008.immutable_calls = {
"frozenset",
"types.MappingProxyType",
"MappingProxyType",
+ "re.compile",
+ "operator.attrgetter",
+ "operator.itemgetter",
+ "operator.methodcaller",
+ "attrgetter",
+ "itemgetter",
+ "methodcaller",
}
B009 = Error(
message=(
| PyCQA/flake8-bugbear | 63239e09ededc6efe841555376f288b115154953 | diff --git a/tests/b006_b008.py b/tests/b006_b008.py
index e60e968..40bf797 100644
--- a/tests/b006_b008.py
+++ b/tests/b006_b008.py
@@ -1,7 +1,10 @@
import collections
import logging
+import operator
+import re
import time
import types
+from operator import attrgetter, itemgetter, methodcaller
from types import MappingProxyType
@@ -98,3 +101,21 @@ def float_int_is_wrong(value=float(3)):
def float_str_not_inf_or_nan_is_wrong(value=float("3.14")):
pass
+
+
+def re_compile_ok(value=re.compile("foo")):
+ pass
+
+
+def operators_ok(
+ v=operator.attrgetter("foo"),
+ v2=operator.itemgetter("foo"),
+ v3=operator.methodcaller("foo"),
+):
+ pass
+
+
+def operators_ok_unqualified(
+ v=attrgetter("foo"), v2=itemgetter("foo"), v3=methodcaller("foo")
+):
+ pass
diff --git a/tests/test_bugbear.py b/tests/test_bugbear.py
index c6e31bb..428d313 100644
--- a/tests/test_bugbear.py
+++ b/tests/test_bugbear.py
@@ -99,15 +99,15 @@ class BugbearTestCase(unittest.TestCase):
self.assertEqual(
errors,
self.errors(
- B006(26, 24),
- B006(30, 29),
- B006(34, 19),
- B006(38, 19),
- B006(42, 31),
- B008(51, 38),
- B006(67, 32),
- B008(95, 29),
- B008(99, 44),
+ B006(29, 24),
+ B006(33, 29),
+ B006(37, 19),
+ B006(41, 19),
+ B006(45, 31),
+ B008(54, 38),
+ B006(70, 32),
+ B008(98, 29),
+ B008(102, 44),
),
)
| B008: Whitelist operator.{attrgetter,itemgetter,methodcaller} default args
Calling those functions in default args is perfectly fine and *not* calling them would most likely be a bug. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_bugbear.py::BugbearTestCase::test_b006_b008"
] | [
"tests/test_bugbear.py::BugbearTestCase::test_b001",
"tests/test_bugbear.py::BugbearTestCase::test_b002",
"tests/test_bugbear.py::BugbearTestCase::test_b003",
"tests/test_bugbear.py::BugbearTestCase::test_b004",
"tests/test_bugbear.py::BugbearTestCase::test_b005",
"tests/test_bugbear.py::BugbearTestCase::test_b007",
"tests/test_bugbear.py::BugbearTestCase::test_b009_b010",
"tests/test_bugbear.py::BugbearTestCase::test_b011",
"tests/test_bugbear.py::BugbearTestCase::test_b012",
"tests/test_bugbear.py::BugbearTestCase::test_b013",
"tests/test_bugbear.py::BugbearTestCase::test_b014",
"tests/test_bugbear.py::BugbearTestCase::test_b015",
"tests/test_bugbear.py::BugbearTestCase::test_b016",
"tests/test_bugbear.py::BugbearTestCase::test_b017",
"tests/test_bugbear.py::BugbearTestCase::test_b301_b302_b305",
"tests/test_bugbear.py::BugbearTestCase::test_b303_b304",
"tests/test_bugbear.py::BugbearTestCase::test_b306",
"tests/test_bugbear.py::BugbearTestCase::test_b901",
"tests/test_bugbear.py::BugbearTestCase::test_b902",
"tests/test_bugbear.py::BugbearTestCase::test_b902_py38",
"tests/test_bugbear.py::BugbearTestCase::test_b903",
"tests/test_bugbear.py::BugbearTestCase::test_b950",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_bugbear",
"tests/test_bugbear.py::BugbearTestCase::test_selfclean_test_bugbear",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_any_valid_code",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_site_code",
"tests/test_bugbear.py::TestFuzz::test_does_not_crash_on_tuple_expansion_in_except_statement"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2021-06-19T16:19:27Z" | mit |