instance_id (string) | patch (string) | repo (string) | base_commit (string) | hints_text (string) | test_patch (string) | problem_statement (string) | version (int64) | environment_setup_commit (string) | FAIL_TO_PASS (sequence) | PASS_TO_PASS (sequence) | meta (dict) | created_at | license (string) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Jc2k__aiohomekit-233 | diff --git a/aiohomekit/zeroconf.py b/aiohomekit/zeroconf.py
index 51b6b48..030b99c 100644
--- a/aiohomekit/zeroconf.py
+++ b/aiohomekit/zeroconf.py
@@ -25,7 +25,7 @@ from ipaddress import ip_address
import logging
import async_timeout
-from zeroconf import ServiceListener, ServiceStateChange, Zeroconf
+from zeroconf import DNSPointer, ServiceListener, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf
from aiohomekit.characteristic_cache import CharacteristicCacheType
@@ -216,12 +216,16 @@ class ZeroconfController(AbstractController):
self._async_zeroconf_instance, self.hap_type
)
self._browser.service_state_changed.register_handler(self._handle_service)
+ await self._async_update_from_cache(zc)
+ return self
+
+ async def _async_update_from_cache(self, zc: Zeroconf) -> None:
+ """Load the records from the cache."""
infos = [
- AsyncServiceInfo(self.hap_type, record.name)
- for record in zc.cache.get_all_by_details(self.hap_type, TYPE_PTR, CLASS_IN)
+ AsyncServiceInfo(self.hap_type, record.alias)
+ for record in self._async_get_ptr_records(zc)
]
-
tasks = []
for info in infos:
if info.load_from_cache(self._async_zeroconf_instance.zeroconf):
@@ -232,7 +236,9 @@ class ZeroconfController(AbstractController):
if tasks:
await asyncio.gather(*tasks)
- return self
+ def _async_get_ptr_records(self, zc: Zeroconf) -> list[DNSPointer]:
+ """Return all PTR records for the HAP type."""
+ return zc.cache.async_all_by_details(self.hap_type, TYPE_PTR, CLASS_IN)
def _handle_service(
self,
| Jc2k/aiohomekit | 0b846304e45bb9b8b6118abedddadac4e8d938ff | diff --git a/tests/conftest.py b/tests/conftest.py
index 7472d30..2fbb52f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,7 +10,7 @@ from unittest import mock
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from zeroconf import SignalRegistrationInterface
+from zeroconf import DNSCache, SignalRegistrationInterface
from aiohomekit import Controller
from aiohomekit.controller.ip import IpPairing
@@ -70,6 +70,7 @@ def mock_asynczeroconf():
zc.async_register_service = AsyncMock()
zc.async_close = AsyncMock()
zeroconf = MagicMock(name="zeroconf_mock")
+ zeroconf.cache = DNSCache()
zeroconf.async_wait_for_start = AsyncMock()
zeroconf.listeners = [AsyncServiceBrowserStub()]
zc.zeroconf = zeroconf
diff --git a/tests/test_controller_ip_controller.py b/tests/test_controller_ip_controller.py
index ea8f477..385d397 100644
--- a/tests/test_controller_ip_controller.py
+++ b/tests/test_controller_ip_controller.py
@@ -1,10 +1,12 @@
from collections.abc import Iterable
import contextlib
import socket
-from unittest.mock import MagicMock, patch
+from typing import Optional
+from unittest.mock import patch
import pytest
-from zeroconf.asyncio import AsyncServiceInfo
+from zeroconf import DNSQuestionType, Zeroconf
+from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf
from aiohomekit.characteristic_cache import CharacteristicCacheMemory
from aiohomekit.controller.ip.controller import IpController
@@ -12,9 +14,23 @@ from aiohomekit.exceptions import AccessoryNotFoundError
from aiohomekit.model.categories import Categories
from aiohomekit.model.status_flags import StatusFlags
+HAP_TYPE_TCP = "_hap._tcp.local."
+HAP_TYPE_UDP = "_hap._udp.local."
+CLASS_IN = 1
+TYPE_PTR = 12
[email protected]
-def _install_mock_service_info(mock_asynczeroconf) -> Iterable[AsyncServiceInfo]:
+
+class MockedAsyncServiceInfo(AsyncServiceInfo):
+ async def async_request(
+ self,
+ zc: "Zeroconf",
+ timeout: float,
+ question_type: Optional[DNSQuestionType] = None,
+ ) -> bool:
+ return self.load_from_cache(zc)
+
+
+def _get_mock_service_info():
desc = {
b"c#": b"1",
b"id": b"00:00:01:00:00:02",
@@ -23,10 +39,9 @@ def _install_mock_service_info(mock_asynczeroconf) -> Iterable[AsyncServiceInfo]
b"ci": b"5",
b"sf": b"0",
}
-
- info = AsyncServiceInfo(
- "_hap._tcp.local.",
- "foo._hap._tcp.local.",
+ return MockedAsyncServiceInfo(
+ HAP_TYPE_TCP,
+ f"foo.{HAP_TYPE_TCP}",
addresses=[socket.inet_aton("127.0.0.1")],
port=1234,
properties=desc,
@@ -34,26 +49,33 @@ def _install_mock_service_info(mock_asynczeroconf) -> Iterable[AsyncServiceInfo]
priority=0,
)
- mock_asynczeroconf.zeroconf.cache = MagicMock(
- get_all_by_details=MagicMock(
- return_value=[
- MagicMock(alias="foo._hap._tcp.local."),
- ]
- )
+
[email protected]
+def _install_mock_service_info(
+ mock_asynczeroconf: AsyncZeroconf, info: MockedAsyncServiceInfo
+) -> Iterable[AsyncServiceInfo]:
+ zeroconf: Zeroconf = mock_asynczeroconf.zeroconf
+ zeroconf.cache.async_add_records(
+ [*info.dns_addresses(), info.dns_pointer(), info.dns_service(), info.dns_text()]
+ )
+
+ assert (
+ zeroconf.cache.async_all_by_details(HAP_TYPE_TCP, TYPE_PTR, CLASS_IN)
+ is not None
)
- with patch("aiohomekit.zeroconf.AsyncServiceInfo", side_effect=[info]):
- yield info
+ with patch("aiohomekit.zeroconf.AsyncServiceInfo", MockedAsyncServiceInfo):
+ yield
-async def test_discover_find_one(mock_asynczeroconf):
+async def test_discover_find_one(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
-
- with _install_mock_service_info(mock_asynczeroconf):
+ with _install_mock_service_info(mock_asynczeroconf, _get_mock_service_info()):
async with controller:
result = await controller.async_find("00:00:01:00:00:02")
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
assert result.description.id == "00:00:01:00:00:02"
assert result.description.category == Categories.LIGHTBULB
@@ -64,15 +86,18 @@ async def test_discover_find_one(mock_asynczeroconf):
assert result.paired is True
-async def test_discover_find_one_unpaired(mock_asynczeroconf):
+async def test_discover_find_one_unpaired(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc:
- svc.properties[b"sf"] = b"1"
+ svc = _get_mock_service_info()
+ svc.properties[b"sf"] = b"1"
+ svc._set_properties(svc.properties)
+ with _install_mock_service_info(mock_asynczeroconf, svc):
async with controller:
result = await controller.async_find("00:00:01:00:00:02")
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
assert result.description.id == "00:00:01:00:00:02"
assert result.description.status_flags == StatusFlags.UNPAIRED
@@ -86,56 +111,73 @@ async def test_discover_find_none(mock_asynczeroconf):
async with controller:
with pytest.raises(AccessoryNotFoundError):
- await controller.async_find("00:00:00:00:00:00")
+ await controller.async_find("00:00:00:00:00:00", timeout=0.001)
-async def test_find_device_id_case_lower(mock_asynczeroconf):
+async def test_find_device_id_case_lower(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info_1 = _get_mock_service_info()
+ svc_info_1.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info_1._set_properties(svc_info_1.properties)
+ with _install_mock_service_info(mock_asynczeroconf, svc_info_1):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
res = await controller.async_find("AA:AA:AA:AA:AA:AA")
assert res.description.id == "aa:aa:aa:aa:aa:aa"
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info_2 = _get_mock_service_info()
+ svc_info_2.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info_2._set_properties(svc_info_2.properties)
+
+ with _install_mock_service_info(mock_asynczeroconf, svc_info_2):
+ svc_info_2.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
res = await controller.async_find("aa:aa:aa:aa:aa:aa")
assert res.description.id == "aa:aa:aa:aa:aa:aa"
-async def test_find_device_id_case_upper(mock_asynczeroconf):
+async def test_find_device_id_case_upper(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info = _get_mock_service_info()
+ svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info._set_properties(svc_info.properties)
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
res = await controller.async_find("AA:AA:AA:AA:AA:AA")
assert res.description.id == "aa:aa:aa:aa:aa:aa"
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info = _get_mock_service_info()
+ svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info._set_properties(svc_info.properties)
+
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
res = await controller.async_find("aa:aa:aa:aa:aa:aa")
assert res.description.id == "aa:aa:aa:aa:aa:aa"
-async def test_discover_discover_one(mock_asynczeroconf):
+async def test_discover_discover_one(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf):
+ srv_info = _get_mock_service_info()
+ with _install_mock_service_info(mock_asynczeroconf, srv_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
results = [d async for d in controller.async_discover()]
assert results[0].description.id == "00:00:01:00:00:02"
@@ -156,58 +198,75 @@ async def test_discover_none(mock_asynczeroconf):
assert results == []
-async def test_discover_missing_csharp(mock_asynczeroconf):
+async def test_discover_missing_csharp(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- del svc_info.properties[b"c#"]
+ svc_info = _get_mock_service_info()
+ del svc_info.properties[b"c#"]
+ svc_info._set_properties(svc_info.properties)
+
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
results = [d async for d in controller.async_discover()]
assert results[0].description.id == "00:00:01:00:00:02"
assert results[0].description.config_num == 0
-async def test_discover_csharp_case(mock_asynczeroconf):
+async def test_discover_csharp_case(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- del svc_info.properties[b"c#"]
- svc_info.properties[b"C#"] = b"1"
+ svc_info = _get_mock_service_info()
+ del svc_info.properties[b"c#"]
+ svc_info.properties[b"C#"] = b"1"
+ svc_info._set_properties(svc_info.properties)
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
results = [d async for d in controller.async_discover()]
assert results[0].description.config_num == 1
-async def test_discover_device_id_case_lower(mock_asynczeroconf):
+async def test_discover_device_id_case_lower(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info = _get_mock_service_info()
+ svc_info.properties[b"id"] = b"aa:aa:aa:aa:aa:aa"
+ svc_info._set_properties(svc_info.properties)
+
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
+
results = [d async for d in controller.async_discover()]
assert results[0].description.id == "aa:aa:aa:aa:aa:aa"
-async def test_discover_device_id_case_upper(mock_asynczeroconf):
+async def test_discover_device_id_case_upper(mock_asynczeroconf: AsyncZeroconf):
controller = IpController(
char_cache=CharacteristicCacheMemory(), zeroconf_instance=mock_asynczeroconf
)
- with _install_mock_service_info(mock_asynczeroconf) as svc_info:
- svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info = _get_mock_service_info()
+ svc_info.properties[b"id"] = b"AA:AA:aa:aa:AA:AA"
+ svc_info._set_properties(svc_info.properties)
+
+ with _install_mock_service_info(mock_asynczeroconf, svc_info):
async with controller:
+ await controller._async_update_from_cache(mock_asynczeroconf.zeroconf)
+
results = [d async for d in controller.async_discover()]
assert results[0].description.id == "aa:aa:aa:aa:aa:aa"
| Invalid HomeKit Zeroconf record
Getting the following in home assistant logs...
2022-11-02 17:33:30.752 DEBUG (MainThread) [aiohomekit.protocol.tlv] sending [
1 (Identifier): (2 bytes/<class 'bytes'>) b'\x01\x00'
2 (Salt): (1 bytes/<class 'bytes'>) b'\x01'
]
2022-11-02 17:33:33.767 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.767 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.767 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.768 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.768 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.768 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.768 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address
2022-11-02 17:33:33.769 DEBUG (MainThread) [aiohomekit.zeroconf] _hap._tcp.local.: Not a valid homekit device: Invalid HomeKit Zeroconf record: Missing address | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_controller_ip_controller.py::test_discover_find_one",
"tests/test_controller_ip_controller.py::test_discover_find_one_unpaired",
"tests/test_controller_ip_controller.py::test_find_device_id_case_lower",
"tests/test_controller_ip_controller.py::test_find_device_id_case_upper",
"tests/test_controller_ip_controller.py::test_discover_discover_one",
"tests/test_controller_ip_controller.py::test_discover_missing_csharp",
"tests/test_controller_ip_controller.py::test_discover_csharp_case",
"tests/test_controller_ip_controller.py::test_discover_device_id_case_lower",
"tests/test_controller_ip_controller.py::test_discover_device_id_case_upper"
] | [
"tests/test_controller_ip_controller.py::test_discover_find_none",
"tests/test_controller_ip_controller.py::test_discover_none"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-11-03T00:42:54Z" | apache-2.0 |
|
JelleZijlstra__ast_decompiler-34 | diff --git a/CHANGELOG b/CHANGELOG
index 62b8063..3d37564 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,4 +1,5 @@
unreleased
+- Preserve literal newlines in docstrings
- Fix decompilation of complex infinity
- Add support for Python 3.10 pattern matching
- Fix incorrect decompilation of lambdas in comprehension guards on Python 3.9 and higher
diff --git a/ast_decompiler/decompiler.py b/ast_decompiler/decompiler.py
index d21c412..e642df8 100644
--- a/ast_decompiler/decompiler.py
+++ b/ast_decompiler/decompiler.py
@@ -834,13 +834,21 @@ class Decompiler(ast.NodeVisitor):
def write_string(self, string_value: str, kind: Optional[str] = None) -> None:
if kind is not None:
self.write(kind)
+ if isinstance(self.get_parent_node(), ast.Expr) and '"""' not in string_value:
+ self.write('"""')
+ s = string_value.encode("unicode-escape").decode("ascii")
+ s = s.replace("\\n", "\n").replace("\\r", "\r")
+ self.write(s)
+ self.write('"""')
+ return
if self.has_parent_of_type(ast.FormattedValue):
delimiter = '"'
else:
delimiter = "'"
self.write(delimiter)
s = string_value.encode("unicode-escape").decode("ascii")
- self.write(s.replace(delimiter, "\\" + delimiter))
+ s = s.replace(delimiter, "\\" + delimiter)
+ self.write(s)
self.write(delimiter)
def visit_FormattedValue(self, node: ast.FormattedValue) -> None:
| JelleZijlstra/ast_decompiler | f347ea7ddf9162d5b0e89edec73b4af0fa5fc9f7 | diff --git a/tests/test_basic.py b/tests/test_basic.py
index 5b39dbc..72a8ecb 100644
--- a/tests/test_basic.py
+++ b/tests/test_basic.py
@@ -1,6 +1,6 @@
import ast
from ast_decompiler import decompile
-from .tests import check, only_on_version
+from .tests import assert_decompiles, check, only_on_version
def test_non_module() -> None:
@@ -380,6 +380,24 @@ def test_Str() -> None:
b'foo'
"""
)
+ check('"a\\nb"')
+ assert_decompiles(
+ '''def f():
+ """Doc.
+
+ String.
+
+ """
+''',
+ '''
+def f():
+ """Doc.
+
+ String.
+
+ """
+''',
+ )
def test_Attribute() -> None:
| Docstrings are not preserved correctly.
Currently it's impossible to correctly preserve docstrings. This test shows the issue:
```
from .tests import assert_decompiles
def test_docstrings():
assert_decompiles('''
def a():
"""
Docstring.
"""
return 1
''', '''def a():
"""
Docstring.
"""
return 1
''', indentation=1)
```
Result:
```
>>> expected
def a():
"""
Docstring.
"""
return 1
>>> actual
def a():
'\n Docstring.\n '
return 1
```
Thoughts: when processing the function body in `write_function_def` we should look at the very first node in the function's body. If the node is of type Expr, and contains a string, then it's most likely docstring of this function and should be processed accordingly. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_basic.py::test_Str"
] | [
"tests/test_basic.py::test_non_module",
"tests/test_basic.py::test_FunctionDef",
"tests/test_basic.py::test_ClassDef",
"tests/test_basic.py::test_Return",
"tests/test_basic.py::test_Delete",
"tests/test_basic.py::test_Assign",
"tests/test_basic.py::test_AugAssign",
"tests/test_basic.py::test_Print",
"tests/test_basic.py::test_For",
"tests/test_basic.py::test_While",
"tests/test_basic.py::test_If",
"tests/test_basic.py::test_With",
"tests/test_basic.py::test_Raise",
"tests/test_basic.py::test_Raise_old_syntax",
"tests/test_basic.py::test_TryExcept",
"tests/test_basic.py::test_TryFinally",
"tests/test_basic.py::test_Assert",
"tests/test_basic.py::test_Import",
"tests/test_basic.py::test_ImportFrom",
"tests/test_basic.py::test_Exec",
"tests/test_basic.py::test_Global",
"tests/test_basic.py::test_Expr",
"tests/test_basic.py::test_Pass",
"tests/test_basic.py::test_Break",
"tests/test_basic.py::test_Continue",
"tests/test_basic.py::test_BoolOp",
"tests/test_basic.py::test_Binop",
"tests/test_basic.py::test_UnaryOp",
"tests/test_basic.py::test_Lambda",
"tests/test_basic.py::test_IfExp",
"tests/test_basic.py::test_Dict",
"tests/test_basic.py::test_Set",
"tests/test_basic.py::test_ListComp",
"tests/test_basic.py::test_SetComp",
"tests/test_basic.py::test_DictComp",
"tests/test_basic.py::test_GeneratorExp",
"tests/test_basic.py::test_Yield",
"tests/test_basic.py::test_Yield_in_print",
"tests/test_basic.py::test_Compare",
"tests/test_basic.py::test_Call",
"tests/test_basic.py::test_Repr",
"tests/test_basic.py::test_Num",
"tests/test_basic.py::test_longs",
"tests/test_basic.py::test_Attribute",
"tests/test_basic.py::test_Subscript",
"tests/test_basic.py::test_Name",
"tests/test_basic.py::test_List",
"tests/test_basic.py::test_Tuple",
"tests/test_basic.py::test_Slice",
"tests/test_basic.py::test_ExtSlice",
"tests/test_basic.py::test_Ellipsis",
"tests/test_basic.py::test_files"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-05-11T03:53:35Z" | apache-2.0 |
|
JeschkeLab__DeerLab-88 | diff --git a/deerlab/fitsignal.py b/deerlab/fitsignal.py
index f1e66978..61466142 100644
--- a/deerlab/fitsignal.py
+++ b/deerlab/fitsignal.py
@@ -16,7 +16,7 @@ from deerlab.utils import isempty, goodness_of_fit, Jacobian
def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
dd_par0=None, bg_par0=None, ex_par0=None, verbose=False,
dd_lb=None, bg_lb=None, ex_lb=None, dd_ub=None, bg_ub=None, ex_ub=None,
- weights=1, uqanalysis=True, regparam='aic', regtype = 'tikhonov'):
+ weights=1, uqanalysis=True, uq='covariance', regparam='aic', regtype = 'tikhonov'):
r"""
Fits a dipolar model to the experimental signal ``V`` with time axis ``t``, using
distance axis ``r``. The model is specified by the distance distribution (dd),
@@ -81,6 +81,14 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
If a model does not require parameters or are to be determined automatically it can be omitted or specified
as ``None`` (default).
+ uq : string or list, optional
+ Type of uncertainty quantification analysis. Any ``UncertQuant`` output returned by this function will
+ be adjusted accordingly. The options are:
+
+ * ``'covariance'`` - Covariance-based uncertainty quantification. Fast, but approximate.
+ * ``'bootstrap'`` - Bootstrapped uncertainty quantification. Slow, but accurate. By default, 1000 bootstrap
+ samples are used. Alternatively, a different number can be specified as follows ``uq=['bootstrap',Nsamples]``.
+
weights : array_like, optional
Array of weighting coefficients for the individual signals in global fitting,
the default is all weighted equally.
@@ -239,6 +247,18 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
if len(ex_model)!=nSignals:
ex_model = ex_model*nSignals
+ # Default bootstrap samples
+ bootsamples = 1000
+ if isinstance(uq, str):
+ uq = [uq]
+ if uq[0]!='bootstrap' and uq[0]!='covariance':
+ raise KeyError("Uncertainty quantification must be either 'covariance' or 'bootstrap'.")
+
+ if uq[0]=='bootstrap':
+ # OVerride default if user has specified bootstraped samples
+ if len(uq)>1: bootsamples = uq[1]
+ uq = uq[0]
+
# Combine input boundary and start conditions
par0 = [[] if par0_i is None else par0_i for par0_i in [dd_par0,bg_par0,ex_par0]]
lb = [[] if lb_i is None else lb_i for lb_i in [dd_lb,bg_lb,ex_lb]]
@@ -334,11 +354,11 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
K_ = dl.dipolarkernel(t[iSignal],r,pathways,Bfcn)
Ks.append(K_)
Bs.append(B_)
-
+
return Ks, Bs
# =========================================================================
- def splituq(full_uq,scales,Kfit=None):
+ def splituq(full_uq,Pfit,Vfit,Bfit,parfit_,Kfit,scales=1):
# =========================================================================
"""
Uncertainty quantification
@@ -392,6 +412,11 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
# ----------------------------------
nonneg = np.zeros_like(r)
if parametricDistribution:
+ # Prepare parametric model
+ if includeForeground:
+ Pfcn = lambda par: dd_model(r,par[ddidx])
+ else:
+ Pfcn = lambda _: np.ones_like(r)/np.trapz(np.ones_like(r),r)
Pfit_uq = paruq.propagate(Pfcn,nonneg,[])
else:
subcovmat = covmat[np.ix_(Pfreeidx,Pfreeidx)]
@@ -455,14 +480,39 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
else:
Vmod_uq.append([None])
- return Vfit_uq,Pfit_uq,Bfit_uq,Vmod_uq,Vunmod_uq,paruq_bg,paruq_ex,paruq_dd
+ return Vfit_uq, Pfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd
# =========================================================================
- OnlyRegularization = np.all(~parametricDistribution & ~includeExperiment & ~includeBackground)
- OnlyParametric = not OnlyRegularization and (parametricDistribution or not includeForeground)
+ def calculate_Vmod_Vunmod(parfit,Vfit,Bfit,scales):
+ # =========================================================================
+ " Calculation of the (un)modulated components of the dipolar signal"
+
+ # Calculate the unmodulated contribution (Vunmod)
+ # --------------------------------------------------------
+ Vunmod = []
+ for j in range(nSignals):
+ if includeExperiment[j]:
+ Lam0 = ex_model[j](parfit[exidx[j]])[0][0]
+ if includeBackground[j]:
+ Vunmod.append(Lam0*np.array(Bfit[j]))
+ else:
+ Vunmod.append(np.full_like(t[j],scales[j]*Lam0))
+ else:
+ Vunmod.append(np.zeros_like(t[j]))
+
+ # Calculate the modulated contribution (Vmod)
+ # --------------------------------------------------------
+ Vmod = []
+ for j in range(nSignals):
+ Vmod.append(Vfit[i] - Vunmod[i])
- if OnlyRegularization:
+ return Vmod, Vunmod
+ # =========================================================================
+
+ def regularization_analysis(Vexp):
+ # =========================================================================
+ " Analysis workflow for non-parametric models based on regularized least-squares"
# Use basic dipolar kernel
Ks = [dl.dipolarkernel(ts,r) for ts in t]
@@ -472,20 +522,27 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
Pfit = fit.P
Pfit_uq = fit.uncertainty
scales = np.atleast_1d(fit.scale)
-
alphaopt = fit.regparam
# Get fitted models
Vfit = [scale*K@Pfit for K,scale in zip(Ks,scales)]
Bfit = [scale*np.ones_like(V) for V,scale in zip(Vexp,scales)]
+ Vmod, Vunmod = calculate_Vmod_Vunmod(None,Vfit,Bfit,scales)
# No parameters
- parfit_ = np.asarray([None])
- if uqanalysis:
- Vfit_uq, Pfit_uq, Bfit_uq,Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(Pfit_uq,scales,Ks)
+ parfit = np.asarray([None])
+
+ if uqanalysis and uq=='covariance':
+ Vfit_uq, Pfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(Pfit_uq,Pfit,Vfit,Bfit,parfit,Ks,scales)
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, Pfit_uq, Vfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd, scales, alphaopt
+ else:
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, scales, alphaopt
+ # =========================================================================
+
+ def nonlinear_lsq_analysis(Vexp):
+ # =========================================================================
+ " Analysis workflow for fully parametric models based on nonlinear least-squares"
- elif OnlyParametric:
-
# Prepare the full-parametric model
if includeForeground:
Pfcn = lambda par: dd_model(r,par[ddidx])
@@ -495,30 +552,37 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
# Non-linear parametric fit
fit = dl.fitparamodel(Vexp,Vmodel,par0,lb,ub,weights=weights,uqanalysis=uqanalysis)
- parfit_ = fit.param
+ parfit = fit.param
param_uq = fit.uncertainty
- scales = fit.scale
+ scales = np.atleast_1d(fit.scale)
alphaopt = None
# Get fitted models
- Vfit = Vmodel(parfit_)
- _,Bfit = multiPathwayModel(parfit_)
+ Vfit = Vmodel(parfit)
+ _,Bfit = multiPathwayModel(parfit)
if includeForeground:
- Pfit = Pfcn(parfit_)
+ Pfit = Pfcn(parfit)
else:
Pfit = []
if type(Vfit) is not list:
Vfit = [Vfit]
- if type(scales) is not list:
- scales = [scales]
+ if type(Bfit) is not list:
+ Bfit = [Bfit]
Bfit = [scale*B for B,scale in zip(Bfit,scales)]
- Vfit = [V*scale for scale,V in zip(scales,Vfit) ]
+ Vfit = [scale*V for V,scale in zip(Vfit,scales) ]
+ Vmod, Vunmod = calculate_Vmod_Vunmod(parfit,Vfit,Bfit,scales)
+
+ if uqanalysis and uq=='covariance':
+ Vfit_uq, Pfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(param_uq,Pfit,Vfit,Bfit,parfit,None, scales)
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, Pfit_uq, Vfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd,scales,alphaopt
+ else:
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, scales, alphaopt
+ # =========================================================================
+
+ def separable_nonlinear_lsq_analysis(Vexp):
+ # =========================================================================
+ " Analysis workflow for semiparametric models based on separable nonlinear least-squares"
- if uqanalysis:
- Vfit_uq, Pfit_uq, Bfit_uq,Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(param_uq,scales)
-
- else:
-
# Non-negativity constraint on distributions
lbl = np.zeros_like(r)
@@ -528,41 +592,78 @@ def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
# Separable non-linear least squares (SNNLS)
fit = dl.snlls(Vexp_,lambda par: multiPathwayModel(par)[0],par0,lb,ub,lbl, reg=True,
regparam=regparam, uqanalysis=uqanalysis, weights=weights)
- parfit_ = fit.nonlin
+ parfit = fit.nonlin
Pfit = fit.lin
snlls_uq = fit.uncertainty
alphaopt = fit.regparam
scales = [prescales[i]*np.trapz(Pfit,r) for i in range(nSignals)]
# Get the fitted models
- Kfit,Bfit = multiPathwayModel(parfit_)
+ Kfit,Bfit = multiPathwayModel(parfit)
Bfit = [scale*B for B,scale in zip(Bfit,scales)]
Vfit = [scale*K@Pfit for K,scale in zip(Kfit,scales)]
+ Vmod, Vunmod = calculate_Vmod_Vunmod(parfit,Vfit,Bfit,scales)
- if uqanalysis:
- Vfit_uq, Pfit_uq, Bfit_uq,Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(snlls_uq,scales,Kfit)
-
- # Calculate the unmodulated contribution (Vunmod)
- # --------------------------------------------------------
- Vunmod = []
- for j in range(nSignals):
- if includeExperiment[j]:
- Lam0 = ex_model[j](parfit_[exidx[j]])[0][0]
- if includeBackground[j]:
- Vunmod.append(Lam0*np.array(Bfit[j]) )
- else:
- print(ex_model[j](parfit_[exidx[j]]))
- print(scales)
- Vunmod.append(np.full_like(t[j],scales[j]*Lam0))
+ if uqanalysis and uq=='covariance':
+ Vfit_uq, Pfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd = splituq(snlls_uq, Pfit, Vfit, Bfit, parfit, Kfit, scales)
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, Pfit_uq, Vfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd,scales,alphaopt
else:
- Vunmod.append(np.zeros_like(t[j]))
+ return fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit, scales, alphaopt
+ # =========================================================================
-
- # Calculate the modulated contribution (Vmod)
- # --------------------------------------------------------
- Vmod = []
- for j in range(nSignals):
- Vmod.append(Vfit[i] - Vunmod[i])
+ # Analyze the data
+ # ----------------------
+
+ # Determine type of model
+ nonparametric = np.all(~parametricDistribution & ~includeExperiment & ~includeBackground)
+ fullparametric = not nonparametric and (parametricDistribution or not includeForeground)
+ semiparametric = not nonparametric and not fullparametric
+
+ # Choose appropiate analysis for type of model
+ if nonparametric:
+ analysis = regularization_analysis
+ elif fullparametric:
+ analysis = nonlinear_lsq_analysis
+ elif semiparametric:
+ analysis = separable_nonlinear_lsq_analysis
+
+ # Run the analysis
+ results = analysis(Vexp)
+
+ # Unpack results
+ if uqanalysis and uq=='covariance':
+ fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit_, Pfit_uq, Vfit_uq, Bfit_uq, Vmod_uq, Vunmod_uq, paruq_bg, paruq_ex, paruq_dd, scales, alphaopt = results
+ else:
+ fit, Pfit, Vfit, Bfit, Vmod, Vunmod, parfit_, scales, alphaopt = results
+
+ # Bootstrapping uncertainty quantification
+ # -----------------------------------------
+ if uqanalysis and uq=='bootstrap':
+
+ def bootstrapfcn(Vexp):
+ # ======================================================
+ # Fit the data
+ _, Pfit_, Vfit_, Bfit_, Vmod_, Vunmod_, parfit, _, _ = analysis(Vexp)
+ # Extract the individual parameter subsets
+ parfit_bg = [parfit[bgidx[n]] for n in range(nSignals)]
+ parfit_ex = [parfit[exidx[n]] for n in range(nSignals)]
+ parfit_dd = parfit[ddidx]
+
+ return Pfit_,*Vfit_,*Bfit_,*Vmod_,*Vunmod_,*parfit_bg,*parfit_ex,parfit_dd
+ # ======================================================
+
+ # Run bootstrapping
+ boot_uq = dl.bootan(bootstrapfcn,Vexp,Vfit,samples=bootsamples,verbose=verbose)
+
+ # Unpack bootstrapping results
+ Pfit_uq = boot_uq[0]
+ Vfit_uq = [boot_uq[1+n] for n in range(nSignals)]
+ Bfit_uq = [boot_uq[1+nSignals+n] for n in range(nSignals)]
+ Vmod_uq = [boot_uq[1+2*nSignals+n] for n in range(nSignals)]
+ Vunmod_uq = [boot_uq[1+3*nSignals+n] for n in range(nSignals)]
+ paruq_bg = [boot_uq[1+4*nSignals+n] for n in range(nSignals)]
+ paruq_ex = [boot_uq[1+5*nSignals+n] for n in range(nSignals)]
+ paruq_dd = boot_uq[-1]
# Normalize distribution
# -----------------------
| JeschkeLab/DeerLab | 182ef2fef45d98c19cf436b2388f0d96b135922e | diff --git a/test/test_fitsignal.py b/test/test_fitsignal.py
index 2d66362b..a5101f1a 100644
--- a/test/test_fitsignal.py
+++ b/test/test_fitsignal.py
@@ -332,28 +332,30 @@ def assert_confidence_intervals(pci50,pci95,pfit,lb,ub):
assert not errors, "Errors occured:\n{}".format("\n".join(errors))
#----------------------------------------------------------------------
-def assert_confinter_param(subset):
#----------------------------------------------------------------------
- exmodel = ex_4pdeer
- ddmodel = dd_gauss
- bgmodel = bg_exp
+exmodel = ex_4pdeer
+ddmodel = dd_gauss
+bgmodel = bg_exp
- r = np.linspace(2,6,40)
- P = ddmodel(r,[4.5, 0.25])
+r = np.linspace(2,6,40)
+P = ddmodel(r,[4.5, 0.25])
- info = exmodel()
- parIn = info['Start']
- pathways = exmodel(parIn)
+info = exmodel()
+parIn = info['Start']
+pathways = exmodel(parIn)
- kappa = 0.4
- Bmodel = lambda t,lam: bgmodel(t,kappa)
+kappa = 0.4
+Bmodel = lambda t,lam: bgmodel(t,kappa)
- t = np.linspace(0,5,100)
- np.random.seed(0)
- V = dipolarkernel(t,r,pathways,Bmodel)@P + whitegaussnoise(t,0.01)
-
- fit = fitsignal(V,t,r,ddmodel,bgmodel,exmodel,uqanalysis=True)
+t = np.linspace(0,5,100)
+np.random.seed(0)
+V = dipolarkernel(t,r,pathways,Bmodel)@P + whitegaussnoise(t,0.01)
+fit = fitsignal(V,t,r,ddmodel,bgmodel,exmodel,uqanalysis=True)
+#----------------------------------------------------------------------
+
+def assert_confinter_param(subset):
+#----------------------------------------------------------------------
if subset == 'ex':
info = exmodel()
pfit = fit.exparam
@@ -394,33 +396,37 @@ def test_confinter_ddparam():
assert_confinter_param('dd')
# ======================================================================
-def assert_confinter_models(subset):
#----------------------------------------------------------------------
- exmodel = ex_4pdeer
- if subset == 'Pfitfree':
- ddmodel = 'P'
- subset = 'Pfit'
- else:
- ddmodel= dd_gauss
- bgmodel = bg_exp
+exmodel = ex_4pdeer
+bgmodel = bg_exp
- r = np.linspace(2,6,40)
- P = dd_gauss(r,[4.5, 0.25])
+r = np.linspace(2,6,40)
+P = dd_gauss(r,[4.5, 0.25])
- info = exmodel()
- parIn = info['Start']
- pathways = exmodel(parIn)
+info = exmodel()
+parIn = info['Start']
+pathways = exmodel(parIn)
- kappa = 0.4
- Bmodel = lambda t: bgmodel(t,kappa)
+kappa = 0.4
+Bmodel = lambda t: bgmodel(t,kappa)
- t = np.linspace(0,5,100)
- np.random.seed(0)
- V = dipolarkernel(t,r,pathways,Bmodel)@P + whitegaussnoise(t,0.03)
-
- fit = fitsignal(V,t,r,ddmodel,bgmodel,exmodel,uqanalysis=True)
+t = np.linspace(0,5,100)
+np.random.seed(0)
+V = dipolarkernel(t,r,pathways,Bmodel)@P + whitegaussnoise(t,0.03)
+
+fit_Pparam = fitsignal(V,t,r,ddmodel,bgmodel,exmodel,uq='covariance')
+fit_Pfree = fitsignal(V,t,r,'P',bgmodel,exmodel,uq='covariance')
+#----------------------------------------------------------------------
- if subset == 'Pfit':
+def assert_confinter_models(subset):
+#----------------------------------------------------------------------
+
+ if subset=='Pfitfree':
+ fit = fit_Pfree
+ else:
+ fit = fit_Pparam
+
+ if subset == 'Pfit' or subset == 'Pfitfree':
modelfit = fit.P
lb = np.zeros_like(r)
ub = np.full_like(r,inf)
@@ -466,6 +472,7 @@ def test_confinter_Bfit():
assert_confinter_models('Bfit')
# ======================================================================
+
def assert_confinter_noforeground():
# ======================================================================
"Check that the confidence inervals for a pure background fit are correct"
@@ -674,3 +681,96 @@ def test_cost_value():
assert isinstance(fit.cost,float) and np.round(fit.cost/np.sum(fit.residuals**2),5)==1
# ======================================================================
+
+# ----------------------------------------------------------------------
+ exmodel = ex_4pdeer
+ bgmodel = bg_exp
+
+ r = np.linspace(2,6,40)
+ P = dd_gauss(r,[4.5, 0.25])
+
+ info = exmodel()
+ parIn = info['Start']
+ pathways = exmodel(parIn)
+
+ kappa = 0.4
+ Bmodel = lambda t: bgmodel(t,kappa)
+
+ t = np.linspace(0,5,100)
+ np.random.seed(0)
+ V = dipolarkernel(t,r,pathways,Bmodel)@P + whitegaussnoise(t,0.03)
+
+ fit = fitsignal(V,t,r,ddmodel,bgmodel,exmodel,uq=['bootstrap',2])
+# ----------------------------------------------------------------------
+
+def assert_boot_ci(quantity):
+# ----------------------------------------------------------------------
+
+ if quantity=='P':
+ ci = fit.Puncert.ci(95)
+ elif quantity=='V':
+ ci = fit.Vuncert.ci(95)
+ elif quantity=='Vmod':
+ ci = fit.VmodUncert.ci(95)
+ elif quantity=='Vunmod':
+ ci = fit.VunmodUncert.ci(95)
+ elif quantity=='B':
+ ci = fit.Buncert.ci(95)
+ elif quantity=='ddparam':
+ ci = fit.ddparamUncert.ci(95)
+ elif quantity=='bgparam':
+ ci = fit.bgparamUncert.ci(95)
+ elif quantity=='exparam':
+ ci = fit.exparamUncert.ci(95)
+
+ assert np.all(ci[:,0]<=ci[:,1])
+# ----------------------------------------------------------------------
+
+
+def test_bootci_P():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('P')
+# ======================================================================
+
+def test_bootci_V():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('V')
+# ======================================================================
+
+def test_bootci_B():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('B')
+# ======================================================================
+
+def test_bootci_Vmod():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('Vmod')
+# ======================================================================
+
+def test_bootci_Vunmod():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('Vunmod')
+# ======================================================================
+
+def test_bootci_ddparam():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('ddparam')
+# ======================================================================
+
+def test_bootci_exparam():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('exparam')
+# ======================================================================
+
+def test_bootci_bparam():
+# ======================================================================
+ "Check that the bootstrapped confidence intervals work"
+ assert_boot_ci('bgparam')
+# ======================================================================
\ No newline at end of file
| Internalize bootstrapping for fitsignal quantities
At the moment `fitsignal` does only return uncertainty quantification based on asymptotic approximation (covariance-based). If the user wants to get bootstrapped confidence intervals the `bootan` must be used. While for advanced users this might be simple:
```` python
fit = dl.fitsignal(Vexp,t,rfit)
def myfcn(Vexp):
fit = dl.fitsignal(Vexp,t,rfit)
return fit.P,fit.V,fit.B,fit.exparam,fit.bgparam,fit.ddparam
bootuq = dl.bootan(myfcn,Vexp,fit.V,samples=N,verbose=True)
````
For routine use this is cumbersome. This could be easily internalized into `fitsignal` since the output quantities are hard-coded. The bootstrapping could then be exposed just as an option, for example:
````python
fit = dl.fitsignal(Vexp,t,rfit,uq='covariance')
fit = dl.fitsignal(Vexp,t,rfit,uq='bootstrap')
```` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_fitsignal.py::test_4pdeer",
"test/test_fitsignal.py::test_5pdeer",
"test/test_fitsignal.py::test_7pdeer",
"test/test_fitsignal.py::test_ovl4pdeer",
"test/test_fitsignal.py::test_ridme1",
"test/test_fitsignal.py::test_ridme3",
"test/test_fitsignal.py::test_ridme5",
"test/test_fitsignal.py::test_dipevo_function",
"test/test_fitsignal.py::test_form_factor",
"test/test_fitsignal.py::test_full_parametric",
"test/test_fitsignal.py::test_no_foreground",
"test/test_fitsignal.py::test_start_values",
"test/test_fitsignal.py::test_boundaries",
"test/test_fitsignal.py::test_boundaries_adjust_bg",
"test/test_fitsignal.py::test_boundaries_adjust_ex",
"test/test_fitsignal.py::test_boundaries_adjust_dd",
"test/test_fitsignal.py::test_global_4pdeer",
"test/test_fitsignal.py::test_global_full_parametric",
"test/test_fitsignal.py::test_global_mixed_backgrounds",
"test/test_fitsignal.py::test_global_mixed_experiments",
"test/test_fitsignal.py::test_confinter_exparam",
"test/test_fitsignal.py::test_confinter_bgparam",
"test/test_fitsignal.py::test_confinter_ddparam",
"test/test_fitsignal.py::test_confinter_Pfit",
"test/test_fitsignal.py::test_confinter_Pfitfree",
"test/test_fitsignal.py::test_confinter_Vfit",
"test/test_fitsignal.py::test_confinter_Bfit",
"test/test_fitsignal.py::test_global_scale_4pdeer",
"test/test_fitsignal.py::test_V_scale_parametric",
"test/test_fitsignal.py::test_V_scale",
"test/test_fitsignal.py::test_V_scale_regularized",
"test/test_fitsignal.py::test_plot",
"test/test_fitsignal.py::test_physical_bg_model",
"test/test_fitsignal.py::test_phenomenological_bg_model",
"test/test_fitsignal.py::test_Vunmod",
"test/test_fitsignal.py::test_cost_value",
"test/test_fitsignal.py::test_bootci_P",
"test/test_fitsignal.py::test_bootci_V",
"test/test_fitsignal.py::test_bootci_B",
"test/test_fitsignal.py::test_bootci_Vmod",
"test/test_fitsignal.py::test_bootci_Vunmod",
"test/test_fitsignal.py::test_bootci_ddparam",
"test/test_fitsignal.py::test_bootci_exparam",
"test/test_fitsignal.py::test_bootci_bparam"
] | [] | {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-12T10:46:14Z" | mit |
|
JoshData__python-email-validator-58 | diff --git a/README.md b/README.md
index 2ef35de..929b525 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ Key features:
login forms or other uses related to identifying users.
* Gives friendly error messages when validation fails (appropriate to show
to end users).
-* (optionally) Checks deliverability: Does the domain name resolve?
+* (optionally) Checks deliverability: Does the domain name resolve? And you can override the default DNS resolver.
* Supports internationalized domain names and (optionally)
internationalized local parts.
* Normalizes email addresses (super important for internationalized
@@ -69,23 +69,27 @@ This validates the address and gives you its normalized form. You should
put the normalized form in your database and always normalize before
checking if an address is in your database.
-The validator will accept internationalized email addresses, but email
-addresses with non-ASCII characters in the *local* part of the address
-(before the @-sign) require the
-[SMTPUTF8](https://tools.ietf.org/html/rfc6531) extension which may not
-be supported by your mail submission library or your outbound mail
-server. If you know ahead of time that SMTPUTF8 is not supported then
-**add the keyword argument allow\_smtputf8=False to fail validation for
-addresses that would require SMTPUTF8**:
+When validating many email addresses or to control the timeout (the default is 15 seconds), create a caching [dns.resolver.Resolver](https://dnspython.readthedocs.io/en/latest/resolver-class.html) to reuse in each call:
```python
-valid = validate_email(email, allow_smtputf8=False)
+from email_validator import validate_email, caching_resolver
+
+resolver = caching_resolver(timeout=10)
+
+while True:
+ valid = validate_email(email, dns_resolver=resolver)
```
+The validator will accept internationalized email addresses, but not all
+mail systems can send email to an addresses with non-ASCII characters in
+the *local* part of the address (before the @-sign). See the `allow_smtputf8`
+option below.
+
+
Overview
--------
-The module provides a single function `validate_email(email_address)` which
+The module provides a function `validate_email(email_address)` which
takes an email address (either a `str` or ASCII `bytes`) and:
- Raises a `EmailNotValidError` with a helpful, human-readable error
@@ -128,6 +132,9 @@ shown):
`allow_empty_local=False`: Set to `True` to allow an empty local part (i.e.
`@example.com`), e.g. for validating Postfix aliases.
+
+`dns_resolver=None`: Pass an instance of [dns.resolver.Resolver](https://dnspython.readthedocs.io/en/latest/resolver-class.html) to control the DNS resolver including setting a timeout and [a cache](https://dnspython.readthedocs.io/en/latest/resolver-caching.html). The `caching_resolver` function shown above is a helper function to construct a dns.resolver.Resolver with a [LRUCache](https://dnspython.readthedocs.io/en/latest/resolver-caching.html#dns.resolver.LRUCache). Reuse the same resolver instance across calls to `validate_email` to make use of the cache.
+
Internationalized email addresses
---------------------------------
diff --git a/email_validator/__init__.py b/email_validator/__init__.py
index ded7899..f960f67 100644
--- a/email_validator/__init__.py
+++ b/email_validator/__init__.py
@@ -180,12 +180,20 @@ def __get_length_reason(addr, utf8=False, limit=EMAIL_MAX_LENGTH):
return reason.format(prefix, diff, suffix)
+def caching_resolver(timeout=DEFAULT_TIMEOUT, cache=None):
+ resolver = dns.resolver.Resolver()
+ resolver.cache = cache or dns.resolver.LRUCache()
+ resolver.lifetime = timeout # timeout, in seconds
+ return resolver
+
+
def validate_email(
email,
allow_smtputf8=True,
allow_empty_local=False,
check_deliverability=True,
timeout=DEFAULT_TIMEOUT,
+ dns_resolver=None
):
"""
Validates an email address, raising an EmailNotValidError if the address is not valid or returning a dict of
@@ -273,7 +281,9 @@ def validate_email(
if check_deliverability:
# Validate the email address's deliverability and update the
# return dict with metadata.
- deliverability_info = validate_email_deliverability(ret["domain"], ret["domain_i18n"], timeout)
+ deliverability_info = validate_email_deliverability(
+ ret["domain"], ret["domain_i18n"], timeout, dns_resolver
+ )
if "mx" in deliverability_info:
ret.mx = deliverability_info["mx"]
ret.mx_fallback_type = deliverability_info["mx-fallback"]
@@ -443,15 +453,22 @@ def validate_email_domain_part(domain):
}
-def validate_email_deliverability(domain, domain_i18n, timeout=DEFAULT_TIMEOUT):
+def validate_email_deliverability(domain, domain_i18n, timeout=DEFAULT_TIMEOUT, dns_resolver=None):
# Check that the domain resolves to an MX record. If there is no MX record,
# try an A or AAAA record which is a deprecated fallback for deliverability.
- def dns_resolver_resolve_shim(resolver, domain, record):
+ # If no dns.resolver.Resolver was given, get dnspython's default resolver.
+ # Override the default resolver's timeout. This may affect other uses of
+ # dnspython in this process.
+ if dns_resolver is None:
+ dns_resolver = dns.resolver.get_default_resolver()
+ dns_resolver.lifetime = timeout
+
+ def dns_resolver_resolve_shim(domain, record):
try:
# dns.resolver.Resolver.resolve is new to dnspython 2.x.
# https://dnspython.readthedocs.io/en/latest/resolver-class.html#dns.resolver.Resolver.resolve
- return resolver.resolve(domain, record)
+ return dns_resolver.resolve(domain, record)
except AttributeError:
# dnspython 2.x is only available in Python 3.6 and later. For earlier versions
# of Python, we maintain compatibility with dnspython 1.x which has a
@@ -460,7 +477,7 @@ def validate_email_deliverability(domain, domain_i18n, timeout=DEFAULT_TIMEOUT):
# which we prevent by adding a "." to the domain name to make it absolute.
# dns.resolver.Resolver.query is deprecated in dnspython version 2.x.
# https://dnspython.readthedocs.io/en/latest/resolver-class.html#dns.resolver.Resolver.query
- return resolver.query(domain + ".", record)
+ return dns_resolver.query(domain + ".", record)
try:
# We need a way to check how timeouts are handled in the tests. So we
@@ -469,28 +486,23 @@ def validate_email_deliverability(domain, domain_i18n, timeout=DEFAULT_TIMEOUT):
if getattr(validate_email_deliverability, 'TEST_CHECK_TIMEOUT', False):
raise dns.exception.Timeout()
- resolver = dns.resolver.get_default_resolver()
-
- if timeout:
- resolver.lifetime = timeout
-
try:
# Try resolving for MX records and get them in sorted priority order.
- response = dns_resolver_resolve_shim(resolver, domain, "MX")
+ response = dns_resolver_resolve_shim(domain, "MX")
mtas = sorted([(r.preference, str(r.exchange).rstrip('.')) for r in response])
mx_fallback = None
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
# If there was no MX record, fall back to an A record.
try:
- response = dns_resolver_resolve_shim(resolver, domain, "A")
+ response = dns_resolver_resolve_shim(domain, "A")
mtas = [(0, str(r)) for r in response]
mx_fallback = "A"
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
# If there was no A record, fall back to an AAAA record.
try:
- response = dns_resolver_resolve_shim(resolver, domain, "AAAA")
+ response = dns_resolver_resolve_shim(domain, "AAAA")
mtas = [(0, str(r)) for r in response]
mx_fallback = "AAAA"
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
| JoshData/python-email-validator | ad53fb4166a10a09b76b4a9861a305017a98f5fc | diff --git a/tests/test_main.py b/tests/test_main.py
index af975ba..d2fd923 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,7 +1,9 @@
+from unittest import mock
+import dns.resolver
import pytest
from email_validator import EmailSyntaxError, EmailUndeliverableError, \
validate_email, validate_email_deliverability, \
- ValidatedEmail
+ caching_resolver, ValidatedEmail
# Let's test main but rename it to be clear
from email_validator import main as validator_main
@@ -344,3 +346,27 @@ def test_main_output_shim(monkeypatch, capsys):
# The \n is part of the print statement, not part of the string, which is what the b'...' is
# Since we're mocking py 2.7 here instead of actually using 2.7, this was the closest I could get
assert stdout == "b'An email address cannot have a period immediately after the @-sign.'\n"
+
+
[email protected]("dns.resolver.LRUCache.put")
+def test_validate_email__with_caching_resolver(mocked_put):
+ dns_resolver = caching_resolver()
+ validate_email("[email protected]", dns_resolver=dns_resolver)
+ assert mocked_put.called
+
+ with mock.patch("dns.resolver.LRUCache.get") as mocked_get:
+ validate_email("[email protected]", dns_resolver=dns_resolver)
+ assert mocked_get.called
+
+
[email protected]("dns.resolver.LRUCache.put")
+def test_validate_email__with_configured_resolver(mocked_put):
+ dns_resolver = dns.resolver.Resolver()
+ dns_resolver.lifetime = 10
+ dns_resolver.cache = dns.resolver.LRUCache(max_size=1000)
+ validate_email("[email protected]", dns_resolver=dns_resolver)
+ assert mocked_put.called
+
+ with mock.patch("dns.resolver.LRUCache.get") as mocked_get:
+ validate_email("[email protected]", dns_resolver=dns_resolver)
+ assert mocked_get.called
| Idea: cache successful dns lookups?
Would it make sense to cache (in memory) dns records that got a match to prevent a large amount of dns lookups for common email providers like gmail.com, hotmail.com etc?
If also caching non matches is fine `functools.lru_cache` decorator could be added to validate_email_deliverability, otherwise some custom logic could be written to keep a small cache of recent valid domains.
What do you think? I could probably make a PR if you like it? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_main.py::test_email_valid[[email protected]]",
"tests/test_main.py::test_email_valid[[email protected]]",
"tests/test_main.py::test_email_valid[user+mailbox/[email protected]]",
"tests/test_main.py::test_email_valid[!#$%&'*+-/=?^_`.{|}[email protected]]",
"tests/test_main.py::test_email_valid[\\u4f0a\\u662d\\u5091@\\u90f5\\u4ef6.\\u5546\\u52d9-output4]",
"tests/test_main.py::test_email_valid[\\u0930\\u093e\\u092e@\\u092e\\u094b\\u0939\\u0928.\\u0908\\u0928\\u094d\\u092b\\u094b-output5]",
"tests/test_main.py::test_email_valid[\\u044e\\u0437\\u0435\\u0440@\\u0435\\u043a\\u0437\\u0430\\u043c\\u043f\\u043b.\\u043a\\u043e\\u043c-output6]",
"tests/test_main.py::test_email_valid[\\u03b8\\u03c3\\u03b5\\u03c1@\\u03b5\\u03c7\\u03b1\\u03bc\\u03c0\\u03bb\\u03b5.\\u03c8\\u03bf\\u03bc-output7]",
"tests/test_main.py::test_email_valid[\\uf96e\\u58eb\\u8c6a@\\u81fa\\u7db2\\u4e2d\\u5fc3.tw-output8]",
"tests/test_main.py::test_email_valid[jeff@\\u81fa\\u7db2\\u4e2d\\u5fc3.tw-output9]",
"tests/test_main.py::test_email_valid[\\uf96e\\u58eb\\u8c6a@\\u81fa\\u7db2\\u4e2d\\u5fc3.\\u53f0\\u7063-output10]",
"tests/test_main.py::test_email_valid[jeff\\uf96e@\\u81fa\\u7db2\\u4e2d\\u5fc3.tw-output11]",
"tests/test_main.py::test_email_valid[\\xf1o\\xf1\\[email protected]]",
"tests/test_main.py::test_email_valid[\\u6211\\[email protected]]",
"tests/test_main.py::test_email_valid[\\u7532\\u6590\\u9ed2\\u5ddd\\u65e5\\[email protected]]",
"tests/test_main.py::test_email_valid[\\u0447\\u0435\\u0431\\u0443\\u0440\\u0430\\u0448\\u043a\\u0430\\u044f\\u0449\\u0438\\u043a-\\u0441-\\u0430\\u043f\\u0435\\u043b\\u044c\\u0441\\u0438\\u043d\\u0430\\u043c\\u0438.\\u0440\\[email protected]]",
"tests/test_main.py::test_email_valid[\\u0909\\u0926\\u093e\\u0939\\u0930\\u0923.\\u092a\\u0930\\u0940\\u0915\\u094d\\[email protected]]",
"tests/test_main.py::test_email_valid[\\u03b9\\u03c9\\u03ac\\u03bd\\u03bd\\u03b7\\u03c2@\\u03b5\\u03b5\\u03c4\\u03c4.gr-output17]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[my@\\uff0e\\uff0eleadingfwdot.com-An",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]\\n-The",
"tests/test_main.py::test_email_invalid[my@example\\n.com-The",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[[email protected]",
"tests/test_main.py::test_email_invalid[me@\\u2488wouldbeinvalid.com-The",
"tests/test_main.py::test_email_invalid[@example.com-There",
"tests/test_main.py::test_email_invalid[\\[email protected]",
"tests/test_main.py::test_email_invalid[m\\[email protected]",
"tests/test_main.py::test_email_invalid[my\\[email protected]",
"tests/test_main.py::test_email_invalid[11111111112222222222333333333344444444445555555555666666666677777@example.com-The",
"tests/test_main.py::test_email_invalid[111111111122222222223333333333444444444455555555556666666666777777@example.com-The",
"tests/test_main.py::test_email_invalid[me@1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.111111111122222222223333333333444444444455555555556.com-The",
"tests/test_main.py::test_email_invalid[my.long.address@1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.11111111112222222222333333333344444.info-The",
"tests/test_main.py::test_email_invalid[my.long.address@\\u03bb111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.11111111112222222222333333.info-The",
"tests/test_main.py::test_email_invalid[my.long.address@\\u03bb111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444.info-The",
"tests/test_main.py::test_email_invalid[my.\\u03bbong.address@1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.111111111122222222223333333333444.info-The",
"tests/test_main.py::test_email_invalid[my.\\u03bbong.address@1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444444444555555555.6666666666777777777788888888889999999999000000000.1111111111222222222233333333334444.info-The",
"tests/test_main.py::test_dict_accessor",
"tests/test_main.py::test_deliverability_no_records",
"tests/test_main.py::test_deliverability_found",
"tests/test_main.py::test_deliverability_fails",
"tests/test_main.py::test_deliverability_dns_timeout",
"tests/test_main.py::test_main_single_good_input",
"tests/test_main.py::test_main_single_bad_input",
"tests/test_main.py::test_main_multi_input",
"tests/test_main.py::test_main_input_shim",
"tests/test_main.py::test_main_output_shim",
"tests/test_main.py::test_validate_email__with_caching_resolver",
"tests/test_main.py::test_validate_email__with_configured_resolver"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-11-14T22:04:36Z" | cc0-1.0 |
|
JrGoodle__clowder-224 | diff --git a/clowder/cmd.py b/clowder/cmd.py
index cf55992f..940b8ff6 100644
--- a/clowder/cmd.py
+++ b/clowder/cmd.py
@@ -646,7 +646,7 @@ def exit_unrecognized_command(parser):
def exit_clowder_not_found():
"""Print clowder not found message and exit"""
- cprint('No clowder found in the current directory\n', 'red')
+ cprint(' - No clowder found in the current directory\n', 'red')
sys.exit(1)
def main():
diff --git a/clowder/project.py b/clowder/project.py
index f46f2a1a..dc7a598b 100644
--- a/clowder/project.py
+++ b/clowder/project.py
@@ -17,7 +17,6 @@ from clowder.utility.git_print_utilities import (
)
from clowder.utility.git_utilities import (
git_create_repo,
- git_current_sha,
git_existing_local_branch,
git_existing_remote_branch,
git_existing_repository,
@@ -28,6 +27,7 @@ from clowder.utility.git_utilities import (
git_prune_local,
git_prune_remote,
git_reset_head,
+ git_sha_long,
git_start,
git_stash,
git_status,
@@ -125,7 +125,7 @@ class Project(object):
'path': self.path,
'depth': self.depth,
'forks': forks_yaml,
- 'ref': git_current_sha(self.full_path()),
+ 'ref': git_sha_long(self.full_path()),
'remote': self.remote_name,
'source': self.source.name}
diff --git a/clowder/utility/git_print_utilities.py b/clowder/utility/git_print_utilities.py
index e048ee99..cde9d04a 100644
--- a/clowder/utility/git_print_utilities.py
+++ b/clowder/utility/git_print_utilities.py
@@ -2,12 +2,12 @@
from termcolor import colored, cprint
from clowder.utility.git_utilities import (
git_current_branch,
- git_current_sha,
git_existing_repository,
git_is_detached,
git_is_dirty,
git_new_local_commits,
- git_new_upstream_commits
+ git_new_upstream_commits,
+ git_sha_short
)
def format_project_string(repo_path, name):
@@ -34,7 +34,7 @@ def format_project_ref_string(repo_path):
status = ' (' + local_commits_output + '/' + upstream_commits_output + ')'
if git_is_detached(repo_path):
- current_ref = git_current_sha(repo_path)
+ current_ref = git_sha_short(repo_path)
return colored('(HEAD @ ' + current_ref + ')', 'magenta')
else:
current_branch = git_current_branch(repo_path)
diff --git a/clowder/utility/git_utilities.py b/clowder/utility/git_utilities.py
index 722906c9..a6091d76 100644
--- a/clowder/utility/git_utilities.py
+++ b/clowder/utility/git_utilities.py
@@ -71,7 +71,7 @@ def git_create_repo(url, repo_path, remote, ref, depth=0):
try:
shutil.rmtree(repo_path)
except:
- message = colored(" - Failed remove directory ", 'red')
+ message = colored(" - Failed to remove directory ", 'red')
print(message + format_path(repo_path))
finally:
print()
@@ -93,7 +93,7 @@ def git_create_repo(url, repo_path, remote, ref, depth=0):
try:
shutil.rmtree(repo_path)
except:
- message = colored(" - Failed remove directory ", 'red')
+ message = colored(" - Failed to remove directory ", 'red')
print(message + format_path(repo_path))
finally:
print()
@@ -121,11 +121,6 @@ def git_current_branch(repo_path):
repo = _repo(repo_path)
return repo.head.ref.name
-def git_current_sha(repo_path):
- """Return current git sha for checked out commit"""
- repo = _repo(repo_path)
- return repo.head.commit.hexsha
-
def git_existing_repository(path):
"""Check if a git repository exists"""
return os.path.isdir(os.path.join(path, '.git'))
@@ -345,6 +340,17 @@ def git_reset_head(repo_path):
repo = _repo(repo_path)
repo.head.reset(index=True, working_tree=True)
+def git_sha_long(repo_path):
+ """Return long sha for currently checked out commit"""
+ repo = _repo(repo_path)
+ return repo.head.commit.hexsha
+
+def git_sha_short(repo_path):
+ """Return short sha of currently checked out commit"""
+ repo = _repo(repo_path)
+ sha = repo.head.commit.hexsha
+ return repo.git.rev_parse(sha, short=True)
+
def git_start(repo_path, remote, branch, depth, tracking):
"""Start new branch in repository"""
repo = _repo(repo_path)
| JrGoodle/clowder | ba94600aa61ce6bc866644c461c89fe9ca67375b | diff --git a/test/test_git_utilities.py b/test/test_git_utilities.py
index 8b887c39..4bab4333 100644
--- a/test/test_git_utilities.py
+++ b/test/test_git_utilities.py
@@ -3,9 +3,9 @@ import os
import unittest
from clowder.utility.git_utilities import (
git_current_branch,
- git_current_sha,
git_is_detached,
git_is_dirty,
+ git_sha_long,
_ref_type,
_truncate_ref
)
@@ -26,9 +26,9 @@ class GitUtilitiesTest(unittest.TestCase):
"""Test git_current_branch() function"""
self.assertEqual(git_current_branch(self.kit_project_path), 'master')
- def test_git_current_sha(self):
- """Test git_current_sha() function"""
- self.assertEqual(git_current_sha(self.sasha_project_path), self.sha_ref)
+ def test_git_sha_long(self):
+ """Test git_sha_long() function"""
+ self.assertEqual(git_sha_long(self.sasha_project_path), self.sha_ref)
def test_git_is_detached(self):
"""Test git_is_detached() function"""
| Support short commit hashes
Use for output display | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_git_utilities.py::GitUtilitiesTest::test_ref_type_branch",
"test/test_git_utilities.py::GitUtilitiesTest::test_ref_type_sha",
"test/test_git_utilities.py::GitUtilitiesTest::test_ref_type_tag",
"test/test_git_utilities.py::GitUtilitiesTest::test_ref_type_unknown",
"test/test_git_utilities.py::GitUtilitiesTest::test_truncate_ref_branch",
"test/test_git_utilities.py::GitUtilitiesTest::test_truncate_ref_sha",
"test/test_git_utilities.py::GitUtilitiesTest::test_truncate_ref_tag"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2017-09-16T23:42:03Z" | mit |
|
Justintime50__github-archive-44 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80dcc7c..b922fe4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
# CHANGELOG
+## NEXT RELEASE
+
+* Adds an `--include` and `--exclude` CLI flag that accepts a comma-separated list of repo names to either include or exclude. If neither are passed, no filtering will occur (closes #43)
+
## v4.3.0 (2021-12-16)
* Adds the ability to specify a custom base_url for GitHub (useful for enterprise GitHub users with a custom hostname, closes #41)
diff --git a/README.md b/README.md
index 8c52d73..506af5d 100644
--- a/README.md
+++ b/README.md
@@ -48,6 +48,10 @@ Options:
-c, --clone Pass this flag to clone git assets.
-p, --pull Pass this flag to pull git assets.
-f, --forks Pass this flag to include forked git assets.
+ -i INCLUDE, --include INCLUDE
+ Pass a comma separated list of repos to include in the Archive.
+ -e EXCLUDE, --exclude EXCLUDE
+ Pass a comma separated list of repos to exclude from the Archive.
-l LOCATION, --location LOCATION
The location where you want your GitHub Archive to be stored.
-ht, --https Use HTTPS URLs instead of SSH.
diff --git a/github_archive/archive.py b/github_archive/archive.py
index df4faf3..8af491c 100644
--- a/github_archive/archive.py
+++ b/github_archive/archive.py
@@ -40,6 +40,8 @@ class GithubArchive:
pull=False,
forks=False,
location=DEFAULT_LOCATION,
+ include=None,
+ exclude=None,
use_https=False,
timeout=DEFAULT_TIMEOUT,
threads=DEFAULT_NUM_THREADS,
@@ -56,6 +58,8 @@ class GithubArchive:
self.pull = pull
self.forks = forks
self.location = location
+ self.include = include.lower().split(',') if include else ''
+ self.exclude = exclude.lower().split(',') if exclude else ''
self.use_https = use_https
self.timeout = timeout
self.threads = threads
@@ -195,6 +199,10 @@ class GithubArchive:
message = 'At least one git operation and one list must be provided to run github-archive.'
logger.critical(message)
raise ValueError(message)
+ elif self.include and self.exclude:
+ message = 'The include and exclude flags are mutually exclusive. Only one can be used on each run.'
+ logger.critical(message)
+ raise ValueError(message)
def authenticated_user_in_users(self) -> bool:
return self.authenticated_user.login.lower() in self.users
@@ -243,24 +251,35 @@ class GithubArchive:
return final_sorted_list
def iterate_repos_to_archive(self, repos: List[Repository.Repository], operation: str):
- """Iterate over each repository and start a thread if it can be archived."""
+ """Iterate over each repository and start a thread if it can be archived.
+
+ We ignore repos not in the include or in the exclude list if either are present.
+ """
+ logger = woodchips.get(LOGGER_NAME)
thread_limiter = BoundedSemaphore(self.threads)
thread_list = []
for repo in repos:
- repo_owner_username = repo.owner.login.lower()
- repo_path = os.path.join(self.location, 'repos', repo_owner_username, repo.name)
- repo_thread = Thread(
- target=self.archive_repo,
- args=(
- thread_limiter,
- repo,
- repo_path,
- operation,
- ),
- )
- thread_list.append(repo_thread)
- repo_thread.start()
+ if (
+ (not self.include and not self.exclude)
+ or (self.include and repo.name in self.include)
+ or (self.exclude and repo.name not in self.exclude)
+ ):
+ repo_owner_username = repo.owner.login.lower()
+ repo_path = os.path.join(self.location, 'repos', repo_owner_username, repo.name)
+ repo_thread = Thread(
+ target=self.archive_repo,
+ args=(
+ thread_limiter,
+ repo,
+ repo_path,
+ operation,
+ ),
+ )
+ thread_list.append(repo_thread)
+ repo_thread.start()
+ else:
+ logger.debug(f'{repo.name} skipped due to include/exclude filtering')
# Wait for the number of threads in thread_limiter to finish before moving on
for thread in thread_list:
diff --git a/github_archive/cli.py b/github_archive/cli.py
index dad77fd..a33c6d2 100644
--- a/github_archive/cli.py
+++ b/github_archive/cli.py
@@ -91,6 +91,22 @@ class GithubArchiveCli:
default=False,
help='Pass this flag to include forked git assets.',
)
+ parser.add_argument(
+ '-i',
+ '--include',
+ type=str,
+ required=False,
+ default=None,
+ help='Pass a comma separated list of repos to include in the Archive.',
+ )
+ parser.add_argument(
+ '-e',
+ '--exclude',
+ type=str,
+ required=False,
+ default=None,
+ help='Pass a comma separated list of repos to exclude from the Archive.',
+ )
parser.add_argument(
'-l',
'--location',
@@ -144,6 +160,8 @@ class GithubArchiveCli:
pull=self.pull,
forks=self.forks,
location=self.location,
+ include=self.include,
+ exclude=self.exclude,
use_https=self.https,
timeout=self.timeout,
threads=self.threads,
| Justintime50/github-archive | b04355be48d3abcee2d9ddaf9b26cb1e9edc7e4d | diff --git a/test/unit/test_archive.py b/test/unit/test_archive.py
index 51024ce..edd4cea 100644
--- a/test/unit/test_archive.py
+++ b/test/unit/test_archive.py
@@ -291,6 +291,24 @@ def test_initialize_project_missing_all_cli_args(mock_logger):
assert message == str(error.value)
+@patch('logging.Logger.critical')
+def test_initialize_project_include_exclude_together(mock_logger):
+ # TODO: Is it possible to test all variations easily in one test?
+ # Parametrize doesn't work great because we can't easily swap the param name being used
+ message = 'The include and exclude flags are mutually exclusive. Only one can be used on each run.'
+ with pytest.raises(ValueError) as error:
+ github_archive = GithubArchive(
+ users='justintime50',
+ clone=True,
+ include='mock-repo',
+ exclude='another-mock-repo',
+ )
+ github_archive.initialize_project()
+
+ mock_logger.assert_called_with(message)
+ assert message == str(error.value)
+
+
@patch('github_archive.archive.Github.get_user')
def test_authenticated_user_in_users(mock_get_user):
authenticated_user_in_users = GithubArchive(
@@ -375,6 +393,32 @@ def test_iterate_repos_matching_authed_username(mock_archive_repo, mock_github_i
mock_archive_repo.assert_called_once()
+@patch('github_archive.archive.Github')
+@patch('github_archive.archive.GithubArchive.archive_repo')
+def test_iterate_repos_include_list(mock_archive_repo, mock_github_instance, mock_git_asset):
+ """Tests that we iterate repos that are on the include list."""
+ repos = [mock_git_asset]
+ GithubArchive(
+ users='mock_username',
+ include='mock-asset-name',
+ ).iterate_repos_to_archive(repos, CLONE_OPERATION)
+
+ mock_archive_repo.assert_called_once()
+
+
+@patch('github_archive.archive.Github')
+@patch('github_archive.archive.GithubArchive.archive_repo')
+def test_iterate_repos_exclude_list(mock_archive_repo, mock_github_instance, mock_git_asset):
+ """Tests that we do not iterate repos that are on the exclude list."""
+ repos = [mock_git_asset]
+ GithubArchive(
+ users='mock_username',
+ exclude='mock-asset-name',
+ ).iterate_repos_to_archive(repos, CLONE_OPERATION)
+
+ mock_archive_repo.assert_not_called()
+
+
@patch('github_archive.archive.Github')
@patch('github_archive.archive.GithubArchive.archive_gist')
def test_iterate_gists(mock_archive_gist, mock_github_instance, mock_git_asset):
| Add `--exclude` and `--include` flags for repo filtering
Hi,
It would be nice to be able to change the download path with an argument, and also have an argument to limit the number of repositories. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/test_archive.py::test_initialize_project_include_exclude_together",
"test/unit/test_archive.py::test_iterate_repos_include_list",
"test/unit/test_archive.py::test_iterate_repos_exclude_list"
] | [
"test/unit/test_archive.py::test_run_token_view",
"test/unit/test_archive.py::test_run_token_clone",
"test/unit/test_archive.py::test_run_token_pull",
"test/unit/test_archive.py::test_run_users_view",
"test/unit/test_archive.py::test_run_users_clone",
"test/unit/test_archive.py::test_run_users_pull",
"test/unit/test_archive.py::test_run_orgs_view",
"test/unit/test_archive.py::test_run_orgs_clone",
"test/unit/test_archive.py::test_run_orgs_pull",
"test/unit/test_archive.py::test_run_gists_view",
"test/unit/test_archive.py::test_run_gists_clone",
"test/unit/test_archive.py::test_run_gists_pull",
"test/unit/test_archive.py::test_run_stars_view",
"test/unit/test_archive.py::test_run_stars_clone",
"test/unit/test_archive.py::test_run_stars_pull",
"test/unit/test_archive.py::test_setup_logger",
"test/unit/test_archive.py::test_initialize_project",
"test/unit/test_archive.py::test_initialize_project_missing_list",
"test/unit/test_archive.py::test_initialize_project_missing_operation",
"test/unit/test_archive.py::test_initialize_project_missing_all_cli_args",
"test/unit/test_archive.py::test_authenticated_user_in_users",
"test/unit/test_archive.py::test_get_all_git_assets",
"test/unit/test_archive.py::test_get_all_user_repos",
"test/unit/test_archive.py::test_get_all_org_repos",
"test/unit/test_archive.py::test_get_get_all_gists",
"test/unit/test_archive.py::test_iterate_repos_not_matching_authed_username",
"test/unit/test_archive.py::test_iterate_repos_matching_authed_username",
"test/unit/test_archive.py::test_iterate_gists",
"test/unit/test_archive.py::test_view_repos",
"test/unit/test_archive.py::test_view_gists",
"test/unit/test_archive.py::test_archive_repo_success",
"test/unit/test_archive.py::test_archive_repo_use_https_success",
"test/unit/test_archive.py::test_archive_repo_clone_exists",
"test/unit/test_archive.py::test_archive_repo_timeout_exception",
"test/unit/test_archive.py::test_archive_repo_called_process_error",
"test/unit/test_archive.py::test_archive_gist_success",
"test/unit/test_archive.py::test_archive_gist_clone_exists",
"test/unit/test_archive.py::test_archive_gist_timeout_exception",
"test/unit/test_archive.py::test_archive_gist_called_process_error"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-02-19T06:50:30Z" | mit |
|
KATO-Hiro__Somen-Soupy-262 | diff --git a/snippets/math/matrix_rotation.py b/snippets/math/matrix_rotation.py
new file mode 100644
index 0000000..e2d91f1
--- /dev/null
+++ b/snippets/math/matrix_rotation.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+
+from typing import List
+
+
+# See:
+# https://kazun-kyopro.hatenablog.com/entry/ABC/298/B
+def rotate_90_degrees_to_right(array: List[List]):
+ return [list(ai)[::-1] for ai in zip(*array)]
| KATO-Hiro/Somen-Soupy | 8084e1d49cb729fb72ba8010154cf0e897421f6f | diff --git a/tests/test_matrix_rotation.py b/tests/test_matrix_rotation.py
new file mode 100644
index 0000000..ab76962
--- /dev/null
+++ b/tests/test_matrix_rotation.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+
+from snippets.math.matrix_rotation import rotate_90_degrees_to_right
+
+
+class TestMatrixRotation:
+ def test_rotate_90_degrees_to_right(self):
+ array = [[0, 1, 1], [1, 0, 0], [0, 1, 0]]
+
+ # 1st.
+ actual = rotate_90_degrees_to_right(array)
+ expected = [[0, 1, 0], [1, 0, 1], [0, 0, 1]]
+ assert actual == expected
+
+ # 2nd.
+ actual = rotate_90_degrees_to_right(actual)
+ expected = [[0, 1, 0], [0, 0, 1], [1, 1, 0]]
+ assert actual == expected
+
+ # 3rd.
+ actual = rotate_90_degrees_to_right(actual)
+ expected = [[1, 0, 0], [1, 0, 1], [0, 1, 0]]
+ assert actual == expected
+
+ # 4th (= array).
+ actual = rotate_90_degrees_to_right(actual)
+ expected = array
+ assert actual == expected
 | Rotate a matrix 90 degrees to the right
```py
from typing import List
# See:
# https://kazun-kyopro.hatenablog.com/entry/ABC/298/B
def rotate_90_degrees_to_right(array: List[List]):
return [list(ai)[::-1] for ai in zip(*array)]
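# Hypothetical usage sketch (not part of the referenced article): the first row
# of the input becomes the last column of the result.
# rotate_90_degrees_to_right([[1, 2], [3, 4]]) -> [[3, 1], [4, 2]]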
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_matrix_rotation.py::TestMatrixRotation::test_rotate_90_degrees_to_right"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2023-04-16T06:42:32Z" | cc0-1.0 |
|
KATO-Hiro__Somen-Soupy-300 | diff --git a/snippets/data_structure/sum_of_top_kth.py b/snippets/data_structure/sum_of_top_kth.py
new file mode 100644
index 0000000..1149736
--- /dev/null
+++ b/snippets/data_structure/sum_of_top_kth.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+"""
+Usage:
+
+# n, k, q = map(int, input().split())
+# a = [0] * n
+
+# # smallest
+# s = SumOfTopKth(k, ascending_order=True)
+# # largest
+# s = SumOfTopKth(k, ascending_order=False)
+
+# for _ in range(q):
+# xi, yi = map(int, input().split())
+# xi -= 1
+
+# s.discard(a[xi])
+# s.add(yi)
+# print(s.query())
+# a[xi] = yi
+"""
+
+from collections import defaultdict
+from heapq import heappop, heappush
+
+
+class SumOfTopKth:
+ """Sum of the k-th number from the smallest (largest) to the k-th.
+
+ See:
+ https://atcoder.jp/contests/abc306/submissions/42339375
+ """
+
+ __slots__ = (
+ "_summed",
+ "_k",
+ "_in",
+ "_out",
+ "_d_in",
+ "_d_out",
+ "_freq",
+ "_ascending_order",
+ )
+
+ def __init__(self, k: int, ascending_order=True) -> None:
+ self._k = k
+ self._summed = 0
+ self._in = []
+ self._out = []
+ self._d_in = []
+ self._d_out = []
+ self._ascending_order = ascending_order
+ self._freq = defaultdict(int)
+
+ def query(self) -> int:
+ return self._summed if self._ascending_order else -self._summed
+
+ def add(self, x: int) -> None:
+ if not self._ascending_order:
+ x = -x
+
+ self._freq[x] += 1
+ heappush(self._in, -x)
+ self._summed += x
+ self._modify()
+
+ def discard(self, x: int) -> None:
+ if not self._ascending_order:
+ x = -x
+ if self._freq[x] == 0:
+ return
+
+ self._freq[x] -= 1
+
+ if self._in and -self._in[0] == x:
+ self._summed -= x
+ heappop(self._in)
+ elif self._in and -self._in[0] > x:
+ self._summed -= x
+ heappush(self._d_in, -x)
+ else:
+ heappush(self._d_out, x)
+
+ self._modify()
+
+ def set_k(self, k: int) -> None:
+ self._k = k
+ self._modify()
+
+ def get_k(self) -> int:
+ return self._k
+
+ def _modify(self) -> None:
+ while self._out and (len(self._in) - len(self._d_in) < self._k):
+ p = heappop(self._out)
+
+ if self._d_out and p == self._d_out[0]:
+ heappop(self._d_out)
+ else:
+ self._summed += p
+ heappush(self._in, -p)
+
+ while len(self._in) - len(self._d_in) > self._k:
+ p = -heappop(self._in)
+
+ if self._d_in and p == -self._d_in[0]:
+ heappop(self._d_in)
+ else:
+ self._summed -= p
+ heappush(self._out, p)
+
+ while self._d_in and self._in[0] == self._d_in[0]:
+ heappop(self._in)
+ heappop(self._d_in)
+
+ def __len__(self) -> int:
+ return len(self._in) + len(self._out) - len(self._d_in) - len(self._d_out)
+
+ def __contains__(self, x: int) -> bool:
+ if not self._ascending_order:
+ x = -x
+ return self._freq[x] > 0
| KATO-Hiro/Somen-Soupy | 87948f39437876aab4fe08ff046bdb3150980da2 | diff --git a/tests/test_sum_of_top_kth.py b/tests/test_sum_of_top_kth.py
new file mode 100644
index 0000000..aee861e
--- /dev/null
+++ b/tests/test_sum_of_top_kth.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+
+from snippets.data_structure.sum_of_top_kth import SumOfTopKth
+
+
+class TestSumOfTopKth:
+ def test_sum_of_top_kth_by_descending_order(self) -> None:
+ s = SumOfTopKth(k=5, ascending_order=False)
+
+ for ai in [3, 1, 4, 1, 5, 9, 2, 6]:
+ s.add(ai)
+
+ actual = s.query()
+ expected = 27
+ assert actual == expected
+
+ s.discard(4)
+ s.add(3)
+ actual = s.query()
+ expected = 26
+ assert actual == expected
+
+ s.set_k(8)
+ actual = s.query()
+ expected = 30
+ assert actual == expected
+
+ s.discard(2)
+ s.add(10)
+ actual = s.query()
+ expected = 38
+ assert actual == expected
+
+ def test_sum_of_top_kth_by_ascending_order(self) -> None:
+ s = SumOfTopKth(k=5, ascending_order=True)
+
+ for ai in [3, 1, 4, 1, 5, 9, 2, 6]:
+ s.add(ai)
+
+ actual = s.query()
+ expected = 11
+ assert actual == expected
+
+ s.discard(4)
+ s.add(5)
+ actual = s.query()
+ expected = 12
+ assert actual == expected
+
+ s.set_k(8)
+ actual = s.query()
+ expected = 32
+ assert actual == expected
+
+ s.discard(6)
+ s.add(2)
+ actual = s.query()
+ expected = 28
+ assert actual == expected
 | Efficiently compute the sum of the elements up to the k-th when a sequence is sorted in ascending/descending order
```py
# -*- coding: utf-8 -*-
from collections import defaultdict
from heapq import heappop, heappush
class SumOfTopKth:
"""Sum of the k-th number from the smallest (largest) to the k-th.
See:
https://atcoder.jp/contests/abc306/submissions/42339375
"""
__slots__ = (
"_summed",
"_k",
"_in",
"_out",
"_d_in",
"_d_out",
"_freq",
"_ascending_order",
)
def __init__(self, k: int, ascending_order=True) -> None:
self._k = k
self._summed = 0
self._in = []
self._out = []
self._d_in = []
self._d_out = []
self._ascending_order = ascending_order
self._freq = defaultdict(int)
def query(self) -> int:
return self._summed if self._ascending_order else -self._summed
def add(self, x: int) -> None:
if not self._ascending_order:
x = -x
self._freq[x] += 1
heappush(self._in, -x)
self._summed += x
self._modify()
def discard(self, x: int) -> None:
if not self._ascending_order:
x = -x
if self._freq[x] == 0:
return
self._freq[x] -= 1
if self._in and -self._in[0] == x:
self._summed -= x
heappop(self._in)
elif self._in and -self._in[0] > x:
self._summed -= x
heappush(self._d_in, -x)
else:
heappush(self._d_out, x)
self._modify()
def set_k(self, k: int) -> None:
self._k = k
self._modify()
def get_k(self) -> int:
return self._k
def _modify(self) -> None:
while self._out and (len(self._in) - len(self._d_in) < self._k):
p = heappop(self._out)
if self._d_out and p == self._d_out[0]:
heappop(self._d_out)
else:
self._summed += p
heappush(self._in, -p)
while len(self._in) - len(self._d_in) > self._k:
p = -heappop(self._in)
if self._d_in and p == -self._d_in[0]:
heappop(self._d_in)
else:
self._summed -= p
heappush(self._out, p)
while self._d_in and self._in[0] == self._d_in[0]:
heappop(self._in)
heappop(self._d_in)
def __len__(self) -> int:
return len(self._in) + len(self._out) - len(self._d_in) - len(self._d_out)
def __contains__(self, x: int) -> bool:
if not self._ascending_order:
x = -x
return self._freq[x] > 0
def main():
import sys
input = sys.stdin.readline
n, k, q = map(int, input().split())
a = [0] * n
s = SumOfTopKth(k, ascending_order=False)
for _ in range(q):
xi, yi = map(int, input().split())
xi -= 1
s.discard(a[xi])
s.add(yi)
print(s.query())
a[xi] = yi
if __name__ == "__main__":
main()
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_sum_of_top_kth.py::TestSumOfTopKth::test_sum_of_top_kth_by_descending_order",
"tests/test_sum_of_top_kth.py::TestSumOfTopKth::test_sum_of_top_kth_by_ascending_order"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2023-06-18T13:34:43Z" | cc0-1.0 |
|
KIT-IAI__pyWATTS-41 | diff --git a/pywatts/callbacks/debug_callback.py b/pywatts/callbacks/debug_callback.py
index df55b0b..d8ab1f3 100644
--- a/pywatts/callbacks/debug_callback.py
+++ b/pywatts/callbacks/debug_callback.py
@@ -1,5 +1,5 @@
import xarray as xr
-from typing import Dict
+from typing import Dict, Optional
from pywatts.callbacks.base_callback import BaseCallback
@@ -36,6 +36,23 @@ class StatisticCallback(BaseCallback):
:type BaseCallback: BaseCallback
"""
+ def __init__(self, prefix: str, use_filemanager: Optional[bool] = None):
+ """
+ Initialise Statistical callback object given a filename and
+ optional use_filemanager flag.
+
+ :param prefix: Prefix to use for the line plot output file.
+ :type prefix: str
+ :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used.
+ :type use_filemanager: Optional[bool]
+ """
+ if use_filemanager is None:
+ # use base class default if use_filemanager is not set
+ super().__init__()
+ else:
+ super().__init__(use_filemanager)
+ self.prefix = prefix
+
def __call__(self, data_dict: Dict[str, xr.DataArray]):
"""
Implementation of abstract base method to print out
@@ -44,7 +61,13 @@ class StatisticCallback(BaseCallback):
:param data_dict: Dict of DataArrays that statistical information should be printed out.
:type data_dict: Dict[str, xr.DataArray]
"""
+ result_string = "\n# Statistical Callback\n"
print("\n# Statistical Callback")
+
for key in data_dict:
print(f"## {key}")
print(data_dict[key].to_pandas().describe())
+ result_string +=f"## {key}\n {data_dict[key].to_pandas().describe()} \n"
+
+ with open(self.get_path(f"{self.prefix}_Statistics.md"), "w") as file:
+ file.write(result_string)
\ No newline at end of file
diff --git a/pywatts/core/filemanager.py b/pywatts/core/filemanager.py
index 0353b95..9e42299 100644
--- a/pywatts/core/filemanager.py
+++ b/pywatts/core/filemanager.py
@@ -7,7 +7,7 @@ from pywatts.core.exceptions.io_exceptions import IOException
logger = logging.getLogger()
-ALLOWED_FILES = ["png", "csv", "xlsx", "pickle", "tex", "json", "h5", "pt"]
+ALLOWED_FILES = ["png", "csv", "xlsx", "pickle", "tex", "json", "h5", "pt", "md"]
class FileManager:
| KIT-IAI/pyWATTS | 35982f34725135adef011c33ac61af5cdebf1b59 | diff --git a/tests/unit/core/test_filemanager.py b/tests/unit/core/test_filemanager.py
index c4e2f16..f9f24ad 100644
--- a/tests/unit/core/test_filemanager.py
+++ b/tests/unit/core/test_filemanager.py
@@ -63,7 +63,7 @@ class TestFilemanager(unittest.TestCase):
self.filemanager.get_path("result.test")
self.assertEqual(cm.exception.args,
("test is not an allowed file type. Allowed types are ['png', 'csv', 'xlsx', "
- "'pickle', 'tex', 'json', 'h5', 'pt'].",))
+ "'pickle', 'tex', 'json', 'h5', 'pt', 'md'].",))
@patch("pywatts.core.filemanager.logger")
@patch("pywatts.core.filemanager.datetime")
| The summary function writes to a file (instead of printing)
**What is your feature request related to?**
* Problem related
**Please describe the problem**
It is possible to print out a summary of the data set in the latest step with the flag `summary = true`. Currently, this summary is simply printed and not saved.
**Describe the solution you'd like**
This summary should also be saved to a file for future reference if desired.
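A rough sketch of what saving could look like (the function name, file name and Markdown-ish format here are only illustrative, not a proposed API):

```python
# Illustrative sketch: build the summary once, print it, and also write it to disk.
def write_summary(data_dict, path="summary.md"):
    lines = []
    for key, data_array in data_dict.items():
        lines.append(f"## {key}")
        lines.append(str(data_array.to_pandas().describe()))
    summary = "\n".join(lines)
    print(summary)
    with open(path, "w") as summary_file:
        summary_file.write(summary)
```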
**What type of Feature Request**
* Core enhancement
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/core/test_filemanager.py::TestFilemanager::test_not_allowed_filetype"
] | [
"tests/unit/core/test_filemanager.py::TestFilemanager::test_duplicate_filename",
"tests/unit/core/test_filemanager.py::TestFilemanager::test_get_path",
"tests/unit/core/test_filemanager.py::TestFilemanager::test_get_path_filename_with_path"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-15T11:16:40Z" | mit |
|
Kattis__problemtools-253 | diff --git a/problemtools/problem2pdf.py b/problemtools/problem2pdf.py
index 3671bcf..583e8ac 100644
--- a/problemtools/problem2pdf.py
+++ b/problemtools/problem2pdf.py
@@ -9,12 +9,10 @@ from . import template
def convert(options: argparse.Namespace) -> bool:
-
problem = os.path.realpath(options.problem)
problembase = os.path.splitext(os.path.basename(problem))[0]
destfile = string.Template(options.destfile).safe_substitute(problem=problembase)
- texfile = problem
# Set up template if necessary
with template.Template(problem, language=options.language) as templ:
texfile = templ.get_file_name()
diff --git a/problemtools/run/__init__.py b/problemtools/run/__init__.py
index 79de604..6713e70 100644
--- a/problemtools/run/__init__.py
+++ b/problemtools/run/__init__.py
@@ -102,13 +102,18 @@ def get_program(path, language_config=None, work_dir=None, include_dir=None,
files = [path]
else:
build = os.path.join(path, 'build')
- if os.path.isfile(build) and os.access(path, os.X_OK):
+ if os.path.isfile(build) and os.access(build, os.X_OK):
return BuildRun(path, work_dir)
files = rutil.list_files_recursive(path)
if language_config is not None:
lang = language_config.detect_language(files)
if lang is not None:
- return SourceCode(path, lang,
- work_dir=work_dir, include_dir=include_dir)
+ if include_dir is not None:
+ lang_dir = os.path.join(include_dir, lang.lang_id)
+ build = os.path.join(lang_dir, 'build')
+ if os.path.isfile(build) and os.access(build, os.X_OK):
+ return BuildRun(path, work_dir=work_dir, include_dir=lang_dir)
+
+ return SourceCode(path, lang, work_dir=work_dir, include_dir=include_dir)
return None
diff --git a/problemtools/run/buildrun.py b/problemtools/run/buildrun.py
index 208527c..a86777b 100644
--- a/problemtools/run/buildrun.py
+++ b/problemtools/run/buildrun.py
@@ -12,12 +12,14 @@ from .errors import ProgramError
from .program import Program
from . import rutil
+log = logging.getLogger(__file__)
+
class BuildRun(Program):
"""Class for build/run-script program.
"""
- def __init__(self, path, work_dir=None):
+ def __init__(self, path, work_dir=None, include_dir=None):
"""Instantiate BuildRun object.
Args:
@@ -28,12 +30,6 @@ class BuildRun(Program):
if not os.path.isdir(path):
raise ProgramError('%s is not a directory' % path)
- build = os.path.join(path, 'build')
- if not os.path.isfile(build):
- raise ProgramError('%s does not have a build script' % path)
- if not os.access(build, os.X_OK):
- raise ProgramError('%s/build is not executable' % path)
-
if work_dir is None:
work_dir = tempfile.mkdtemp()
@@ -47,7 +43,15 @@ class BuildRun(Program):
os.makedirs(self.path)
rutil.add_files(path, self.path)
+ if include_dir is not None and os.path.isdir(include_dir):
+ rutil.add_files(include_dir, self.path)
+ # Check for existence of build script after copying include_dir, since that could contain the script
+ build = os.path.join(self.path, 'build')
+ if not os.path.isfile(build):
+ raise ProgramError('%s does not have a build script' % path)
+ if not os.access(build, os.X_OK):
+ raise ProgramError('%s/build is not executable' % path)
def __str__(self):
"""String representation"""
@@ -65,8 +69,8 @@ class BuildRun(Program):
run = os.path.join(self.path, 'run')
if status:
- logging.debug('Build script failed (status %d) when compiling %s\n', status, self.name)
- self._compile_result = (False, 'build script failed with exit code %d' % (status))
+ log.debug('Build script failed (status %d) when compiling %s', status, self.name)
+ self._compile_result = (False, f'build script failed with exit code {status:d}')
elif not os.path.isfile(run) or not os.access(run, os.X_OK):
self._compile_result = (False, 'build script did not produce an executable called "run"')
else:
diff --git a/problemtools/run/program.py b/problemtools/run/program.py
index a86fb8e..3cdad78 100644
--- a/problemtools/run/program.py
+++ b/problemtools/run/program.py
@@ -8,6 +8,9 @@ import logging
from .errors import ProgramError
+log = logging.getLogger(__name__)
+
+
class Program(object):
"""Abstract base class for programs.
"""
@@ -70,7 +73,7 @@ class Program(object):
@staticmethod
def __run_wait(argv, infile, outfile, errfile, timelim, memlim, working_directory=None):
- logging.debug('run "%s < %s > %s 2> %s"',
+ log.debug('run "%s < %s > %s 2> %s"',
' '.join(argv), infile, outfile, errfile)
pid = os.fork()
if pid == 0: # child
@@ -111,7 +114,7 @@ class Program(object):
print(exc)
os.kill(os.getpid(), signal.SIGTERM)
# Unreachable
- logging.error("Unreachable part of run_wait reached")
+ log.error("Unreachable part of run_wait reached")
os.kill(os.getpid(), signal.SIGTERM)
(pid, status, rusage) = os.wait4(pid, 0)
return status, rusage.ru_utime + rusage.ru_stime
diff --git a/problemtools/run/source.py b/problemtools/run/source.py
index a7724bd..3fa5b8b 100644
--- a/problemtools/run/source.py
+++ b/problemtools/run/source.py
@@ -12,6 +12,9 @@ from .errors import ProgramError
from .program import Program
from . import rutil
+log = logging.getLogger(__name__)
+
+
class SourceCode(Program):
"""Class representing a program provided by source code.
"""
@@ -103,7 +106,7 @@ class SourceCode(Program):
if not os.path.isfile(compiler) or not os.access(compiler, os.X_OK):
return (False, '%s does not seem to be installed, expected to find compiler at %s' % (self.language.name, compiler))
- logging.debug('compile command: %s', command)
+ log.debug('compile command: %s', command)
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
diff --git a/problemtools/verifyproblem.py b/problemtools/verifyproblem.py
index 2a0dd19..824e8e8 100644
--- a/problemtools/verifyproblem.py
+++ b/problemtools/verifyproblem.py
@@ -31,6 +31,8 @@ from . import run
from typing import Callable, Literal, Pattern, Match
+log = logging.getLogger(__name__)
+
Verdict = Literal['AC', 'TLE', 'OLE', 'MLE', 'RTE', 'WA', 'PAC', 'JE']
def is_TLE(status: int, may_signal_with_usr1: bool=False) -> bool:
@@ -91,6 +93,7 @@ class ProblemAspect:
warnings = 0
bail_on_error = False
_check_res: bool|None = None
+ consider_warnings_errors = False
basename_regex = re.compile('^[a-zA-Z0-9][a-zA-Z0-9_.-]*[a-zA-Z0-9]$')
consider_warnings_errors: bool
@@ -110,28 +113,28 @@ class ProblemAspect:
return f'{msg}:\n' + '\n'.join(' '*8 + line for line in lines)
- def error(self, msg: str, additional_info: str|None=None) -> None:
+ def __init__(self, name):
+ self.log = log.getChild(name)
+
+ def error(self, msg: str, additional_info: str|None=None, *args) -> None:
self._check_res = False
ProblemAspect.errors += 1
- logging.error('in %s: %s', self, ProblemAspect.__append_additional_info(msg, additional_info))
+ self.log.error(ProblemAspect.__append_additional_info(msg, additional_info), *args)
if ProblemAspect.bail_on_error:
raise VerifyError(msg)
- def warning(self, msg: str, additional_info: str|None=None) -> None:
+ def warning(self, msg: str, additional_info: str|None=None, *args) -> None:
if ProblemAspect.consider_warnings_errors:
- self.error(msg)
+ self.error(msg, additional_info, *args)
return
ProblemAspect.warnings += 1
- logging.warning('in %s: %s', self, ProblemAspect.__append_additional_info(msg, additional_info))
-
- def msg(self, msg: str) -> None:
- print(msg)
+ self.log.warning(ProblemAspect.__append_additional_info(msg, additional_info), *args)
- def info(self, msg: str) -> None:
- logging.info(': %s', msg)
+ def info(self, msg: str, *args) -> None:
+ self.log.info(msg, *args)
- def debug(self, msg: str) -> None:
- logging.debug(': %s', msg)
+ def debug(self, msg: str, *args) -> None:
+ self.log.debug(msg, *args)
def check_basename(self, path: str) -> None:
basename = os.path.basename(path)
@@ -140,6 +143,7 @@ class ProblemAspect:
class TestCase(ProblemAspect):
def __init__(self, problem: Problem, base: str, testcasegroup: TestCaseGroup):
+ super().__init__(f"{problem.shortname}.test.{testcasegroup.name}.{os.path.basename(base)}")
self._base = base
self.infile = f'{base}.in'
self.ansfile = f'{base}.ans'
@@ -248,6 +252,8 @@ class TestCase(ProblemAspect):
return (res, res_low, res_high, True)
outfile = os.path.join(self._problem.tmpdir, 'output')
+ errfile = os.path.join(self._problem.tmpdir, 'error')
+
if sys.stdout.isatty():
msg = f'Running {sub} on {self}...'
sys.stdout.write(msg)
@@ -256,16 +262,23 @@ class TestCase(ProblemAspect):
if self._problem.is_interactive:
res_high = self._problem.output_validators.validate_interactive(self, sub, timelim_high, self._problem.submissions)
else:
- status, runtime = sub.run(self.infile, outfile,
+ status, runtime = sub.run(infile=self.infile, outfile=outfile, errfile=errfile,
timelim=timelim_high+1,
memlim=self._problem.config.get('limits')['memory'], set_work_dir=True)
if is_TLE(status) or runtime > timelim_high:
res_high = SubmissionResult('TLE')
elif is_RTE(status):
- res_high = SubmissionResult('RTE')
+ try:
+ with open(errfile, mode="rt") as f:
+ info = f.read()
+ except IOError:
+ self.info("Failed to read error file %s", errfile)
+ info = None
+ res_high = SubmissionResult('RTE', additional_info=info)
else:
res_high = self._problem.output_validators.validate(self, outfile)
res_high.runtime = runtime
+
if sys.stdout.isatty():
sys.stdout.write('\b \b' * (len(msg)))
if res_high.runtime <= timelim_low:
@@ -318,8 +331,13 @@ class TestCaseGroup(ProblemAspect):
self._parent = parent
self._problem = problem
self._datadir = datadir
+ self.name = os.path.relpath(os.path.abspath(self._datadir),
+ os.path.abspath(self._problem.probdir)).replace("/", ".")
+
+ super().__init__(f"{problem.shortname}.test.{self.name}")
+
self._seen_oob_scores = False
- self.debug(f' Loading test data group {datadir}')
+ self.debug('Loading test data group %s', datadir)
configfile = os.path.join(self._datadir, 'testdata.yaml')
self.config = {}
if os.path.isfile(configfile):
@@ -374,7 +392,7 @@ class TestCaseGroup(ProblemAspect):
def __str__(self) -> str:
- return f'test case group {os.path.relpath(self._datadir, os.path.join(self._problem.probdir))}'
+ return f'test case group {self.name}'
def set_symlinks(self) -> None:
for sub in self._items:
@@ -627,6 +645,7 @@ class ProblemConfig(ProblemAspect):
_VALID_LICENSES = ['unknown', 'public domain', 'cc0', 'cc by', 'cc by-sa', 'educational', 'permission']
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.config")
self.debug(' Loading problem config')
self._problem = problem
self.configfile = os.path.join(problem.probdir, 'problem.yaml')
@@ -1061,6 +1080,7 @@ class Generators(ProblemAspect):
class ProblemStatement(ProblemAspect):
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.statement")
self.debug(' Loading problem statement')
self._problem = problem
self.languages = []
@@ -1136,6 +1156,7 @@ class Attachments(ProblemAspect):
"""
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.attachments")
attachments_path = os.path.join(problem.probdir, 'attachments')
self.attachments: list[str] = []
if os.path.isdir(attachments_path):
@@ -1165,7 +1186,7 @@ class Attachments(ProblemAspect):
_JUNK_CASES = [
('an empty file', b''),
- ('a binary file with byte values 0 up to 256', bytearray(x for x in range(256))),
+ ('a binary file with random bytes', bytearray(random.Random(0).randbytes(1024))),
('a text file with the ASCII characters 32 up to 127', bytearray(x for x in range(32, 127))),
('a random text file with printable ASCII characters', bytearray(random.choice(string.printable.encode('utf8')) for _ in range(200))),
]
@@ -1185,6 +1206,7 @@ _JUNK_MODIFICATIONS = [
class InputFormatValidators(ProblemAspect):
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.input_validator")
self._problem = problem
input_validators_path = os.path.join(problem.probdir, 'input_format_validators')
if os.path.isdir(input_validators_path):
@@ -1304,6 +1326,7 @@ class Graders(ProblemAspect):
_default_grader = run.get_tool('default_grader')
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.grader")
self._problem = problem
self._graders: list = run.find_programs(os.path.join(problem.probdir, 'graders'),
language_config=problem.language_config,
@@ -1382,7 +1405,7 @@ class Graders(ProblemAspect):
# TODO: check that all graders give same result
if not shadow_result:
- self.info(f'Grade on {testcasegroup} is {verdict} ({score})')
+ self.debug(f'Grade on {testcasegroup} is {verdict} ({score})')
return (verdict, score)
@@ -1392,6 +1415,7 @@ class OutputValidators(ProblemAspect):
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.output_validator")
self._problem = problem
self._validators = run.find_programs(os.path.join(problem.probdir,
'output_validators'),
@@ -1455,15 +1479,15 @@ class OutputValidators(ProblemAspect):
return self._check_res
@staticmethod
- def __get_feedback(feedback_dir: str) -> str|None:
+ def _get_feedback(feedback_dir: str) -> str|None:
all_feedback = []
for feedback_file in os.listdir(feedback_dir):
feedback_path = os.path.join(feedback_dir, feedback_file)
if os.path.getsize(feedback_path) == 0:
continue
all_feedback.append(f'=== {feedback_file}: ===')
- # FIXME handle feedback files containing non-text
- with open(feedback_path, 'r') as feedback:
+ # Note: The file could contain non-unicode characters, "replace" to be on the safe side
+ with open(feedback_path, 'r', errors="replace") as feedback:
# Cap amount of feedback per file at some high-ish
# size, so that a buggy validator spewing out lots of
# data doesn't kill us.
@@ -1484,15 +1508,15 @@ class OutputValidators(ProblemAspect):
if not os.WIFEXITED(status):
return SubmissionResult('JE',
reason=f'output validator {val} crashed, status {status}',
- additional_info=OutputValidators.__get_feedback(feedbackdir))
+ additional_info=OutputValidators._get_feedback(feedbackdir))
ret = os.WEXITSTATUS(status)
if ret not in [42, 43]:
return SubmissionResult('JE',
reason=f'output validator {val} exited with status {ret}',
- additional_info=OutputValidators.__get_feedback(feedbackdir))
+ additional_info=OutputValidators._get_feedback(feedbackdir))
if ret == 43:
- return SubmissionResult('WA', additional_info=OutputValidators.__get_feedback(feedbackdir))
+ return SubmissionResult('WA', additional_info=OutputValidators._get_feedback(feedbackdir))
if custom_score:
if os.path.isfile(score_file):
@@ -1585,11 +1609,28 @@ class OutputValidators(ProblemAspect):
for val in self._actual_validators():
if val is not None and val.compile()[0]:
feedbackdir = tempfile.mkdtemp(prefix='feedback', dir=self._problem.tmpdir)
+ validator_output = tempfile.mkdtemp(prefix='checker_out', dir=self._problem.tmpdir)
+ outfile = validator_output + "/out.txt"
+ errfile = validator_output + "/err.txt"
status, runtime = val.run(submission_output,
args=[testcase.infile, testcase.ansfile, feedbackdir] + flags,
- timelim=val_timelim, memlim=val_memlim)
+ timelim=val_timelim, memlim=val_memlim,
+ outfile=outfile, errfile=errfile)
+ if self.log.isEnabledFor(logging.DEBUG):
+ try:
+ with open(outfile, mode="rt") as f:
+ output = f.read()
+ if output:
+ self.log.debug("Validator output:\n%s", output)
+ with open(errfile, mode="rt") as f:
+ error = f.read()
+ if error:
+ self.log.debug("Validator stderr:\n%s", error)
+ except IOError as e:
+ self.info("Failed to read validator output: %s", e)
res = self._parse_validator_results(val, status, feedbackdir, testcase)
shutil.rmtree(feedbackdir)
+ shutil.rmtree(validator_output)
if res.verdict != 'AC':
return res
@@ -1609,6 +1650,7 @@ class Submissions(ProblemAspect):
]
def __init__(self, problem: Problem):
+ super().__init__(f"{problem.shortname}.submission")
self._submissions = {}
self._problem = problem
srcdir = os.path.join(problem.probdir, 'submissions')
@@ -1742,6 +1784,7 @@ class Problem(ProblemAspect):
def __init__(self, probdir: str):
self.probdir = os.path.realpath(probdir)
self.shortname: str|None = os.path.basename(self.probdir)
+ super().__init__(self.shortname)
self.language_config = languages.load_language_config()
def __enter__(self) -> Problem:
diff --git a/requirements.txt b/requirements.txt
index d6e1198..ecf975e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,1 +1,1 @@
--e .
+-e .
\ No newline at end of file
| Kattis/problemtools | 563f0d10c1fd83d985ee7d1f7ad412bb3481f60e | diff --git a/problemtools/tests/test_output_validator.py b/problemtools/tests/test_output_validator.py
new file mode 100644
index 0000000..afd6f2d
--- /dev/null
+++ b/problemtools/tests/test_output_validator.py
@@ -0,0 +1,25 @@
+import random
+import pathlib
+import string
+import tempfile
+
+from problemtools.verifyproblem import OutputValidators
+
+
+def test_output_validator_feedback():
+ r = random.Random(0)
+ with tempfile.TemporaryDirectory() as directory:
+ feedback = pathlib.Path(directory) / "feedback.txt"
+ text = "".join(r.choices(string.printable))
+ feedback.write_text(text)
+ data = OutputValidators._get_feedback(directory)
+ assert text in data
+
+
+def test_output_validator_feedback_non_unicode():
+ r = random.Random(0)
+ with tempfile.TemporaryDirectory() as directory:
+ feedback = pathlib.Path(directory) / "feedback.txt"
+ feedback.write_bytes(r.randbytes(1024))
+ # Just test that this does not throw an error
+ OutputValidators._get_feedback(directory)
| UnicodeDecodeError problems due to non-parseable _JUNK_CASES
When validating problems with the current develop branch, I get the following issue:
`UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 137: invalid start byte`, thrown in `verifyproblem.py", line 1141, in __get_feedback: all_feedback.append(feedback.read(128*1024))`
The problem is that the validator (based on `validate.h`) reports
```
TC 1: Judge says possible but author says !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~��������������������������������������������������������������������������������������������������������������������������������!
```
(output of `cat judgemessage.txt`).
This in turn is caused by the following entry in `_JUNK_CASES`: `('a binary file with byte values 0 up to 256', bytearray(x for x in range(256)))`. This creates an answer which contains non-unicode characters, which then fails to parse because when reading feedback, `verifyproblem.py` expects unicode answers.
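Concretely, the reading side could decode with replacement characters instead of failing; a minimal sketch of that idea (the helper name is only illustrative):

```
# Illustrative sketch: read judge feedback without assuming it is valid UTF-8.
# errors='replace' substitutes undecodable bytes with U+FFFD instead of raising.
def read_feedback(feedback_path, limit=128 * 1024):
    with open(feedback_path, 'r', errors='replace') as feedback:
        return feedback.read(limit)
```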
Commenting out this line fixes the problem. I suggest one should either use every printable character / random unicode characters or make the parsing of judgemessage more robust (iterate character by character over the input and if it cannot be parsed replace with a question mark or similar?) | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"problemtools/tests/test_output_validator.py::test_output_validator_feedback_non_unicode",
"problemtools/tests/test_output_validator.py::test_output_validator_feedback"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2024-02-12T10:56:44Z" | mit |
|
Kinto__kinto-1015 | diff --git a/Dockerfile b/Dockerfile
index 714e7d75..663e2611 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,7 @@ RUN \
apt-get install -y build-essential git python3-dev libssl-dev libffi-dev libpq-dev; \
pip3 install -e /code[postgresql,monitoring]; \
pip3 install kinto-pusher kinto-fxa kinto-attachment ; \
- kinto --ini $KINTO_INI init --backend=memory; \
+ kinto --ini $KINTO_INI init --host 0.0.0.0 --backend=memory; \
apt-get remove -y -qq build-essential git python3-dev libssl-dev libffi-dev libpq-dev; \
apt-get autoremove -y -qq; \
apt-get clean -y
diff --git a/kinto/__main__.py b/kinto/__main__.py
index b1921857..3b02cf5d 100644
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -57,6 +57,11 @@ def main(args=None):
dest='backend',
required=False,
default=None)
+ subparser.add_argument('--host',
+ help='Host to listen() on.',
+ dest='host',
+ required=False,
+ default='127.0.0.1')
elif command == 'migrate':
subparser.add_argument('--dry-run',
action='store_true',
@@ -114,7 +119,7 @@ def main(args=None):
except KeyError:
pass
- init(config_file, backend)
+ init(config_file, backend, parsed_args['host'])
# Install postgresql libraries if necessary
if backend == "postgresql":
diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
index f83aff3a..1406a911 100644
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -26,9 +26,10 @@ def render_template(template, destination, **kwargs):
output.write(rendered)
-def init(config_file, backend):
+def init(config_file, backend, host='127.0.0.1'):
values = {}
+ values['host'] = host
values['secret'] = core_utils.random_bytes_hex(32)
values['kinto_version'] = __version__
diff --git a/kinto/config/kinto.tpl b/kinto/config/kinto.tpl
index f180f637..214e17ec 100644
--- a/kinto/config/kinto.tpl
+++ b/kinto/config/kinto.tpl
@@ -4,7 +4,7 @@
[server:main]
use = egg:waitress#main
-host = 127.0.0.1
+host = {host}
port = %(http_port)s
| Kinto/kinto | 6fb29c638364943adde35fabe9dbe6e7b34cbd21 | diff --git a/tests/test_config.py b/tests/test_config.py
index dfe30121..2a20e97a 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -17,6 +17,7 @@ class ConfigTest(unittest.TestCase):
template = "kinto.tpl"
dest = tempfile.mktemp()
config.render_template(template, dest,
+ host='127.0.0.1',
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
@@ -41,6 +42,7 @@ class ConfigTest(unittest.TestCase):
dest = os.path.join(tempfile.mkdtemp(), 'config', 'kinto.ini')
config.render_template("kinto.tpl", dest,
+ host='127.0.0.1',
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
@@ -68,6 +70,7 @@ class ConfigTest(unittest.TestCase):
postgresql_url = "postgres://postgres:postgres@localhost/postgres"
self.assertDictEqual(kwargs, {
+ 'host': '127.0.0.1',
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.postgresql',
'cache_backend': 'kinto.core.cache.postgresql',
@@ -90,6 +93,7 @@ class ConfigTest(unittest.TestCase):
self.maxDiff = None # See the full diff in case of error
self.assertDictEqual(kwargs, {
+ 'host': '127.0.0.1',
'secret': kwargs['secret'],
'storage_backend': 'kinto_redis.storage',
'cache_backend': 'kinto_redis.cache',
@@ -109,6 +113,7 @@ class ConfigTest(unittest.TestCase):
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
self.assertDictEqual(kwargs, {
+ 'host': '127.0.0.1',
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
@@ -120,26 +125,11 @@ class ConfigTest(unittest.TestCase):
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
- def test_render_template_creates_directory_if_necessary(self):
- temp_path = tempfile.mkdtemp()
- destination = os.path.join(temp_path, 'config/kinto.ini')
- config.render_template('kinto.tpl', destination, **{
- 'secret': "abcd-ceci-est-un-secret",
- 'storage_backend': 'kinto.core.storage.memory',
- 'cache_backend': 'kinto.core.cache.memory',
- 'permission_backend': 'kinto.core.permission.memory',
- 'storage_url': '',
- 'cache_url': '',
- 'permission_url': '',
- 'kinto_version': '',
- 'config_file_timestamp': ''
- })
- self.assertTrue(os.path.exists(destination))
-
def test_render_template_works_with_file_in_cwd(self):
temp_path = tempfile.mkdtemp()
os.chdir(temp_path)
config.render_template('kinto.tpl', 'kinto.ini', **{
+ 'host': '127.0.0.1',
'secret': "abcd-ceci-est-un-secret",
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
 | updated default kinto config breaks docker (kinto server not reachable from host)
In e4e70fb, the default listen ip has been changed to 127.0.0.1.
I couldn't manage to export the port using docker, using this command from the [install documentation](http://kinto.readthedocs.io/en/latest/tutorials/install.html#environment-variables):
```
docker run --env-file kinto.env --link kinto_db:db -p 127.0.0.1:8888:8888 --name kinto_web croco/kinto:latest
```
For instance, when I do `curl http://127.0.0.1:8888` from the host I get `Recv failure: Connection reset by peer`.
When I reverted kinto.ini to listen on 0.0.0.0, curl from the host succeeds.
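For reference, the working `[server:main]` section after reverting looks roughly like this (the port is just the one mapped above; the template normally fills it in):
```
[server:main]
use = egg:waitress#main
host = 0.0.0.0
port = 8888
```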
Maybe there is an environment variable to override the `host` entry in the `server:main` section? Then it would be fine, provided the documentation is updated...
This is not visible with the published kinto/kinto-server image, since it's still 5.1. I built my own from master because I needed an armhf image. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_config.py::ConfigTest::test_init_postgresql_values",
"tests/test_config.py::ConfigTest::test_init_redis_values",
"tests/test_config.py::ConfigTest::test_init_memory_values"
] | [
"tests/test_config.py::ConfigTest::test_render_template_works_with_file_in_cwd",
"tests/test_config.py::ConfigTest::test_transpose_parameters_into_template",
"tests/test_config.py::ConfigTest::test_create_destination_directory",
"tests/test_config.py::ConfigTest::test_hmac_secret_is_text"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2017-01-05T23:18:05Z" | apache-2.0 |
|
Kinto__kinto-1943 | diff --git a/docs/commandline.rst b/docs/commandline.rst
index e56d2080..7c1addef 100644
--- a/docs/commandline.rst
+++ b/docs/commandline.rst
@@ -139,3 +139,19 @@ For example:
::
kinto rebuild-quotas --ini=config/postgresql.ini
+
+Flush Cache
+-----------
+
+Clears the Backend Cache.This can be useful for
+debugging.
+
+::
+
+ kinto flush-cache [--ini INI_FILE]
+
+For example:
+
+::
+
+ kinto flush-cache --ini kinto.ini
\ No newline at end of file
diff --git a/kinto/__main__.py b/kinto/__main__.py
index 4b935afe..dc2142b9 100644
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -24,12 +24,13 @@ def main(args=None):
if args is None:
args = sys.argv[1:]
- parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
+ parser = argparse.ArgumentParser(description="Kinto Command-Line Interface")
commands = (
"init",
"start",
"migrate",
"delete-collection",
+ "flush-cache",
"version",
"rebuild-quotas",
"create-user",
@@ -94,26 +95,34 @@ def main(args=None):
required=False,
default="127.0.0.1",
)
+
elif command == "migrate":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the migration operations " "and show information",
+ help="Simulate the migration operations and show information",
dest="dry_run",
required=False,
default=False,
)
+
elif command == "delete-collection":
subparser.add_argument(
- "--bucket", help="The bucket where the collection " "belongs to.", required=True
+ "--bucket",
+ help="The bucket where the collection belongs to.",
+ required=True
+ )
+ subparser.add_argument(
+ "--collection",
+ help="The collection to remove.",
+ required=True
)
- subparser.add_argument("--collection", help="The collection to remove.", required=True)
elif command == "rebuild-quotas":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the rebuild operation " "and show information",
+ help="Simulate the rebuild operation and show information",
dest="dry_run",
required=False,
default=False,
@@ -225,6 +234,10 @@ def main(args=None):
env, parsed_args["bucket"], parsed_args["collection"]
)
+ elif which_command == "flush-cache":
+ env = bootstrap(config_file, options={"command": "flush-cache"})
+ core_scripts.flush_cache(env)
+
elif which_command == "rebuild-quotas":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "rebuild-quotas"})
diff --git a/kinto/core/scripts.py b/kinto/core/scripts.py
index 83a4a5ff..5c89fb06 100644
--- a/kinto/core/scripts.py
+++ b/kinto/core/scripts.py
@@ -25,3 +25,10 @@ def migrate(env, dry_run=False):
logger.error(message)
else:
getattr(registry, backend).initialize_schema(dry_run=dry_run)
+
+
+def flush_cache(env):
+ registry = env["registry"]
+ registry.cache.flush()
+ logger.info(f"Cache has been cleared.")
+ return 0
| Kinto/kinto | 173d8eabe81a19c709e22341f64a6f65d6fcff2c | diff --git a/tests/core/test_scripts.py b/tests/core/test_scripts.py
index 4a00c8d2..6aa241c5 100644
--- a/tests/core/test_scripts.py
+++ b/tests/core/test_scripts.py
@@ -40,3 +40,8 @@ class InitSchemaTest(unittest.TestCase):
reg.storage.initialize_schema.assert_called_with(dry_run=True)
reg.cache.initialize_schema.assert_called_with(dry_run=True)
reg.permission.initialize_schema.assert_called_with(dry_run=True)
+
+ def test_flush_cache_clear_the_cache_backend(self):
+ scripts.flush_cache({"registry": self.registry})
+ reg = self.registry
+ reg.cache.flush.assert_called_with()
diff --git a/tests/test_main.py b/tests/test_main.py
index 2dfe4a80..73acce40 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -362,3 +362,22 @@ class TestMain(unittest.TestCase):
mocked_logging.basicConfig.assert_called_with(
level=logging.INFO, format=DEFAULT_LOG_FORMAT
)
+
+ def test_cli_flush_cache_command_runs_flush_cache_script(self):
+ # Build a temporary ini file.
+ res = main(
+ [
+ "init",
+ "--ini",
+ TEMP_KINTO_INI,
+ "--backend",
+ "memory",
+ "--cache-backend",
+ "memory",
+ ]
+ )
+ assert res == 0
+ with mock.patch("kinto.__main__.core_scripts.flush_cache") as mocked_cache_script:
+ res = main(["flush-cache", "--ini", TEMP_KINTO_INI])
+ assert res == 0
+ assert mocked_cache_script.call_count == 1
| Add a clear cache command to kinto
Mainly for debugging purposes, we want to be able to clear the cache from time to time | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/core/test_scripts.py::InitSchemaTest::test_flush_cache_clear_the_cache_backend",
"tests/test_main.py::TestMain::test_cli_flush_cache_command_runs_flush_cache_script"
] | [
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_calls_initialize_schema_on_backends",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_in_dry_run_mode",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_in_read_only_display_an_error",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_skips_missing_backends",
"tests/test_main.py::TestMain::test_cli_can_configure_logger_in_debug",
"tests/test_main.py::TestMain::test_cli_can_configure_logger_in_quiet",
"tests/test_main.py::TestMain::test_cli_can_display_kinto_version",
"tests/test_main.py::TestMain::test_cli_create_user_runs_account_script",
"tests/test_main.py::TestMain::test_cli_delete_collection_run_delete_collection_script",
"tests/test_main.py::TestMain::test_cli_init_asks_for_backend_if_not_specified",
"tests/test_main.py::TestMain::test_cli_init_asks_until_backend_is_valid",
"tests/test_main.py::TestMain::test_cli_init_asks_until_cache_backend_is_valid",
"tests/test_main.py::TestMain::test_cli_init_generates_configuration",
"tests/test_main.py::TestMain::test_cli_init_installs_memcached_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_installs_postgresql_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_installs_redis_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_returns_if_file_exists",
"tests/test_main.py::TestMain::test_cli_migrate_command_runs_init_schema",
"tests/test_main.py::TestMain::test_cli_rebuild_quotas_run_rebuild_quotas_script",
"tests/test_main.py::TestMain::test_cli_start_runs_pserve",
"tests/test_main.py::TestMain::test_cli_start_with_quiet_option_runs_pserve_with_quiet",
"tests/test_main.py::TestMain::test_cli_start_with_reload_runs_pserve_with_reload",
"tests/test_main.py::TestMain::test_cli_start_with_verbose_option_runs_pserve_with_verbose",
"tests/test_main.py::TestMain::test_cli_use_default_logging_logger",
"tests/test_main.py::TestMain::test_fails_if_not_enough_args",
"tests/test_main.py::TestMain::test_main_takes_sys_argv_by_default"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2018-12-20T11:11:50Z" | apache-2.0 |
|
Kinto__kinto-emailer-37 | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 5536013..6e5ce32 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -9,6 +9,7 @@ This document describes changes between each past release.
**New features**
- Add a ``validate_setup.py`` script to check that server can actually send emails
+- Add a ``kinto-send-email`` command to test the configuration (fixes #35)
**Bug fixes**
diff --git a/README.rst b/README.rst
index 199f545..43aca1e 100644
--- a/README.rst
+++ b/README.rst
@@ -45,6 +45,15 @@ If ``mail.queue_path`` is set, the emails are storage in a local Maildir queue.
See `more details about Pyramid Mailer configuration <http://docs.pylonsproject.org/projects/pyramid_mailer/en/latest/#configuration>`_.
+Validate configuration
+----------------------
+
+The following command will send a dummy email to the specified recipient or will fail if the configuration is not correct:
+
+::
+
+ $ kinto-send-email config/kinto.ini [email protected]
+
Development
-----------
diff --git a/kinto_emailer/command_send.py b/kinto_emailer/command_send.py
new file mode 100644
index 0000000..6bf78d8
--- /dev/null
+++ b/kinto_emailer/command_send.py
@@ -0,0 +1,33 @@
+import sys
+
+from pyramid.paster import bootstrap
+from pyramid_mailer import get_mailer
+from pyramid_mailer.message import Message
+
+
+subject = "[kinto-emailer] Test"
+body = "If you received this email, the server is well configured."
+
+
+def main(args=None):
+ if args is None:
+ args = sys.argv[1:]
+ try:
+ config_file, recipient = args
+ except ValueError:
+ print("Usage: %s CONFIG RECIPIENT" % sys.argv[0])
+ return 1
+
+ print("Load config...")
+ env = bootstrap(config_file)
+
+ print("Send email to %r" % recipient)
+ registry = env['registry']
+ mailer = get_mailer(registry)
+
+ message = Message(subject=subject,
+ recipients=[recipient],
+ body=body)
+ mailer.send_immediately(message, fail_silently=False)
+ print("Done.")
+ return 0
diff --git a/setup.py b/setup.py
index 933eb1f..981e05b 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,12 @@ REQUIREMENTS = [
'pyramid_mailer',
]
+ENTRY_POINTS = {
+ 'console_scripts': [
+ 'kinto-send-email = kinto_emailer.command_send:main'
+ ],
+}
+
setup(name='kinto-emailer',
version='0.4.0.dev0',
description='Kinto emailer plugin',
@@ -38,4 +44,5 @@ setup(name='kinto-emailer',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
- install_requires=REQUIREMENTS)
+ install_requires=REQUIREMENTS,
+ entry_points=ENTRY_POINTS)
| Kinto/kinto-emailer | 25d43b88daf9ceb808cb8eafa2351f502802bb82 | diff --git a/kinto_emailer/tests/test_command_send.py b/kinto_emailer/tests/test_command_send.py
new file mode 100644
index 0000000..0bba54e
--- /dev/null
+++ b/kinto_emailer/tests/test_command_send.py
@@ -0,0 +1,21 @@
+import mock
+import unittest
+
+from kinto_emailer import command_send
+
+
+class CommandTest(unittest.TestCase):
+ def test_uses_sys_args_by_default(self):
+ assert command_send.main() > 0 # will fail
+
+ def test_returns_non_zero_if_not_enough_args(self):
+ assert command_send.main([]) > 0
+
+ def test_calls_send_immmediately_with_parameters(self):
+ with mock.patch('kinto_emailer.command_send.bootstrap'):
+ with mock.patch('kinto_emailer.command_send.get_mailer') as get_mailer:
+ command_send.main(["config.ini", "[email protected]"])
+
+ args, kwargs = get_mailer().send_immediately.call_args_list[0]
+ assert "kinto-emailer" in args[0].subject
+ assert not kwargs["fail_silently"]
| Add a «test send» command
```
$ kinto-emailer-test config/kinto.ini [email protected]
Email sent successfully.
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"kinto_emailer/tests/test_command_send.py::CommandTest::test_returns_non_zero_if_not_enough_args",
"kinto_emailer/tests/test_command_send.py::CommandTest::test_calls_send_immmediately_with_parameters",
"kinto_emailer/tests/test_command_send.py::CommandTest::test_uses_sys_args_by_default"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2017-04-12T11:08:15Z" | apache-2.0 |
|
Knio__pynmea2-124 | diff --git a/pynmea2/types/proprietary/ash.py b/pynmea2/types/proprietary/ash.py
index d62a10e..0715bef 100644
--- a/pynmea2/types/proprietary/ash.py
+++ b/pynmea2/types/proprietary/ash.py
@@ -34,7 +34,7 @@ class ASHRATT(ASH):
'''
@staticmethod
def match(data):
- return re.match(r'^\d{6}\.\d{3}$', data[1])
+ return re.match(r'^\d{6}\.\d{2,3}$', data[1])
def __init__(self, *args, **kwargs):
self.subtype = 'ATT'
| Knio/pynmea2 | 2dab8f59045365463a33013cd1f95140943193fd | diff --git a/test/test_ash.py b/test/test_ash.py
index b33261e..24a2bac 100644
--- a/test/test_ash.py
+++ b/test/test_ash.py
@@ -32,6 +32,13 @@ def test_ashratt():
assert msg.render() == data
+def test_ashratt_with_2_vs_3_decimal_timestamp():
+ msg_3 = pynmea2.parse('$PASHR,130533.620,0.311,T,-80.467,-1.395,,0.066,0.067,0.215,2,3*0B')
+ msg_2 = pynmea2.parse('$PASHR,130533.62,0.311,T,-80.467,-1.395,,0.066,0.067,0.215,2,3*3B')
+
+ assert msg_3.timestamp == msg_2.timestamp
+
+
def test_ash_undefined():
'''
Test that non-ATT messages still fall back to the generic ASH type
| Issues with parsing PASHR sentences
Hi,
I just ran into some trouble when trying to parse an NMEA stream containing PASHR sentences, like the ones below:
```
$PASHR,122144.75,240.68,T,,,,,,,2*04
$PASHR,122144.96,240.30,T,,,,,,,2*04
$PASHR,122145.18,239.91,T,,,,,,,2*06
$PASHR,122145.61,239.04,T,,,,,,,2*04
$PASHR,122145.83,238.59,T,,,,,,,2*01
$PASHR,122146.05,238.13,T,,,,,,,2*02
```
Seems like the special logic for these messages (see [here](https://github.com/Knio/pynmea2/blob/master/pynmea2/types/proprietary/ash.py#L24-L26) and [here](https://github.com/Knio/pynmea2/blob/master/pynmea2/types/proprietary/ash.py#L35-L37)) does not fit the messages that I have.
The regex in the last link is not catching the messages due to a different number of decimal digits in the timestamp. I.e. `122144.75` does not give a match with `'^\d{6}\.\d{3}$'`. Changing the regex to `'^\d{6}\.\d{2,3}$'` seems to do the trick. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_ash.py::test_ashratt_with_2_vs_3_decimal_timestamp"
] | [
"test/test_ash.py::test_ashrltn",
"test/test_ash.py::test_ashratt",
"test/test_ash.py::test_ash_undefined"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2021-03-31T05:56:45Z" | mit |
|
Knio__pynmea2-151 | diff --git a/examples/nmea2gpx.py b/examples/nmea2gpx.py
new file mode 100644
index 0000000..87154ee
--- /dev/null
+++ b/examples/nmea2gpx.py
@@ -0,0 +1,103 @@
+'''
+Convert a NMEA ascii log file into a GPX file
+'''
+
+import argparse
+import datetime
+import logging
+import pathlib
+import re
+import xml.dom.minidom
+
+log = logging.getLogger(__name__)
+
+try:
+ import pynmea2
+except ImportError:
+ import sys
+ import pathlib
+ p = pathlib.Path(__file__).parent.parent
+ sys.path.append(str(p))
+ log.info(sys.path)
+ import pynmea2
+
+
+def main():
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('nmea_file')
+
+ args = parser.parse_args()
+ nmea_file = pathlib.Path(args.nmea_file)
+
+ if m := re.match(r'^(\d{2})(\d{2})(\d{2})', nmea_file.name):
+ date = datetime.date(year=2000 + int(m.group(1)), month=int(m.group(2)), day=int(m.group(3)))
+ log.debug('date parsed from filename: %r', date)
+ else:
+ date = None
+
+ author = 'https://github.com/Knio/pynmea2'
+ doc = xml.dom.minidom.Document()
+ doc.appendChild(root := doc.createElement('gpx'))
+ root.setAttribute('xmlns', "http://www.topografix.com/GPX/1/1")
+ root.setAttribute('version', "1.1")
+ root.setAttribute('creator', author)
+ root.setAttribute('xmlns', "http://www.topografix.com/GPX/1/1")
+ root.setAttribute('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
+ root.setAttribute('xsi:schemaLocation', "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd")
+
+ root.appendChild(meta := doc.createElement('metadata'))
+ root.appendChild(trk := doc.createElement('trk'))
+ meta.appendChild(meta_name := doc.createElement('name'))
+ meta.appendChild(meta_author := doc.createElement('author'))
+ trk.appendChild(trk_name := doc.createElement('name'))
+ trk.appendChild(trkseg := doc.createElement('trkseg'))
+ meta_name.appendChild(doc.createTextNode(nmea_file.name))
+ trk_name. appendChild(doc.createTextNode(nmea_file.name))
+ meta_author.appendChild(author_link := doc.createElement('link'))
+ author_link.setAttribute('href', author)
+ author_link.appendChild(author_text := doc.createElement('text'))
+ author_link.appendChild(author_type := doc.createElement('type'))
+ author_text.appendChild(doc.createTextNode('Pynmea2'))
+ author_type.appendChild(doc.createTextNode('text/html'))
+
+ for line in open(args.nmea_file):
+ try:
+ msg = pynmea2.parse(line)
+ except Exception as e:
+ log.warning('Couldn\'t parse line: %r', e)
+ continue
+
+ if not (hasattr(msg, 'latitude') and hasattr(msg, 'longitude')):
+ continue
+
+ # if not hasattr(msg, 'altitude'):
+ # continue
+
+ trkseg.appendChild(trkpt := doc.createElement('trkpt'))
+
+ trkpt.setAttribute('lat', f'{msg.latitude:.6f}')
+ trkpt.setAttribute('lon', f'{msg.longitude:.6f}')
+ if hasattr(msg, 'altitude'):
+ trkpt.appendChild(ele := doc.createElement('ele'))
+ ele.appendChild(doc.createTextNode(f'{msg.altitude:.3f}'))
+
+ # TODO try msg.datetime
+
+ if date:
+ trkpt.appendChild(time := doc.createElement('time'))
+ dt = datetime.datetime.combine(date, msg.timestamp)
+ dts = dt.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
+ time.appendChild(doc.createTextNode(dts))
+
+ xml_data = doc.toprettyxml(
+ indent=' ',
+ newl='\n',
+ encoding='utf8',
+ ).decode('utf8')
+ print(xml_data)
+
+
+
+if __name__ == '__main__':
+ logging.basicConfig(level=logging.DEBUG)
+ main()
\ No newline at end of file
diff --git a/pynmea2/nmea_utils.py b/pynmea2/nmea_utils.py
index 8cb64e8..36f0f95 100644
--- a/pynmea2/nmea_utils.py
+++ b/pynmea2/nmea_utils.py
@@ -2,6 +2,17 @@
import datetime
import re
+
+# python 2.7 backport
+if not hasattr(datetime, 'timezone'):
+ class UTC(datetime.tzinfo):
+ def utcoffset(self, dt):
+ return datetime.timedelta(0)
+ class timezone(object):
+ utc = UTC()
+ datetime.timezone = timezone
+
+
def valid(s):
return s == 'A'
@@ -18,7 +29,8 @@ def timestamp(s):
hour=int(s[0:2]),
minute=int(s[2:4]),
second=int(s[4:6]),
- microsecond=ms)
+ microsecond=ms,
+ tzinfo=datetime.timezone.utc)
return t
diff --git a/pynmea2/types/talker.py b/pynmea2/types/talker.py
index d27ddfe..8c00c7a 100644
--- a/pynmea2/types/talker.py
+++ b/pynmea2/types/talker.py
@@ -507,7 +507,7 @@ class XTE(TalkerSentence):
)
-class ZDA(TalkerSentence):
+class ZDA(TalkerSentence, DatetimeFix):
fields = (
("Timestamp", "timestamp", timestamp), # hhmmss.ss = UTC
("Day", "day", int), # 01 to 31
@@ -526,9 +526,9 @@ class ZDA(TalkerSentence):
return TZInfo(self.local_zone, self.local_zone_minutes)
@property
- def datetime(self):
+ def localdatetime(self):
d = datetime.datetime.combine(self.datestamp, self.timestamp)
- return d.replace(tzinfo=self.tzinfo)
+ return d.astimezone(self.tzinfo)
| Knio/pynmea2 | 988c297ce82d976db9094b435a1aa290e7d5b9ed | diff --git a/test/test_ash.py b/test/test_ash.py
index 37ad969..b7a9425 100644
--- a/test/test_ash.py
+++ b/test/test_ash.py
@@ -19,7 +19,7 @@ def test_ashratt():
assert type(msg) == pynmea2.ash.ASHRATT
assert msg.data == ['R', '130533.620', '0.311', 'T', '-80.467', '-1.395', '0.25', '0.066', '0.067', '0.215', '2', '3']
assert msg.manufacturer == 'ASH'
- assert msg.timestamp == datetime.time(13, 5, 33, 620000)
+ assert msg.timestamp == datetime.time(13, 5, 33, 620000, tzinfo=datetime.timezone.utc)
assert msg.true_heading == 0.311
assert msg.is_true_heading == 'T'
assert msg.roll == -80.467
diff --git a/test/test_nor.py b/test/test_nor.py
index a95d7a0..2c020b5 100644
--- a/test/test_nor.py
+++ b/test/test_nor.py
@@ -11,7 +11,7 @@ def test_norbt0():
assert msg.sentence_type == 'NORBT0'
assert msg.beam == 1
assert msg.datestamp == datetime.date(2021, 7, 4)
- assert msg.timestamp == datetime.time(13, 13, 35, 334100)
+ assert msg.timestamp == datetime.time(13, 13, 35, 334100, tzinfo=datetime.timezone.utc)
assert msg.dt1 == 23.961
assert msg.dt2 == -48.122
assert msg.bv == -32.76800
@@ -164,7 +164,7 @@ def test_nors1():
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORS1'
assert msg.datestamp == datetime.date(2009, 11, 16)
- assert msg.timestamp == datetime.time(13, 24, 55)
+ assert msg.timestamp == datetime.time(13, 24, 55, tzinfo=datetime.timezone.utc)
assert msg.ec == 0
assert msg.sc == '34000034'
assert msg.battery_voltage == 23.9
@@ -203,7 +203,7 @@ def test_norc1():
assert type(msg) == pynmea2.nor.NORC1
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORC1'
- assert msg.datetime == datetime.datetime(2009, 11, 16, 13, 24, 55)
+ assert msg.datetime == datetime.datetime(2009, 11, 16, 13, 24, 55, tzinfo=datetime.timezone.utc)
assert msg.cn == 3
assert msg.cp == 11.0
assert msg.vx == 0.332
@@ -242,7 +242,7 @@ def test_norh4():
assert msg.manufacturer == 'NOR'
assert msg.sentence_type == 'NORH4'
assert msg.datestamp == datetime.date(2009, 11, 16)
- assert msg.timestamp == datetime.time(14, 34, 59)
+ assert msg.timestamp == datetime.time(14, 34, 59, tzinfo=datetime.timezone.utc)
assert msg.ec == 0
assert msg.sc == '204C0002'
assert msg.render() == data
diff --git a/test/test_proprietary.py b/test/test_proprietary.py
index 3e6a526..58995f8 100644
--- a/test/test_proprietary.py
+++ b/test/test_proprietary.py
@@ -138,7 +138,7 @@ def test_ubx00():
assert type(msg) == pynmea2.ubx.UBX00
assert msg.identifier() == 'PUBX'
assert msg.ubx_type == '00'
- assert msg.timestamp == datetime.time(7, 44, 40)
+ assert msg.timestamp == datetime.time(7, 44, 40, tzinfo=datetime.timezone.utc)
assert msg.latitude == 47.06236716666667
assert msg.lat_dir == 'N'
assert msg.render() == data
@@ -157,7 +157,7 @@ def test_ubx04():
msg = pynmea2.parse(data)
assert type(msg) == pynmea2.ubx.UBX04
assert msg.date == datetime.date(2014, 10, 13)
- assert msg.time == datetime.time(7, 38, 24)
+ assert msg.time == datetime.time(7, 38, 24, tzinfo=datetime.timezone.utc)
assert msg.clk_bias == 495176
assert msg.render() == data
@@ -239,7 +239,7 @@ def test_KWDWPL():
data = "$PKWDWPL,053125,V,4531.7900,N,12253.4800,W,,,200320,,AC7FD-1,/-*10"
msg = pynmea2.parse(data)
assert msg.manufacturer == "KWD"
- assert msg.timestamp == datetime.time(5, 31, 25)
+ assert msg.timestamp == datetime.time(5, 31, 25, tzinfo=datetime.timezone.utc)
assert msg.status == 'V'
assert msg.is_valid == False
assert msg.lat == '4531.7900'
@@ -249,7 +249,7 @@ def test_KWDWPL():
assert msg.sog == None
assert msg.cog == None
assert msg.datestamp == datetime.date(2020, 3, 20)
- assert msg.datetime == datetime.datetime(2020, 3, 20, 5, 31, 25)
+ assert msg.datetime == datetime.datetime(2020, 3, 20, 5, 31, 25, tzinfo=datetime.timezone.utc)
assert msg.altitude == None
assert msg.wname == 'AC7FD-1'
assert msg.ts == '/-'
diff --git a/test/test_types.py b/test/test_types.py
index 565664d..1164d38 100644
--- a/test/test_types.py
+++ b/test/test_types.py
@@ -13,7 +13,7 @@ def test_GGA():
assert isinstance(msg, pynmea2.GGA)
# Timestamp
- assert msg.timestamp == datetime.time(18, 43, 53, 70000)
+ assert msg.timestamp == datetime.time(18, 43, 53, 70000, tzinfo=datetime.timezone.utc)
# Latitude
assert msg.lat == '1929.045'
# Latitude Direction
@@ -99,7 +99,7 @@ def test_GST():
data = "$GPGST,172814.0,0.006,0.023,0.020,273.6,0.023,0.020,0.031*6A"
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.GST)
- assert msg.timestamp == datetime.time(hour=17, minute=28, second=14)
+ assert msg.timestamp == datetime.time(hour=17, minute=28, second=14, tzinfo=datetime.timezone.utc)
assert msg.rms == 0.006
assert msg.std_dev_major == 0.023
assert msg.std_dev_minor == 0.020
@@ -114,11 +114,11 @@ def test_RMC():
data = '''$GPRMC,225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3,E*68'''
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.RMC)
- assert msg.timestamp == datetime.time(hour=22, minute=54, second=46)
+ assert msg.timestamp == datetime.time(hour=22, minute=54, second=46, tzinfo=datetime.timezone.utc)
assert msg.datestamp == datetime.date(1994, 11, 19)
assert msg.latitude == 49.274166666666666
assert msg.longitude == -123.18533333333333
- assert msg.datetime == datetime.datetime(1994, 11, 19, 22, 54, 46)
+ assert msg.datetime == datetime.datetime(1994, 11, 19, 22, 54, 46, tzinfo=datetime.timezone.utc)
assert msg.is_valid == True
assert msg.render() == data
@@ -129,7 +129,7 @@ def test_RMC_valid():
only test validation against supplied values.
Supplied means that a `,` exists it does NOT mean that a value had to be
- supplied in the space provided. See
+ supplied in the space provided. See
https://orolia.com/manuals/VSP/Content/NC_and_SS/Com/Topics/APPENDIX/NMEA_RMCmess.htm
@@ -140,7 +140,7 @@ def test_RMC_valid():
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,*33',
'$GPRMC,123519.00,V,4807.038,N,01131.000,E,,,230394,,*24',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,*72',
-
+
# RMC Timing Messages
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S*4C',
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,N*51',
@@ -151,7 +151,7 @@ def test_RMC_valid():
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,S*0D',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,N*10',
'$GPRMC,123519.00,,4807.038,N,01131.000,E,,,230394,,,*5E',
-
+
# RMC Nav Messags
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S,S*33',
'$GPRMC,123519.00,A,4807.038,N,01131.000,E,,,230394,,,S,V*36',
@@ -204,14 +204,16 @@ def test_ZDA():
data = '''$GPZDA,010203.05,06,07,2008,-08,30'''
msg = pynmea2.parse(data)
assert isinstance(msg, pynmea2.ZDA)
- assert msg.timestamp == datetime.time(hour=1, minute=2, second=3, microsecond=50000)
+ assert msg.timestamp == datetime.time(hour=1, minute=2, second=3, microsecond=50000, tzinfo=datetime.timezone.utc)
assert msg.day == 6
assert msg.month == 7
assert msg.year == 2008
+ assert msg.tzinfo.utcoffset(0) == datetime.timedelta(hours=-8, minutes=30)
assert msg.local_zone == -8
assert msg.local_zone_minutes == 30
assert msg.datestamp == datetime.date(2008, 7, 6)
- assert msg.datetime == datetime.datetime(2008, 7, 6, 1, 2, 3, 50000, msg.tzinfo)
+ assert msg.datetime == datetime.datetime(2008, 7, 6, 1, 2, 3, 50000, tzinfo=datetime.timezone.utc)
+ assert msg.localdatetime == datetime.datetime(2008, 7, 5, 17, 32, 3, 50000, tzinfo=msg.tzinfo)
def test_VPW():
data = "$XXVPW,1.2,N,3.4,M"
| RMC message is parsed without a timezone
Per the spec (https://www.trimble.com/OEM_ReceiverHelp/V4.44/en/NMEA-0183messages_RMC.html), the date/time in the RMC message is UTC; however, pynmea2 parses it into a datetime with no timezone, so calling timestamp() on the returned value gives the wrong timestamp.
To reproduce:
```
import pynmea2

msg = '$GPRMC,184446.000,A,3720.18653,N,12153.38874,W,0.0,0.0,130220,,,A*7E'
parsed = pynmea2.parse(msg)
assert parsed.datetime.timestamp() == 1581619486.0
```
The above assertion fails unless your computer is set to UTC timezone.
The workaround to the bug for anyone else bumping against this is to `replace` the datetime with one that has the proper timezone:
```
from datetime import timezone

import pynmea2

msg = '$GPRMC,184446.000,A,3720.18653,N,12153.38874,W,0.0,0.0,130220,,,A*7E'
parsed = pynmea2.parse(msg)
assert parsed.datetime.replace(tzinfo=timezone.utc).timestamp() == 1581619486.0
```
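For context, this is just the standard-library behaviour of naive vs. timezone-aware datetimes (a generic illustration, not pynmea2-specific):
```python
from datetime import datetime, timezone

naive = datetime(2020, 2, 13, 18, 44, 46)   # interpreted in the machine's local timezone
aware = naive.replace(tzinfo=timezone.utc)  # explicitly UTC

# naive.timestamp() shifts with the local timezone setting,
# aware.timestamp() is always 1581619486.0
print(naive.timestamp(), aware.timestamp())
```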
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_proprietary.py::test_KWDWPL",
"test/test_proprietary.py::test_ubx04",
"test/test_proprietary.py::test_ubx00",
"test/test_types.py::test_GST",
"test/test_types.py::test_RMC",
"test/test_types.py::test_ZDA",
"test/test_types.py::test_GGA",
"test/test_nor.py::test_norh4",
"test/test_nor.py::test_nors1",
"test/test_nor.py::test_norbt0",
"test/test_nor.py::test_norc1",
"test/test_ash.py::test_ashratt"
] | [
"test/test_proprietary.py::test_srf",
"test/test_proprietary.py::test_extra_comma",
"test/test_proprietary.py::test_ubx03",
"test/test_proprietary.py::test_grm",
"test/test_proprietary.py::test_proprietary_2",
"test/test_proprietary.py::test_tnl",
"test/test_proprietary.py::test_proprietary_GRMW",
"test/test_proprietary.py::test_unknown_sentence",
"test/test_proprietary.py::test_proprietary_VTX_0012",
"test/test_proprietary.py::test_proprietary_MGNWPL",
"test/test_proprietary.py::test_proprietary_type",
"test/test_proprietary.py::test_proprietary_1",
"test/test_proprietary.py::test_create",
"test/test_proprietary.py::test_proprietary_VTX_0002",
"test/test_proprietary.py::test_proprietary_3",
"test/test_proprietary.py::test_proprietary_with_comma",
"test/test_types.py::test_XDR",
"test/test_types.py::test_RMC_valid",
"test/test_types.py::test_RTE",
"test/test_types.py::test_STALK_unidentified_command",
"test/test_types.py::test_R00",
"test/test_types.py::test_TXT",
"test/test_types.py::test_STALK",
"test/test_types.py::test_VPW",
"test/test_types.py::test_GLL",
"test/test_types.py::test_BOD",
"test/test_types.py::test_GRS",
"test/test_types.py::test_MWV",
"test/test_types.py::test_VBW",
"test/test_types.py::test_GSA",
"test/test_nor.py::test_norbt9",
"test/test_nor.py::test_norc4",
"test/test_nor.py::test_nori1",
"test/test_nor.py::test_norwt7",
"test/test_nor.py::test_nors4",
"test/test_nor.py::test_nor_undefined",
"test/test_nor.py::test_norbt4",
"test/test_nor.py::test_norbt7",
"test/test_nor.py::test_norwt4",
"test/test_nor.py::test_norwt9",
"test/test_ash.py::test_ash_undefined",
"test/test_ash.py::test_ashrltn",
"test/test_ash.py::test_ashratt_with_2_vs_3_decimal_timestamp"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-10-23T03:19:41Z" | mit |
|
Krukov__cashews-29 | diff --git a/cashews/__init__.py b/cashews/__init__.py
index d55971e..9611fa0 100644
--- a/cashews/__init__.py
+++ b/cashews/__init__.py
@@ -7,7 +7,7 @@ from .decorators import ( # noqa
fast_condition,
)
from .formatter import default_formatter, get_template_and_func_for, get_template_for_key # noqa
-from .helpers import add_prefix # noqa
+from .helpers import add_prefix, all_keys_lower # noqa
from .validation import set_invalidate_further # noqa
from .wrapper import Cache # noqa
diff --git a/cashews/backends/redis/backend.py b/cashews/backends/redis/backend.py
index 5bdeffa..f33099c 100644
--- a/cashews/backends/redis/backend.py
+++ b/cashews/backends/redis/backend.py
@@ -198,8 +198,5 @@ class _Redis(Backend):
return await self._client.get(key)
def close(self):
- del self._client
self._client = None
self.__is_init = False
-
- __del__ = close
diff --git a/cashews/formatter.py b/cashews/formatter.py
index fef1406..9fd9bf9 100644
--- a/cashews/formatter.py
+++ b/cashews/formatter.py
@@ -18,7 +18,7 @@ class _ReplaceFormatter(Formatter):
return self.__default(field_name), None
def format_field(self, value, format_spec):
- return format(value)
+ return format(format_value(value))
class _FuncFormatter(_ReplaceFormatter):
@@ -38,9 +38,9 @@ class _FuncFormatter(_ReplaceFormatter):
def format_field(self, value, format_spec):
format_spec, args = self.parse_format_spec(format_spec)
- value = super().format_field(value, format_spec if format_spec not in self._functions else "")
if format_spec in self._functions:
- return str(self._functions[format_spec](value, *args))
+ value = str(self._functions[format_spec](value, *args))
+ value = super().format_field(value, format_spec if format_spec not in self._functions else "")
return value
@staticmethod
@@ -51,6 +51,14 @@ class _FuncFormatter(_ReplaceFormatter):
return format_spec, args.replace(")", "").split(",")
+def format_value(value):
+ if value is None:
+ return ""
+ elif isinstance(value, bool):
+ return str(value).lower()
+ return value
+
+
default_formatter = _FuncFormatter(lambda name: "")
default_formatter._register("len", len)
diff --git a/cashews/helpers.py b/cashews/helpers.py
index 0c15206..c61fd99 100644
--- a/cashews/helpers.py
+++ b/cashews/helpers.py
@@ -19,6 +19,23 @@ def add_prefix(prefix: str):
return _middleware
+def all_keys_lower():
+ async def _middleware(call, *args, backend=None, cmd=None, **kwargs):
+ if cmd.lower() == "get_many":
+ return await call(*[key.lower() for key in args])
+ call_values = get_call_values(call, args, kwargs)
+ as_key = "key"
+ if cmd == "delete_match":
+ as_key = "pattern"
+ key = call_values.get(as_key)
+ if key:
+ call_values[as_key] = key.lower()
+ return await call(**call_values)
+ return await call(*args, **kwargs)
+
+ return _middleware
+
+
def memory_limit(min=0, max=None):
async def _memory_middleware(call, *args, backend=None, cmd=None, **kwargs):
if cmd != "set":
diff --git a/cashews/key.py b/cashews/key.py
index 93a65a9..b8652cc 100644
--- a/cashews/key.py
+++ b/cashews/key.py
@@ -71,9 +71,8 @@ def _get_cache_key(
"""
kwargs = kwargs or {}
key_values = get_call_values(func, args, kwargs)
- key_values = {k: v if v is not None else "" for k, v in key_values.items()}
_key_template = template or get_cache_key_template(func)
- return template_to_pattern(_key_template, _formatter=default_formatter, **key_values).lower()
+ return template_to_pattern(_key_template, _formatter=default_formatter, **key_values)
def get_func_params(func):
@@ -100,7 +99,7 @@ def get_cache_key_template(func: Callable, key: Optional[str] = None, prefix: st
if func_params and func_params[0] == "self":
name = [func.__module__, func.__qualname__]
params = {param_name: "{" + param_name + "}" for param_name in func_params}
- key = ":".join([*name, *chain(*params.items())]).lower()
+ key = ":".join([*name, *chain(*params.items())])
else:
_check_key_params(key, func_params)
if prefix:
diff --git a/cashews/serialize.py b/cashews/serialize.py
index aea1ecb..3affbb0 100644
--- a/cashews/serialize.py
+++ b/cashews/serialize.py
@@ -10,10 +10,6 @@ class UnSecureDataError(Exception):
pass
-class none:
- pass
-
-
class PickleSerializerMixin:
_digestmods = {
b"sha1": hashlib.sha1,
@@ -47,7 +43,9 @@ class PickleSerializerMixin:
def _process_value(self, value: Union[bytes, None, int, str], key, default=None):
if value is None:
return default
- if isinstance(value, int) or value.isdigit():
+ if isinstance(value, int):
+ return value
+ if value.isdigit():
return int(value)
try:
sign, value = value.split(b"_", 1)
@@ -60,8 +58,6 @@ class PickleSerializerMixin:
value = pickle.loads(value, fix_imports=False, encoding="bytes")
if self._check_repr:
repr(value)
- if value is none:
- return None
return value
async def get_many(self, *keys):
@@ -86,8 +82,6 @@ class PickleSerializerMixin:
return sign, digestmod
async def set(self, key: str, value, *args, **kwargs):
- if value is None:
- value = none
if isinstance(value, int) and not isinstance(value, bool):
return await super().set(key, value, *args, **kwargs)
value = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=False)
diff --git a/cashews/validation.py b/cashews/validation.py
index ddb63d6..0d8bf99 100644
--- a/cashews/validation.py
+++ b/cashews/validation.py
@@ -11,7 +11,7 @@ from .key import get_call_values, get_func_params
async def invalidate_func(backend: Backend, func, kwargs: Optional[Dict] = None):
values = {**{param: "*" for param in get_func_params(func)}, **kwargs}
for template in get_templates_for_func(func):
- del_template = template_to_pattern(template, **values).lower()
+ del_template = template_to_pattern(template, **values)
await backend.delete_match(del_template)
| Krukov/cashews | f277756aeb43861304175965cc57cd6c8d224aaf | diff --git a/tests/test_key.py b/tests/test_key.py
index 6e8b99f..7b942cc 100644
--- a/tests/test_key.py
+++ b/tests/test_key.py
@@ -61,10 +61,10 @@ def test_cache_func_key_dict():
("args", "kwargs", "template", "key"),
(
(
- ("a1", "a2", "a3"),
- {"kwarg1": "k1", "kwarg3": "k3"},
+ ("A1", "a2", "a3"),
+ {"kwarg1": "k1", "kwarg3": True},
"{arg1}:{kwarg1}-{kwarg3}",
- "a1:k1-k3",
+ "A1:k1-true",
),
(
("a1", "a2", "a3"),
@@ -92,9 +92,9 @@ def test_cache_func_key_dict():
),
(
("a1",),
- {"kwarg1": "k1", "arg2": 2},
+ {"kwarg1": "K1", "arg2": 2},
"{arg2}:{kwarg1}:{kwarg3}",
- "2:k1:",
+ "2:K1:",
),
(("a1", "a2"), {"kwarg1": "test"}, "{kwarg1:len}", "4"),
(
diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py
index 6de9881..15f03d8 100644
--- a/tests/test_wrapper.py
+++ b/tests/test_wrapper.py
@@ -6,7 +6,7 @@ import pytest
from cashews.backends.memory import Memory
from cashews.disable_control import ControlMixin
from cashews.formatter import get_templates_for_func
-from cashews.helpers import add_prefix
+from cashews.helpers import add_prefix, all_keys_lower
from cashews.wrapper import Cache, _create_auto_init
pytestmark = pytest.mark.asyncio
@@ -167,6 +167,22 @@ async def test_auto_init(cache):
target.init.assert_called_once()
+async def test_all_keys_lower(cache: Cache, target):
+ cache._backends[""] = cache._backends[""][0], (all_keys_lower(),)
+ await cache.get(key="KEY")
+ target.get.assert_called_once_with(key="key", default=None)
+
+ await cache.set(key="KEY", value="value")
+ target.set.assert_called_once_with(
+ key="key",
+ value="value",
+ exist=None,
+ expire=None,
+ )
+ await cache.ping()
+ target.ping.assert_called_once_with(message=b"PING")
+
+
async def test_add_prefix(cache: Cache, target):
cache._backends[""] = cache._backends[""][0], (add_prefix("prefix!"),)
| Case sensitive keys (do not convert silently all string arguments to lowercase by default)
I was surprised when I occasionally found that all strings are converted to lowercase when building a key.
```python
import asyncio
import logging
from datetime import timedelta
from cashews import cache
logger = logging.getLogger(__name__)
logging.basicConfig(level="DEBUG")
async def logging_middleware(call, *args, backend=None, cmd=None, **kwargs):
key = args[0] if args else kwargs.get("key", kwargs.get("pattern", ""))
logger.info(f"{args}; {kwargs}")
# logger.info("=> Cache request: %s ", cmd, extra={"command": cmd, "cache_key": key})
return await call(*args, **kwargs)
cache.setup("mem://", middlewares=(logging_middleware, ))
@cache(ttl=timedelta(minutes=1))
async def get_name(user, version="v1"):
return user
async def main():
await get_name("Bob")
    result_1 = await get_name("Bob")
    result_2 = await get_name("bob")
    print(result_1)
    print(result_2)
    value = await cache.get("__main__:get_name:user:bob:version:v1")
    print(f"value: {value}")
    value = await cache.get("__main__:get_name:user:Bob:version:v1")
    print(f"value: {value}")

asyncio.run(main())
```
# Question
1. Is it a bug or a feature?
2. Is there an easy way to disable lowercasing of strings when building a key? So if I pass `my_string=FooBar`, it will be saved as `FooBar`, not as `foobar`. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_wrapper.py::test_disable_cache_on_fail_return_2",
"tests/test_wrapper.py::test_disable_cmd",
"tests/test_wrapper.py::test_init",
"tests/test_wrapper.py::test_multilayer_cache",
"tests/test_wrapper.py::test_all_keys_lower",
"tests/test_wrapper.py::test_auto_init",
"tests/test_wrapper.py::test_init_disable",
"tests/test_wrapper.py::test_add_prefix_get_many",
"tests/test_wrapper.py::test_disable_decorators_get",
"tests/test_wrapper.py::test_disable_decorators_set",
"tests/test_wrapper.py::test_cache_decor_register",
"tests/test_wrapper.py::test_disable_ctz",
"tests/test_wrapper.py::test_smoke_cmds",
"tests/test_wrapper.py::test_disable_decorators",
"tests/test_wrapper.py::test_prefix",
"tests/test_wrapper.py::test_add_prefix_delete_match",
"tests/test_wrapper.py::test_disable_cache_on_fail_return",
"tests/test_wrapper.py::test_add_prefix",
"tests/test_key.py::test_get_key_template[func2-None-tests.test_key:func2:a:{a}:k:{k}]",
"tests/test_key.py::test_cache_key_args_kwargs[args10-kwargs10-{kwarg1:jwt(user)}-test]",
"tests/test_key.py::test_ttl_to_seconds[10m1s-601]",
"tests/test_key.py::test_get_key_template[func3-key:{k:len}:{k:hash(md5)}-key:{k:len}:{k:hash(md5)}]",
"tests/test_key.py::test_cache_key_args_kwargs[args9-kwargs9-{kwarg1:hash(md5)}-098f6bcd4621d373cade4e832627b4f6]",
"tests/test_key.py::test_detect_template_by_key[prefix:func1:test-func1:*]",
"tests/test_key.py::test_ttl_to_seconds[<lambda>-3600_0]",
"tests/test_key.py::test_detect_template_by_key[func2:user:1-None0]",
"tests/test_key.py::test_detect_template_by_key[func1:-func1:*]",
"tests/test_key.py::test_cache_key_args_kwargs[args0-kwargs0-{arg1}:{kwarg1}-{kwarg3}-A1:k1-true]",
"tests/test_key.py::test_get_key_template[func1-None-tests.test_key:func1:a:{a}]",
"tests/test_key.py::test_cache_key_args_kwargs[args4-kwargs4-{arg2}:{kwarg1}:{kwarg3}-2:k1:k3]",
"tests/test_key.py::test_detect_template_by_key[func3:2-func3:*]",
"tests/test_key.py::test_ttl_to_seconds[80-80]",
"tests/test_key.py::test_ttl_to_seconds[1-1]",
"tests/test_key.py::test_detect_template_by_key[func2:-:user:1-func2:*:user:*]",
"tests/test_key.py::test_ttl_to_seconds[10-10]",
"tests/test_key.py::test_cache_key_args_kwargs[args7-kwargs7-{user.name:len}-4]",
"tests/test_key.py::test_cache_key_args_kwargs[args6-kwargs6-{kwarg1:len}-4]",
"tests/test_key.py::test_detect_template_by_key[prefix:func2:test:user:1:1-None]",
"tests/test_key.py::test_cache_key_args_kwargs[args5-kwargs5-{arg2}:{kwarg1}:{kwarg3}-2:K1:]",
"tests/test_key.py::test_ttl_to_seconds[1m10s-70]",
"tests/test_key.py::test_get_key_template[func3-None-tests.test_key:func3:a:{a}:k:{k}]",
"tests/test_key.py::test_cache_func_key_dict",
"tests/test_key.py::test_get_key_template_error",
"tests/test_key.py::test_ttl_to_seconds[ttl0-10]",
"tests/test_key.py::test_detect_template_by_key[func:1-None]",
"tests/test_key.py::test_ttl_to_seconds[100.1-100.1]",
"tests/test_key.py::test_ttl_to_seconds[10s-10]",
"tests/test_key.py::test_cache_key_args_kwargs[args8-kwargs8-{kwarg1:hash}-098f6bcd4621d373cade4e832627b4f6]",
"tests/test_key.py::test_ttl_to_seconds[<lambda>-3600_1]",
"tests/test_key.py::test_detect_template_by_key[func1:test-func1:*]",
"tests/test_key.py::test_cache_key_args_kwargs[args3-kwargs3-{arg2}-{kwarg1}-{kwarg3}-2-k1-]",
"tests/test_key.py::test_cache_key_args_kwargs[args2-None-{arg1}-{kwarg1}-{kwarg3}-a1--]",
"tests/test_key.py::test_detect_template_by_key[func2:user:1-None1]",
"tests/test_key.py::test_cache_key_args_kwargs[args1-kwargs1-None-tests.test_key:func:arg1:a1:arg2:a2:kwarg1:k1:kwarg2:true]",
"tests/test_key.py::test_get_key_template[func2-key:{k}-key:{k}]"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-02-05T21:55:43Z" | mit |
|
KyleJamesWalker__yamlsettings-36 | diff --git a/setup.py b/setup.py
index d5957bc..51cbf92 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ requirements.update(all=sorted(set().union(*requirements.values())))
setup(
name='yamlsettings',
- version='2.0.3',
+ version='2.1.0',
description='Yaml Settings Configuration Module',
long_description=readme,
author='Kyle James Walker',
diff --git a/yamlsettings/helpers.py b/yamlsettings/helpers.py
index 50193b2..151b843 100644
--- a/yamlsettings/helpers.py
+++ b/yamlsettings/helpers.py
@@ -49,7 +49,7 @@ def update_from_env(yaml_dict, prefix=None):
env_path = "{0}{1}{2}".format(
prefix.upper(),
'_' if prefix else '',
- '_'.join([str(key).upper() for key in path])
+ '_'.join([str(key).replace('-', '_').upper() for key in path])
)
env_val = os.environ.get(env_path, None)
if env_val is not None:
| KyleJamesWalker/yamlsettings | 9a2c86590f867345b37e2dd91d8ecd2ef645c7d2 | diff --git a/tests/test_yamldict.py b/tests/test_yamldict.py
index 66b44c0..747dfb9 100644
--- a/tests/test_yamldict.py
+++ b/tests/test_yamldict.py
@@ -12,7 +12,7 @@ import unittest
from mock import mock_open
from yamlsettings import (load, load_all, save_all,
- update_from_env, update_from_file)
+ update_from_env, update_from_file, yamldict)
from . import builtin_module, path_override, open_override, isfile_override
@@ -228,6 +228,14 @@ class YamlDictTestCase(unittest.TestCase):
test_defaults.update({'a': (4,)})
self.assertEqual(test_defaults.a, (4,))
+ @mock.patch.dict('os.environ', {'FOO_BAR': 'new-baz'})
+ def test_dash_vars_with_env(self):
+ """Test items with dashes can be overritten with env"""
+ test_settings = yamldict.YAMLDict({'foo-bar': 'baz'})
+ assert test_settings['foo-bar'] == 'baz'
+ update_from_env(test_settings)
+ assert test_settings['foo-bar'] == 'new-baz'
+
if __name__ == '__main__':
unittest.main()
| Replace all non-variable characters to underscore in update_from_env
For example:
```python3
import os
import yamlsettings
from yamlsettings.yamldict import YAMLDict
os.environ['FOO_BAR'] = 'barbaz'
settings = YAMLDict({'foo-bar': None})
yamlsettings.update_from_env(settings)
assert settings['foo-bar'] == 'barbaz'
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_yamldict.py::YamlDictTestCase::test_dash_vars_with_env"
] | [
"tests/test_yamldict.py::YamlDictTestCase::test_load_single_file",
"tests/test_yamldict.py::YamlDictTestCase::test_stupid_override",
"tests/test_yamldict.py::YamlDictTestCase::test_update_from_file",
"tests/test_yamldict.py::YamlDictTestCase::test_save_all",
"tests/test_yamldict.py::YamlDictTestCase::test_clone_changes_isolated",
"tests/test_yamldict.py::YamlDictTestCase::test_yaml_dict_merge",
"tests/test_yamldict.py::YamlDictTestCase::test_load_first_found",
"tests/test_yamldict.py::YamlDictTestCase::test_file_writing",
"tests/test_yamldict.py::YamlDictTestCase::test_load_all",
"tests/test_yamldict.py::YamlDictTestCase::test_variable_override",
"tests/test_yamldict.py::YamlDictTestCase::test_load_with_envs",
"tests/test_yamldict.py::YamlDictTestCase::test_list_replace_on_update",
"tests/test_yamldict.py::YamlDictTestCase::test_rebase",
"tests/test_yamldict.py::YamlDictTestCase::test_update",
"tests/test_yamldict.py::YamlDictTestCase::test_limit"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-06-22T16:52:11Z" | mit |
|
LKI__chinese-calendar-11 | diff --git a/chinese_calendar/constants.py b/chinese_calendar/constants.py
index 85b9ee5..54bc6fc 100644
--- a/chinese_calendar/constants.py
+++ b/chinese_calendar/constants.py
@@ -23,6 +23,9 @@ class Holiday(Enum):
national_day = 'National Day', '国庆节', 3
mid_autumn_festival = 'Mid-autumn Festival', '中秋', 1
+ # special holidays
+ anti_fascist_70th_day = 'Anti-Fascist 70th Day', '中国人民抗日战争暨世界反法西斯战争胜利70周年纪念日', 1
+
holidays = {
datetime.date(year=2006, month=1, day=1): Holiday.new_years_day.value,
@@ -277,6 +280,8 @@ holidays = {
datetime.date(year=2015, month=5, day=1): Holiday.labour_day.value,
datetime.date(year=2015, month=6, day=20): Holiday.dragon_boat_festival.value,
datetime.date(year=2015, month=6, day=22): Holiday.dragon_boat_festival.value,
+ datetime.date(year=2015, month=9, day=3): Holiday.anti_fascist_70th_day.value,
+ datetime.date(year=2015, month=9, day=4): Holiday.anti_fascist_70th_day.value,
datetime.date(year=2015, month=9, day=27): Holiday.mid_autumn_festival.value,
datetime.date(year=2015, month=10, day=1): Holiday.national_day.value,
datetime.date(year=2015, month=10, day=2): Holiday.national_day.value,
@@ -423,6 +428,7 @@ workdays = {
datetime.date(year=2015, month=1, day=4): Holiday.new_years_day.value,
datetime.date(year=2015, month=2, day=15): Holiday.spring_festival.value,
datetime.date(year=2015, month=2, day=28): Holiday.spring_festival.value,
+ datetime.date(year=2015, month=9, day=6): Holiday.anti_fascist_70th_day.value,
datetime.date(year=2015, month=10, day=10): Holiday.national_day.value,
datetime.date(year=2016, month=2, day=6): Holiday.spring_festival.value,
datetime.date(year=2016, month=2, day=14): Holiday.spring_festival.value,
diff --git a/chinese_calendar/scripts/data.py b/chinese_calendar/scripts/data.py
index fc2eee2..1c55b9f 100644
--- a/chinese_calendar/scripts/data.py
+++ b/chinese_calendar/scripts/data.py
@@ -30,6 +30,9 @@ class Holiday(Enum):
national_day = 'National Day', '国庆节', 3
mid_autumn_festival = 'Mid-autumn Festival', '中秋', 1
+ # special holidays
+ anti_fascist_70th_day = 'Anti-Fascist 70th Day', '中国人民抗日战争暨世界反法西斯战争胜利70周年纪念日', 1
+
holidays = {}
@@ -120,6 +123,11 @@ class Arrangement(object):
五、端午节:6月20日放假,6月22日(星期一)补休。
六、中秋节:9月27日放假。
七、国庆节:10月1日至7日放假调休,共7天。10月10日(星期六)上班。
+
+ 注意:参见《国务院关于中国人民抗日战争暨世界反法西斯战争胜利70周年纪念日调休放假的通知》
+ http://www.gov.cn/zhengce/content/2015-05/13/content_9742.htm
+ 额外的放假安排如下:
+ 9月3日至5日调休放假,共3天。其中9月3日(星期四)放假,9月4日(星期五)调休,9月6日(星期日)上班。
"""
self.year_at(2015) \
.nyd().rest(1, 1).to(1, 3).work(1, 4) \
@@ -128,7 +136,8 @@ class Arrangement(object):
.ld().rest(5, 1) \
.dbf().rest(6, 20).rest(6, 22) \
.maf().rest(9, 27) \
- .nd().rest(10, 1).to(10, 7).work(10, 10)
+ .nd().rest(10, 1).to(10, 7).work(10, 10) \
+ .afd().rest(9, 3).to(9, 4).work(9, 6)
def _2014(self):
""" http://www.gov.cn/zwgk/2013-12/11/content_2546204.htm
@@ -360,6 +369,9 @@ class Arrangement(object):
def maf(self):
return self.mark(chinese_calendar.Holiday.mid_autumn_festival)
+ def afd(self):
+ return self.mark(chinese_calendar.Holiday.anti_fascist_70th_day)
+
def mark(self, holiday):
self.holiday = holiday
return self
| LKI/chinese-calendar | d6d1034a0c5eb9a8652cbe1aaca8d0baf08b6092 | diff --git a/tests/test_holiday_amount.py b/tests/test_holiday_amount.py
index 56ea75c..f0ce0ff 100644
--- a/tests/test_holiday_amount.py
+++ b/tests/test_holiday_amount.py
@@ -21,6 +21,7 @@ class HolidayAmountTests(unittest.TestCase):
holiday_amounts[2011] += 1 # 11年要补班12年的元旦假期
holiday_amounts[2012] -= 1 # 12年可以享受11年补班的假
holiday_amounts[2014] += 1 # 14年的节假日安排莫名少了一天
+ holiday_amounts[2015] -= 1 # 15年是中国人民抗日战争暨世界反法西斯战争胜利70周年,多放1天
for year in range(2007, 2018 + 1): # 06年数据少,不测了
- self.assertEqual(11, holiday_amounts[year])
+ self.assertEqual(11, holiday_amounts[year], 'Holiday amount of year {}'.format(year))
self.assertEqual(1, 1)
| 2015 is missing one holiday
2015 had a newly added holiday, the 70th anniversary of the victory in the War of Resistance (抗战胜利70周年纪念日). The whole country had that day off.
http://www.gov.cn/zhengce/content/2015-05/13/content_9742.htm
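A minimal sketch of the expected behaviour once the extra holiday is in the data (this assumes the package's public `is_holiday`/`is_workday` helpers):
```python
import datetime

from chinese_calendar import is_holiday, is_workday

# Sep 3-4, 2015 were the extra V-Day holidays; Sunday Sep 6 was the make-up workday.
assert is_holiday(datetime.date(2015, 9, 3))
assert is_holiday(datetime.date(2015, 9, 4))
assert is_workday(datetime.date(2015, 9, 6))
```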
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_holiday_amount.py::HolidayAmountTests::test_holiday_amount"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2018-01-22T04:57:05Z" | mit |
|
LSSTDESC__ceci-84 | diff --git a/ceci/stage.py b/ceci/stage.py
index 7819ab5..39d8ddc 100644
--- a/ceci/stage.py
+++ b/ceci/stage.py
@@ -12,7 +12,7 @@ import datetime
from abc import abstractmethod
from . import errors
from .monitor import MemoryMonitor
-from .config import StageConfig, cast_to_streamable
+from .config import StageParameter, StageConfig, cast_to_streamable
SERIAL = "serial"
MPI_PARALLEL = "mpi"
@@ -488,8 +488,10 @@ I currently know about these stages:
parser = argparse.ArgumentParser(description=f"Run pipeline stage {cls.name}")
parser.add_argument("stage_name")
for conf, def_val in cls.config_options.items():
- opt_type = def_val if isinstance(def_val, type) else type(def_val)
-
+ if isinstance(def_val, StageParameter):
+ opt_type = def_val.dtype
+ else:
+ opt_type = def_val if isinstance(def_val, type) else type(def_val)
if opt_type == bool:
parser.add_argument(f"--{conf}", action="store_const", const=True)
parser.add_argument(
| LSSTDESC/ceci | dc92ef19bde422edd75339554606fd1a807afdc8 | diff --git a/tests/test_stage.py b/tests/test_stage.py
index 2201e84..c0fe1ec 100644
--- a/tests/test_stage.py
+++ b/tests/test_stage.py
@@ -128,13 +128,25 @@ def test_parameter():
name = "test_stage_param"
inputs = [("inp1", HDFFile)]
outputs = []
- config_options = dict(a=StageParameter(float, 5., msg="a float"))
+ config_options = dict(
+ a=StageParameter(float, 5., msg="a float"),
+ b=StageParameter(str, msg="a str"),
+ )
def run(self):
pass
- stage_1 = TestStage.make_stage(a=6., inp1='dummy')
+ stage_1 = TestStage.make_stage(
+ a=6., b='puffins are not extinct?', inp1='dummy',
+ )
assert stage_1.config.a == 6.
+ assert stage_1.config.b == 'puffins are not extinct?'
+
+ cmd = "TestStage", "--a", "6", "--b", "puffins are not extinct?", "--inp", "dummy"
+ stage_1_cmd = TestStage(TestStage.parse_command_line(cmd))
+ assert stage_1_cmd.config.a == 6.
+ assert stage_1_cmd.config.b == 'puffins are not extinct?'
+
# This one should not work
class TestStage_2(PipelineStage):
@@ -198,7 +210,7 @@ def test_parameter():
-
+
def test_incomplete():
class Alpha(PipelineStage):
| Setting a StageParameter
If I try to set the data_path option on the bpz_lite stage on the command line by adding `--data_path=/path/to/somewhere`, the parameter value ends up broken, looking like a string representation of the class, something like `'<class StageParameter ...>'`. I guess the wrong thing is being str'd somewhere. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_stage.py::test_parameter"
] | [
"tests/test_stage.py::test_construct",
"tests/test_stage.py::test_make_stage",
"tests/test_stage.py::test_incomplete",
"tests/test_stage.py::test_auto_name",
"tests/test_stage.py::test_duplicate",
"tests/test_stage.py::test_explicit_config",
"tests/test_stage.py::test_okay_abc_dupe_name",
"tests/test_stage.py::test_okay_abc_dupe_name2",
"tests/test_stage.py::test_config_specified",
"tests/test_stage.py::test_bool_flags",
"tests/test_stage.py::test_open_input",
"tests/test_stage.py::test_open_output",
"tests/test_stage.py::test_map",
"tests/test_stage.py::test_unknown_stage"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-10-05T17:57:12Z" | bsd-3-clause |
|
LeMyst__WikibaseIntegrator-233 | diff --git a/wikibaseintegrator/datatypes/basedatatype.py b/wikibaseintegrator/datatypes/basedatatype.py
index 6804b1e..4f0393c 100644
--- a/wikibaseintegrator/datatypes/basedatatype.py
+++ b/wikibaseintegrator/datatypes/basedatatype.py
@@ -1,5 +1,4 @@
from wikibaseintegrator.models import Claim, Reference, References, Snak, Snaks
-from wikibaseintegrator.wbi_enums import WikibaseSnakType
class BaseDataType(Claim):
@@ -18,24 +17,7 @@ class BaseDataType(Claim):
"""
Constructor, will be called by all data types.
- :param value: Data value of the Wikibase data snak
- :type value: str or int or tuple
:param prop_nr: The property number a Wikibase snak belongs to
- :type prop_nr: A string with a prefixed 'P' and several digits e.g. 'P715' (Drugbank ID) or an int
- :param datatype: The Wikibase data type declaration of this snak
- :type datatype: str
- :param snaktype: One of the values in the enum WikibaseSnakValueType denoting the state of the value:
- KNOWN_VALUE, NO_VALUE or UNKNOWN_VALUE
- :type snaktype: WikibaseSnakType
- :param references: A one level nested list with reference Wikibase snaks of base type BaseDataType,
- e.g. references=[[<BaseDataType>, <BaseDataType>], [<BaseDataType>]]
- This will create two references, the first one with two statements, the second with one
- :type references: A one level nested list with instances of BaseDataType or children of it.
- :param qualifiers: A list of qualifiers for the Wikibase mainsnak
- :type qualifiers: A list with instances of BaseDataType or children of it.
- :param rank: The rank of a Wikibase mainsnak, should determine the status of a value
- :type rank: A string of one of three allowed values: 'normal', 'deprecated', 'preferred'
- :return:
"""
super().__init__(**kwargs)
diff --git a/wikibaseintegrator/wbi_helpers.py b/wikibaseintegrator/wbi_helpers.py
index ac0c990..9b3e1a8 100644
--- a/wikibaseintegrator/wbi_helpers.py
+++ b/wikibaseintegrator/wbi_helpers.py
@@ -21,7 +21,7 @@ class BColors:
UNDERLINE = '\033[4m'
-def mediawiki_api_call(method, mediawiki_api_url=None, session=None, max_retries=1000, retry_after=60, **kwargs):
+def mediawiki_api_call(method, mediawiki_api_url=None, session=None, max_retries=100, retry_after=60, **kwargs):
"""
:param method: 'GET' or 'POST'
:param mediawiki_api_url:
@@ -53,8 +53,8 @@ def mediawiki_api_call(method, mediawiki_api_url=None, session=None, max_retries
print(f"Connection error: {e}. Sleeping for {retry_after} seconds.")
sleep(retry_after)
continue
- if response.status_code == 503: # pragma: no cover
- print(f"service unavailable. sleeping for {retry_after} seconds")
+ if response.status_code in (500, 502, 503, 504):
+ print(f"Service unavailable (HTTP Code {response.status_code}). Sleeping for {retry_after} seconds.")
sleep(retry_after)
continue
@@ -205,8 +205,8 @@ def execute_sparql_query(query, prefix=None, endpoint=None, user_agent=None, max
print(f"Connection error: {e}. Sleeping for {retry_after} seconds.")
sleep(retry_after)
continue
- if response.status_code == 503:
- print(f"Service unavailable (503). Sleeping for {retry_after} seconds")
+ if response.status_code in (500, 502, 503, 504):
+ print(f"Service unavailable (HTTP Code {response.status_code}). Sleeping for {retry_after} seconds.")
sleep(retry_after)
continue
if response.status_code == 429:
| LeMyst/WikibaseIntegrator | 6036f281d760c6452618040f02590ced927e55ad | diff --git a/test/test_wbi_helpers.py b/test/test_wbi_helpers.py
index d361164..6a80431 100644
--- a/test/test_wbi_helpers.py
+++ b/test/test_wbi_helpers.py
@@ -7,13 +7,27 @@ from wikibaseintegrator.wbi_helpers import mediawiki_api_call_helper, get_user_a
def test_connection():
+ data = {'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}
+
+ mediawiki_api_call_helper(data=data, max_retries=2, retry_after=1, allow_anonymous=True)
+
with unittest.TestCase().assertRaises(MWApiError):
- mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, mediawiki_api_url="https://www.wikidataaaaaaa.org", max_retries=3,
- retry_after=1, allow_anonymous=True)
- with unittest.TestCase().assertRaises(requests.HTTPError):
- mediawiki_api_call_helper(data=None, mediawiki_api_url="https://httpbin.org/status/400", max_retries=3, retry_after=1, allow_anonymous=True)
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://www.wikidataaaaaaa.org", max_retries=2, retry_after=1, allow_anonymous=True)
- mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, max_retries=3, retry_after=1, allow_anonymous=True)
+ with unittest.TestCase().assertRaises(MWApiError):
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://httpbin.org/status/500", max_retries=2, retry_after=1, allow_anonymous=True)
+
+ with unittest.TestCase().assertRaises(MWApiError):
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://httpbin.org/status/502", max_retries=2, retry_after=1, allow_anonymous=True)
+
+ with unittest.TestCase().assertRaises(MWApiError):
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://httpbin.org/status/503", max_retries=2, retry_after=1, allow_anonymous=True)
+
+ with unittest.TestCase().assertRaises(MWApiError):
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://httpbin.org/status/504", max_retries=2, retry_after=1, allow_anonymous=True)
+
+ with unittest.TestCase().assertRaises(requests.HTTPError):
+ mediawiki_api_call_helper(data=data, mediawiki_api_url="https://httpbin.org/status/400", max_retries=2, retry_after=1, allow_anonymous=True)
def test_user_agent(capfd):
| Add retry when getting 502 HTTP errors
![bild](https://user-images.githubusercontent.com/68460690/134591492-3ad2deb2-eb62-45a7-ae3e-dd7c36ae4c98.png)
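A rough sketch of the kind of retry loop being requested (illustrative only — the function name and parameters here are made up, not WikibaseIntegrator's actual API):
```python
import time

import requests


def call_with_retries(url, data, max_retries=5, retry_after=60):
    for _ in range(max_retries):
        response = requests.post(url, data=data)
        # 502 (and the other transient 5xx codes) should trigger a wait-and-retry
        # instead of bubbling the error up immediately.
        if response.status_code in (500, 502, 503, 504):
            time.sleep(retry_after)
            continue
        response.raise_for_status()
        return response
    raise requests.HTTPError("service still unavailable after retries")
```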
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_wbi_helpers.py::test_connection"
] | [
"test/test_wbi_helpers.py::test_user_agent",
"test/test_wbi_helpers.py::test_allow_anonymous",
"test/test_wbi_helpers.py::test_sparql"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-09-24T06:33:00Z" | mit |
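For clarity, the retry behaviour this instance's patch generalises (from a lone 503 check to the transient 5xx family) boils down to the pattern below. This is a minimal sketch, not WikibaseIntegrator code: `call_api`, `max_retries` and `retry_after` are illustrative names, and only the `requests` and `time` calls are real library API.

```python
import time
import requests

RETRYABLE = (500, 502, 503, 504)  # transient server-side errors worth retrying

def call_api(url, params, max_retries=3, retry_after=1):
    """Minimal sketch of retry-on-5xx; not the library's actual helper."""
    for _ in range(max_retries):
        response = requests.get(url, params=params)
        if response.status_code in RETRYABLE:
            # A gateway hiccup such as a 502: wait and try again.
            time.sleep(retry_after)
            continue
        response.raise_for_status()  # other HTTP errors still surface immediately
        return response.json()
    # Retries exhausted: surface the last 5xx response as an exception.
    response.raise_for_status()
```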
|
LeMyst__WikibaseIntegrator-420 | diff --git a/notebooks/item_create_new.ipynb b/notebooks/item_create_new.ipynb
index d498f68..7ad47bd 100644
--- a/notebooks/item_create_new.ipynb
+++ b/notebooks/item_create_new.ipynb
@@ -35,12 +35,12 @@
},
"outputs": [],
"source": [
- "from wikibaseintegrator.models import Qualifiers, References, Reference\n",
- "\n",
- "from wikibaseintegrator import WikibaseIntegrator\n",
- "from wikibaseintegrator import wbi_login\n",
"from wikibaseintegrator import datatypes\n",
- "from wikibaseintegrator.wbi_config import config"
+ "from wikibaseintegrator import wbi_login\n",
+ "from wikibaseintegrator import WikibaseIntegrator\n",
+ "from wikibaseintegrator.models import Qualifiers, Reference, References\n",
+ "from wikibaseintegrator.wbi_config import config\n",
+ "from wikibaseintegrator.wbi_enums import WikibaseRank, WikibaseSnakType"
]
},
{
@@ -162,7 +162,7 @@
"outputs": [
{
"data": {
- "text/plain": "<LanguageValue @ed5b70 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>"
+ "text/plain": "<LanguageValue @8e3ca0 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>"
},
"execution_count": 6,
"metadata": {},
@@ -198,7 +198,7 @@
"outputs": [
{
"data": {
- "text/plain": "<Aliases @ed4880 _Aliases__aliases={'en': [<Alias @ed56c0 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @ed5ae0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>"
+ "text/plain": "<Aliases @c234c0 _Aliases__aliases={'en': [<Alias @c5ce50 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @c5cca0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>"
},
"execution_count": 7,
"metadata": {},
@@ -234,7 +234,7 @@
"outputs": [
{
"data": {
- "text/plain": "<LanguageValue @ed5750 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>"
+ "text/plain": "<LanguageValue @c5d240 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>"
},
"execution_count": 8,
"metadata": {},
@@ -270,7 +270,7 @@
"outputs": [
{
"data": {
- "text/plain": "<Claims @ed4460 _Claims__claims={'P31533': [<String @ed5ab0 _Claim__mainsnak=<Snak @ed6920 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash=None _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @ed7370 _Qualifiers__qualifiers={'P828': [<Snak @ed6980 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @ed5ff0 _References__references=[<Reference @ed6260 _Reference__hash=None _Reference__snaks=<Snaks @ed68c0 snaks={'P828': [<Snak @ed6aa0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @ed69e0 _Reference__hash=None _Reference__snaks=<Snaks @ed6950 snaks={'P828': [<Snak @ed7c40 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>]>>]}>"
+ "text/plain": "<Claims @c233a0 _Claims__claims={'P31533': [<String @c5e0b0 _Claim__mainsnak=<Snak @c5f700 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash=None _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5f820 _Qualifiers__qualifiers={'P828': [<Snak @c5dc00 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.PREFERRED: 'preferred'> _Claim__removed=False _Claim__references=<References @c5e140 _References__references=[<Reference @c5fa00 _Reference__hash=None _Reference__snaks=<Snaks @c5da50 snaks={'P828': [<Snak @c5dd20 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @c5dc60 _Reference__hash=None _Reference__snaks=<Snaks @c5dbd0 snaks={'P828': [<Snak @c5f5b0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>, <Reference @c5e0e0 _Reference__hash=None _Reference__snaks=<Snaks @c5dba0 snaks={'P828': [<Snak @c5f730 _Snak__snaktype=<WikibaseSnakType.NO_VALUE: 'novalue'> _Snak__property_number='P828' _Snak__hash=None _Snak__datavalue={} _Snak__datatype='string'>]}> _Reference__snaks_order=[]>]>>], 'P3133': [<String @c5f580 _Claim__mainsnak=<Snak @c5f6d0 _Snak__snaktype=<WikibaseSnakType.UNKNOWN_VALUE: 'somevalue'> _Snak__property_number='P3133' _Snak__hash=None _Snak__datavalue={} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5f610 _Qualifiers__qualifiers={}> _Claim__qualifiers_order=[] _Claim__id=None _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @c5f7c0 _References__references=[]>>]}>"
},
"execution_count": 9,
"metadata": {},
@@ -282,18 +282,34 @@
"new_qualifiers.add(datatypes.String(prop_nr='P828', value='Item qualifier'))\n",
"\n",
"new_references = References()\n",
+ "\n",
+ "# Create a first reference\n",
"new_reference1 = Reference()\n",
"new_reference1.add(datatypes.String(prop_nr='P828', value='Item string reference'))\n",
"\n",
+ "# Create another reference\n",
"new_reference2 = Reference()\n",
"new_reference2.add(datatypes.String(prop_nr='P828', value='Another item string reference'))\n",
"\n",
+ "# Create a reference with \"no value\"\n",
+ "new_reference3 = Reference()\n",
+ "new_reference3.add(datatypes.String(prop_nr='P828', snaktype=WikibaseSnakType.NO_VALUE))\n",
+ "\n",
+ "# Add all the references to the References object\n",
"new_references.add(new_reference1)\n",
"new_references.add(new_reference2)\n",
+ "new_references.add(new_reference3)\n",
+ "\n",
+ "# Create the claim with the qualifiers and refererences. Set rank as 'preferred'.\n",
+ "new_claim = datatypes.String(prop_nr='P31533', value='A String property', qualifiers=new_qualifiers,\n",
+ " references=new_references, rank=WikibaseRank.PREFERRED)\n",
+ "\n",
+ "new_item.claims.add(new_claim)\n",
"\n",
- "new_claim = datatypes.String(prop_nr='P31533', value='A String property', qualifiers=new_qualifiers, references=new_references)\n",
+ "# Create a claim with an unknown value\n",
+ "unknown_claim = datatypes.String(prop_nr='P3133', snaktype=WikibaseSnakType.UNKNOWN_VALUE)\n",
"\n",
- "new_item.claims.add(new_claim)"
+ "new_item.claims.add(unknown_claim)"
]
},
{
@@ -320,7 +336,7 @@
"outputs": [
{
"data": {
- "text/plain": "<ItemEntity @ed4640 _BaseEntity__api=<wikibaseintegrator.wikibaseintegrator.WikibaseIntegrator object at 0x000001D8C4ED42E0>\n\t _BaseEntity__title=None\n\t _BaseEntity__pageid=None\n\t _BaseEntity__lastrevid=579081\n\t _BaseEntity__type='item'\n\t _BaseEntity__id='Q225256'\n\t _BaseEntity__claims=<Claims @ed79a0 _Claims__claims={'P31533': [<String @ed7550 _Claim__mainsnak=<Snak @ed7160 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash='112d32b098a091cc1398c779e76c763a523d4ffc' _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @ed6fb0 _Qualifiers__qualifiers={'P828': [<Snak @ed71c0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='8d721edd0365e35ed006822601a4837b35e68fd6' _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=['P828'] _Claim__id='Q225256$A1CB5069-5FF4-4EE4-BE99-D1607BFFB705' _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @ed7010 _References__references=[<Reference @ed75b0 _Reference__hash='9820f3e32182f8b5575be8b9cf55b9c7e5fbf269' _Reference__snaks=<Snaks @ed6f20 snaks={'P828': [<Snak @ed7220 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='811577f0f42a7059f39bd6b169366bb1fb2f9af3' _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @ed76a0 _Reference__hash='0d2ff45b3eace5dd184ad5f4ac0d1c6eff35e4ac' _Reference__snaks=<Snaks @ed7490 snaks={'P828': [<Snak @ed7580 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='774c2b3d70f072fb26d05a95d24445fbc8b2534e' _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>]>>]}>\n\t _ItemEntity__labels=<Labels @ed74f0 _LanguageValues__values={'en': <LanguageValue @ed7130 _LanguageValue__language='en' _LanguageValue__value='New item' _LanguageValue__removed=False>, 'fr': <LanguageValue @ed7190 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>}>\n\t _ItemEntity__descriptions=<Descriptions @ed4280 _LanguageValues__values={'en': <LanguageValue @ed54e0 _LanguageValue__language='en' _LanguageValue__value='A freshly created element' _LanguageValue__removed=False>, 'fr': <LanguageValue @ed7040 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>}>\n\t _ItemEntity__aliases=<Aliases @ed4760 _Aliases__aliases={'en': [<Alias @ed6e60 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @ed76d0 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>\n\t _ItemEntity__sitelinks=<Sitelinks @ed6350 sitelinks={}>>"
+ "text/plain": "<ItemEntity @c23520 _BaseEntity__api=<wikibaseintegrator.wikibaseintegrator.WikibaseIntegrator object at 0x0000024546C23400>\n\t _BaseEntity__title=None\n\t _BaseEntity__pageid=None\n\t _BaseEntity__lastrevid=598021\n\t _BaseEntity__type='item'\n\t _BaseEntity__id='Q226304'\n\t _BaseEntity__claims=<Claims @c5f0d0 _Claims__claims={'P31533': [<String @c5fee0 _Claim__mainsnak=<Snak @c5fe20 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P31533' _Snak__hash='112d32b098a091cc1398c779e76c763a523d4ffc' _Snak__datavalue={'value': 'A String property', 'type': 'string'} _Snak__datatype='string'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5ef80 _Qualifiers__qualifiers={'P828': [<Snak @c5fca0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='8d721edd0365e35ed006822601a4837b35e68fd6' _Snak__datavalue={'value': 'Item qualifier', 'type': 'string'} _Snak__datatype='string'>]}> _Claim__qualifiers_order=['P828'] _Claim__id='Q226304$C318B066-FD5E-4766-BD03-5F881145511A' _Claim__rank=<WikibaseRank.PREFERRED: 'preferred'> _Claim__removed=False _Claim__references=<References @c5efb0 _References__references=[<Reference @c5d900 _Reference__hash='9820f3e32182f8b5575be8b9cf55b9c7e5fbf269' _Reference__snaks=<Snaks @c5fd30 snaks={'P828': [<Snak @c5d720 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='811577f0f42a7059f39bd6b169366bb1fb2f9af3' _Snak__datavalue={'value': 'Item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @c5f340 _Reference__hash='0d2ff45b3eace5dd184ad5f4ac0d1c6eff35e4ac' _Reference__snaks=<Snaks @c5f280 snaks={'P828': [<Snak @c5d3f0 _Snak__snaktype=<WikibaseSnakType.KNOWN_VALUE: 'value'> _Snak__property_number='P828' _Snak__hash='774c2b3d70f072fb26d05a95d24445fbc8b2534e' _Snak__datavalue={'value': 'Another item string reference', 'type': 'string'} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>, <Reference @c5e290 _Reference__hash='4968e32f26488317c52a8883b49cb160b39e3428' _Reference__snaks=<Snaks @c5ffd0 snaks={'P828': [<Snak @c5c7c0 _Snak__snaktype=<WikibaseSnakType.NO_VALUE: 'novalue'> _Snak__property_number='P828' _Snak__hash='6e63dffef5a685b86c63dafda7a4748cbe8b029e' _Snak__datavalue={} _Snak__datatype='string'>]}> _Reference__snaks_order=['P828']>]>>], 'P3133': [<MonolingualText @c5f520 _Claim__mainsnak=<Snak @c5d060 _Snak__snaktype=<WikibaseSnakType.UNKNOWN_VALUE: 'somevalue'> _Snak__property_number='P3133' _Snak__hash='4b66bd689df0c4cd59c2df014b4e6a97ee99240d' _Snak__datavalue={} _Snak__datatype='monolingualtext'> _Claim__type='statement' _Claim__qualifiers=<Qualifiers @c5ef50 _Qualifiers__qualifiers={}> _Claim__qualifiers_order=[] _Claim__id='Q226304$7B072F85-CDB5-4F8D-9F34-ABDE829581FC' _Claim__rank=<WikibaseRank.NORMAL: 'normal'> _Claim__removed=False _Claim__references=<References @c5feb0 _References__references=[]>>]}>\n\t _ItemEntity__labels=<Labels @c5ff10 _LanguageValues__values={'en': <LanguageValue @c5fdf0 _LanguageValue__language='en' _LanguageValue__value='New item' _LanguageValue__removed=False>, 'fr': <LanguageValue @c5c5b0 _LanguageValue__language='fr' _LanguageValue__value='Nouvel élément' _LanguageValue__removed=False>}>\n\t _ItemEntity__descriptions=<Descriptions @c234f0 _LanguageValues__values={'en': <LanguageValue @c5cfd0 _LanguageValue__language='en' _LanguageValue__value='A freshly created element' _LanguageValue__removed=False>, 
'fr': <LanguageValue @c5eda0 _LanguageValue__language='fr' _LanguageValue__value='Un élément fraichement créé' _LanguageValue__removed=False>}>\n\t _ItemEntity__aliases=<Aliases @c23550 _Aliases__aliases={'en': [<Alias @c5ef20 _LanguageValue__language='en' _LanguageValue__value='Item' _LanguageValue__removed=False>], 'fr': [<Alias @c5ed10 _LanguageValue__language='fr' _LanguageValue__value='Élément' _LanguageValue__removed=False>]}>\n\t _ItemEntity__sitelinks=<Sitelinks @c5d2a0 sitelinks={}>>"
},
"execution_count": 10,
"metadata": {},
diff --git a/wikibaseintegrator/models/claims.py b/wikibaseintegrator/models/claims.py
index 641ae1e..8a64564 100644
--- a/wikibaseintegrator/models/claims.py
+++ b/wikibaseintegrator/models/claims.py
@@ -8,7 +8,7 @@ from wikibaseintegrator.models.basemodel import BaseModel
from wikibaseintegrator.models.qualifiers import Qualifiers
from wikibaseintegrator.models.references import Reference, References
from wikibaseintegrator.models.snaks import Snak, Snaks
-from wikibaseintegrator.wbi_enums import ActionIfExists, WikibaseRank
+from wikibaseintegrator.wbi_enums import ActionIfExists, WikibaseRank, WikibaseSnakType
class Claims(BaseModel):
@@ -131,14 +131,15 @@ class Claims(BaseModel):
class Claim(BaseModel):
DTYPE = 'claim'
- def __init__(self, qualifiers: Optional[Qualifiers] = None, rank: Optional[WikibaseRank] = None, references: Optional[Union[References, List[Union[Claim, List[Claim]]]]] = None) -> None:
+ def __init__(self, qualifiers: Optional[Qualifiers] = None, rank: Optional[WikibaseRank] = None, references: Optional[Union[References, List[Union[Claim, List[Claim]]]]] = None, snaktype: WikibaseSnakType = WikibaseSnakType.KNOWN_VALUE) -> None:
"""
:param qualifiers:
:param rank:
:param references: A References object, a list of Claim object or a list of list of Claim object
+ :param snaktype:
"""
- self.mainsnak = Snak(datatype=self.DTYPE)
+ self.mainsnak = Snak(datatype=self.DTYPE, snaktype=snaktype)
self.type = 'statement'
self.qualifiers = qualifiers or Qualifiers()
self.qualifiers_order = []
diff --git a/wikibaseintegrator/models/snaks.py b/wikibaseintegrator/models/snaks.py
index 3d5f207..0388b7a 100644
--- a/wikibaseintegrator/models/snaks.py
+++ b/wikibaseintegrator/models/snaks.py
@@ -100,7 +100,7 @@ class Snak(BaseModel):
@datavalue.setter
def datavalue(self, value):
- if value is not None:
+ if value is not None and value != {}:
self.snaktype = WikibaseSnakType.KNOWN_VALUE
self.__datavalue = value
diff --git a/wikibaseintegrator/wbi_helpers.py b/wikibaseintegrator/wbi_helpers.py
index 107e274..2c539b9 100644
--- a/wikibaseintegrator/wbi_helpers.py
+++ b/wikibaseintegrator/wbi_helpers.py
@@ -472,6 +472,28 @@ def search_entities(search_string: str, language: Optional[str] = None, strict_l
return results
+def fulltext_search(search: str, max_results: int = 50, allow_anonymous: bool = True, **kwargs: Any) -> List[Dict[str, Any]]:
+ """
+ Perform a fulltext search on the mediawiki instance.
+ It's an exception to the "only wikibase related function" rule! WikibaseIntegrator is focused on wikibase-only functions to avoid spreading out and covering all functions of MediaWiki.
+
+ :param search: Search for page titles or content matching this value. You can use the search string to invoke special search features, depending on what the wiki's search backend implements.
+ :param max_results: How many total pages to return. The value must be between 1 and 500.
+ :param allow_anonymous: Allow anonymous interaction with the MediaWiki API. 'True' by default.
+ :param kwargs: Extra parameters for mediawiki_api_call_helper()
+ :return:
+ """
+ params = {
+ 'action': 'query',
+ 'list': 'search',
+ 'srsearch': search,
+ 'srlimit': max_results,
+ 'format': 'json'
+ }
+
+ return mediawiki_api_call_helper(data=params, allow_anonymous=allow_anonymous, **kwargs)['query']['search']
+
+
def generate_entity_instances(entities: Union[str, List[str]], allow_anonymous: bool = True, **kwargs: Any) -> List[Tuple[str, BaseEntity]]:
"""
A method which allows for retrieval of a list of Wikidata entities. The method generates a list of tuples where the first value in the tuple is the entity's ID, whereas the
| LeMyst/WikibaseIntegrator | 50dc26d6396c65162027cd6f82fe07e8437fca09 | diff --git a/test/test_wbi_core.py b/test/test_wbi_core.py
index 4bb3941..74d4e9a 100644
--- a/test/test_wbi_core.py
+++ b/test/test_wbi_core.py
@@ -210,6 +210,9 @@ class TestWbiCore(unittest.TestCase):
with self.assertRaises(ValueError):
t4.mainsnak.snaktype = 'invalid_value'
+ t5 = String(prop_nr='P1', snaktype=WikibaseSnakType.NO_VALUE)
+ assert t5.mainsnak.get_json()['snaktype'] == WikibaseSnakType.NO_VALUE.value
+
def test_new_item_creation(self):
data = [
String(value='test1', prop_nr='P1'),
| Feature request: Add helper method for snaktype
As a user I want to indicate directly when instantiating a claim that it has no-value or unknown-value. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_wbi_core.py::TestWbiCore::test_snaktype"
] | [
"test/test_wbi_core.py::TestWbiCore::test_basedatatype_action_if_exists",
"test/test_wbi_core.py::TestWbiCore::test_count_references",
"test/test_wbi_core.py::TestWbiCore::test_description",
"test/test_wbi_core.py::TestWbiCore::test_entity_generator",
"test/test_wbi_core.py::TestWbiCore::test_get",
"test/test_wbi_core.py::TestWbiCore::test_get_property_list",
"test/test_wbi_core.py::TestWbiCore::test_get_qualifier_properties",
"test/test_wbi_core.py::TestWbiCore::test_item_engine",
"test/test_wbi_core.py::TestWbiCore::test_label",
"test/test_wbi_core.py::TestWbiCore::test_new_extra_item_creation",
"test/test_wbi_core.py::TestWbiCore::test_new_item_creation",
"test/test_wbi_core.py::TestWbiCore::test_rank",
"test/test_wbi_core.py::TestWbiCore::test_wd_search"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-09-23T18:12:57Z" | mit |
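Put together, the requested usage looks roughly as follows. The sketch only uses names that appear in this instance's patch and tests (`datatypes.String`, `WikibaseSnakType`, `mainsnak.get_json()`), and it assumes a WikibaseIntegrator build that already includes the new `snaktype` keyword.

```python
from wikibaseintegrator import datatypes
from wikibaseintegrator.wbi_enums import WikibaseSnakType

# A claim explicitly stating "no value": no datavalue is attached to the main snak.
no_value_claim = datatypes.String(prop_nr='P1', snaktype=WikibaseSnakType.NO_VALUE)
assert no_value_claim.mainsnak.get_json()['snaktype'] == WikibaseSnakType.NO_VALUE.value

# A claim with an "unknown value" (somevalue), as in the updated notebook example.
unknown_claim = datatypes.String(prop_nr='P3133', snaktype=WikibaseSnakType.UNKNOWN_VALUE)
```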
|
Libensemble__libensemble-540 | diff --git a/.travis.yml b/.travis.yml
index 18f0bd5a..1680bf51 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -102,7 +102,7 @@ install:
- conda install $COMPILERS
- conda install libblas libopenblas # Prevent 'File exists' error
- - if [[ "$TRAVIS_PYTHON_VERSION" == "3.8" ]]; then
+ - if [[ "$TRAVIS_PYTHON_VERSION" == "3.8" ]] || [[ "$PY" == "3.8" ]]; then
conda install nlopt mpi4py scipy mpich;
export PETSC_CONFIGURE_OPTIONS='--with-batch';
conda install petsc4py;
diff --git a/libensemble/gen_funcs/aposmm_localopt_support.py b/libensemble/gen_funcs/aposmm_localopt_support.py
index d277550d..20734a7e 100644
--- a/libensemble/gen_funcs/aposmm_localopt_support.py
+++ b/libensemble/gen_funcs/aposmm_localopt_support.py
@@ -6,6 +6,7 @@ __all__ = ['LocalOptInterfacer', 'run_local_nlopt', 'run_local_tao',
'run_local_dfols', 'run_local_scipy_opt', 'run_external_localopt']
import psutil
+from libensemble.tools.tools import osx_set_mp_method
import numpy as np
from libensemble.message_numbers import STOP_TAG, EVAL_GEN_TAG # Only used to simulate receiving from manager
from multiprocessing import Event, Process, Queue
@@ -14,6 +15,9 @@ import libensemble.gen_funcs
optimizer_list = ['petsc', 'nlopt', 'dfols', 'scipy', 'external']
optimizers = libensemble.gen_funcs.rc.aposmm_optimizers
+# Resolves multiprocessing issues with Python 3.8+ on macOS
+osx_set_mp_method()
+
if optimizers is None:
from petsc4py import PETSc
import nlopt
diff --git a/libensemble/history.py b/libensemble/history.py
index 6486bff2..826193ee 100644
--- a/libensemble/history.py
+++ b/libensemble/history.py
@@ -2,7 +2,7 @@ import numpy as np
import time
import logging
-from libensemble.tools.fields_keys import libE_fields
+from libensemble.tools.fields_keys import libE_fields, protected_libE_fields
logger = logging.getLogger(__name__)
@@ -96,7 +96,7 @@ class History:
for j, ind in enumerate(new_inds):
for field in returned_H.dtype.names:
-
+ assert field not in protected_libE_fields, "The field '" + field + "' is protected"
if np.isscalar(returned_H[field][j]):
self.H[field][ind] = returned_H[field][j]
else:
@@ -175,6 +175,7 @@ class History:
update_inds = D['sim_id']
for field in D.dtype.names:
+ assert field not in protected_libE_fields, "The field '" + field + "' is protected"
self.H[field][update_inds] = D[field]
self.H['gen_time'][update_inds] = time.time()
diff --git a/libensemble/libE.py b/libensemble/libE.py
index eaa1053b..a4bf816f 100644
--- a/libensemble/libE.py
+++ b/libensemble/libE.py
@@ -16,7 +16,6 @@ is dumped to file, and MPI abort is called.
__all__ = ['libE']
import os
-import platform
import logging
import random
import socket
@@ -34,7 +33,7 @@ from libensemble.comms.comms import QCommProcess, Timeout
from libensemble.comms.logs import manager_logging_config
from libensemble.comms.tcp_mgr import ServerQCommManager, ClientQCommManager
from libensemble.executors.executor import Executor
-from libensemble.tools.tools import _USER_SIM_ID_WARNING
+from libensemble.tools.tools import _USER_SIM_ID_WARNING, osx_set_mp_method
from libensemble.tools.check_inputs import check_inputs
logger = logging.getLogger(__name__)
@@ -314,10 +313,8 @@ def libE_local(sim_specs, gen_specs, exit_criteria,
# switched to 'spawn' by default due to 'fork' potentially causing crashes.
# These crashes haven't yet been observed with libE, but with 'spawn' runs,
# warnings about leaked semaphore objects are displayed instead.
- # The next several statements enforce 'fork' on macOS (Python 3.8)
- if platform.system() == 'Darwin':
- from multiprocessing import set_start_method
- set_start_method('fork', force=True)
+ # This function enforces 'fork' on macOS (Python 3.8)
+ osx_set_mp_method()
# Launch worker team and set up logger
wcomms = start_proc_team(nworkers, sim_specs, gen_specs, libE_specs)
@@ -434,6 +431,8 @@ def libE_tcp_mgr(sim_specs, gen_specs, exit_criteria,
port = libE_specs.get('port', 0)
authkey = libE_specs.get('authkey', libE_tcp_authkey())
+ osx_set_mp_method()
+
with ServerQCommManager(port, authkey.encode('utf-8')) as manager:
# Get port if needed because of auto-assignment
diff --git a/libensemble/libE_manager.py b/libensemble/libE_manager.py
index 452c3779..e594dc75 100644
--- a/libensemble/libE_manager.py
+++ b/libensemble/libE_manager.py
@@ -21,9 +21,10 @@ from libensemble.message_numbers import \
from libensemble.comms.comms import CommFinishedException
from libensemble.libE_worker import WorkerErrMsg
from libensemble.tools.tools import _USER_CALC_DIR_WARNING
-from libensemble.tools.fields_keys import libE_spec_calc_dir_combined
+from libensemble.tools.fields_keys import libE_spec_calc_dir_combined, protected_libE_fields
import cProfile
import pstats
+import copy
if tuple(np.__version__.split('.')) >= ('1', '15'):
from numpy.lib.recfunctions import repack_fields
@@ -130,6 +131,7 @@ class Manager:
timer = Timer()
timer.start()
self.date_start = timer.date_start.replace(' ', '_')
+ self.safe_mode = libE_specs.get('safe_mode', True)
self.hist = hist
self.libE_specs = libE_specs
self.alloc_specs = alloc_specs
@@ -261,7 +263,7 @@ class Manager:
self.wcomms[w-1].send(Work['tag'], Work)
work_rows = Work['libE_info']['H_rows']
if len(work_rows):
- if 'repack_fields' in dir():
+ if 'repack_fields' in globals():
self.wcomms[w-1].send(0, repack_fields(self.hist.H[Work['H_fields']][work_rows]))
else:
self.wcomms[w-1].send(0, self.hist.H[Work['H_fields']][work_rows])
@@ -428,10 +430,19 @@ class Manager:
# --- Main loop
def _alloc_work(self, H, persis_info):
- "Calls work allocation function from alloc_specs"
+ """
+ Calls work allocation function from alloc_specs. Copies protected libE
+ fields before the alloc_f call and ensures they weren't modified
+ """
+ if self.safe_mode:
+ saveH = copy.deepcopy(H[protected_libE_fields])
+
alloc_f = self.alloc_specs['alloc_f']
output = alloc_f(self.W, H, self.sim_specs, self.gen_specs, self.alloc_specs, persis_info)
+ if self.safe_mode:
+ assert np.array_equal(saveH, H[protected_libE_fields]), "The allocation function modified protected fields"
+
if len(output) == 2:
output = output + ((0,))
diff --git a/libensemble/tools/fields_keys.py b/libensemble/tools/fields_keys.py
index 0c454970..bc0d3761 100644
--- a/libensemble/tools/fields_keys.py
+++ b/libensemble/tools/fields_keys.py
@@ -12,6 +12,13 @@ libE_fields = [('sim_id', int), # Unique id of entry in H that was genera
]
# end_libE_fields_rst_tag
+protected_libE_fields = ['gen_worker',
+ 'gen_time',
+ 'given',
+ 'returned',
+ 'given_time',
+ 'sim_worker']
+
allowed_sim_spec_keys = ['sim_f', #
'in', #
'out', #
diff --git a/libensemble/tools/tools.py b/libensemble/tools/tools.py
index 96cfa3bd..4cfd1fd0 100644
--- a/libensemble/tools/tools.py
+++ b/libensemble/tools/tools.py
@@ -6,6 +6,7 @@ and user functions.
import os
import sys
import logging
+import platform
import numpy as np
import pickle
@@ -148,3 +149,15 @@ def add_unique_random_streams(persis_info, nstreams):
def eprint(*args, **kwargs):
"""Prints a user message to standard error"""
print(*args, file=sys.stderr, **kwargs)
+
+
+# ===================== OSX set multiprocessing start =======================
+# On Python 3.8 on macOS, the default start method for new processes was
+# switched to 'spawn' by default due to 'fork' potentially causing crashes.
+# These crashes haven't yet been observed with libE, but with 'spawn' runs,
+# warnings about leaked semaphore objects are displayed instead.
+# The next several statements enforce 'fork' on macOS (Python 3.8)
+def osx_set_mp_method():
+ if platform.system() == 'Darwin':
+ from multiprocessing import set_start_method
+ set_start_method('fork', force=True)
| Libensemble/libensemble | 49b28360d595c6396ab21159f2f8cf10522c4c71 | diff --git a/libensemble/tests/unit_tests/test_comms.py b/libensemble/tests/unit_tests/test_comms.py
index 87baf3b3..6c40afa2 100644
--- a/libensemble/tests/unit_tests/test_comms.py
+++ b/libensemble/tests/unit_tests/test_comms.py
@@ -7,12 +7,16 @@ Unit test of comms for libensemble.
import time
import queue
import logging
+from libensemble.tools.tools import osx_set_mp_method
import numpy as np
import libensemble.comms.comms as comms
import libensemble.comms.logs as commlogs
+osx_set_mp_method()
+
+
def test_qcomm():
"Test queue-based bidirectional communicator."
diff --git a/libensemble/tests/unit_tests/test_history.py b/libensemble/tests/unit_tests/test_history.py
index ac55d778..b7602811 100644
--- a/libensemble/tests/unit_tests/test_history.py
+++ b/libensemble/tests/unit_tests/test_history.py
@@ -188,6 +188,15 @@ def test_update_history_x_in():
assert hist.index == 10
assert hist.sim_count == 0
+ # Force assertion error when a libE protected field appears in gen_worker
+ H_o = np.zeros(size, dtype=gen_specs['out'] + [('given', bool)])
+ try:
+ hist.update_history_x_in(gen_worker, H_o)
+ except AssertionError:
+ assert 1, "Failed like it should have"
+ else:
+ assert 0, "Didn't fail like it should have"
+
def test_update_history_x_in_sim_ids():
hist, _, gen_specs, _, _ = setup.hist_setup2A_genout_sim_ids(7)
diff --git a/libensemble/tests/unit_tests_nompi/test_aaa_comms.py b/libensemble/tests/unit_tests_nompi/test_aaa_comms.py
index fc356267..7ce35bb5 100644
--- a/libensemble/tests/unit_tests_nompi/test_aaa_comms.py
+++ b/libensemble/tests/unit_tests_nompi/test_aaa_comms.py
@@ -10,8 +10,11 @@ since pytest slurps up everything (including all the modules) in one go.
import time
import signal
+from libensemble.tools.tools import osx_set_mp_method
import libensemble.comms.comms as comms
+osx_set_mp_method()
+
def test_qcomm_proc_terminate1():
"Test that an already-done QCommProcess gracefully handles terminate()."
| Investigate dependency issues for Python 3.8 on macOS Travis jobs
See: https://travis-ci.org/github/Libensemble/libensemble/builds/731087713 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"libensemble/tests/unit_tests/test_comms.py::test_qcomm",
"libensemble/tests/unit_tests/test_comms.py::test_missing_handler",
"libensemble/tests/unit_tests/test_comms.py::test_gen_comm_handler",
"libensemble/tests/unit_tests/test_comms.py::test_sim_comm_handler",
"libensemble/tests/unit_tests/test_comms.py::test_comm_eval",
"libensemble/tests/unit_tests/test_comms.py::test_qcomm_threadproc",
"libensemble/tests/unit_tests/test_comms.py::test_comm_logging",
"libensemble/tests/unit_tests/test_history.py::test_hist_init_1A_H0",
"libensemble/tests/unit_tests/test_history.py::test_update_history_x_in",
"libensemble/tests/unit_tests/test_history.py::test_update_history_x_in_sim_ids",
"libensemble/tests/unit_tests/test_history.py::test_update_history_f",
"libensemble/tests/unit_tests/test_history.py::test_update_history_f_vec",
"libensemble/tests/unit_tests_nompi/test_aaa_comms.py::test_qcomm_proc_terminate1",
"libensemble/tests/unit_tests_nompi/test_aaa_comms.py::test_qcomm_proc_terminate2",
"libensemble/tests/unit_tests_nompi/test_aaa_comms.py::test_qcomm_proc_terminate3",
"libensemble/tests/unit_tests_nompi/test_aaa_comms.py::test_qcomm_proc_terminate4"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-29T19:29:59Z" | bsd-3-clause |
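The `osx_set_mp_method()` helper added in this patch is the standard-library idiom for the macOS start-method change. Shown standalone (a generic sketch, not libEnsemble code), it is simply:

```python
import multiprocessing
import platform

def force_fork_on_macos():
    # Python 3.8 changed the default start method on macOS from 'fork' to 'spawn';
    # forcing 'fork' restores the pre-3.8 behaviour the existing code relies on.
    if platform.system() == 'Darwin':
        multiprocessing.set_start_method('fork', force=True)

if __name__ == '__main__':
    force_fork_on_macos()   # call before any Process or Queue objects are created
    # ... start worker processes as usual ...
```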
|
Lucas-C__pre-commit-hooks-10 | diff --git a/pre_commit_hooks/insert_license.py b/pre_commit_hooks/insert_license.py
index ecd7dfa..8e77863 100755
--- a/pre_commit_hooks/insert_license.py
+++ b/pre_commit_hooks/insert_license.py
@@ -22,10 +22,12 @@ def main(argv=None):
prefixed_license = ['{}{}{}'.format(comment_prefix, ' ' if line.strip() else '', line)
for line in license_file.readlines()]
eol = '\r\n' if prefixed_license[0][-2:] == '\r\n' else '\n'
+ if not prefixed_license[-1].endswith(eol):
+ prefixed_license[-1] += eol
if comment_start:
prefixed_license = [comment_start + eol] + prefixed_license
if comment_end:
- prefixed_license = prefixed_license + [eol + comment_end]
+ prefixed_license = prefixed_license + [comment_end + eol]
changes_made = False
for src_filepath in args.filenames:
@@ -42,7 +44,7 @@ def main(argv=None):
src_file.write(''.join(src_file_content))
changes_made = True
elif not args.remove_header:
- src_file_content = prefixed_license + [eol + eol] + src_file_content
+ src_file_content = prefixed_license + [eol] + src_file_content
with open(src_filepath, 'w') as src_file:
src_file.write(''.join(src_file_content))
changes_made = True
| Lucas-C/pre-commit-hooks | 01b0f0e1ebb161dc0b653a11d8dab96f7b379531 | diff --git a/tests/insert_license_test.py b/tests/insert_license_test.py
index 06522c7..6743d97 100755
--- a/tests/insert_license_test.py
+++ b/tests/insert_license_test.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
from contextlib import contextmanager
+from itertools import product
import os, pytest, shutil
from pre_commit_hooks.insert_license import main as insert_license
@@ -8,20 +9,26 @@ from pre_commit_hooks.insert_license import find_license_header_index
@pytest.mark.parametrize(
- ('src_file_path', 'comment_prefix', 'new_src_file_expected'),
- (
- ('module_without_license.py', '#', 'module_with_license.py'),
- ('module_with_license.py', '#', False),
- ('module_with_license_and_shebang.py', '#', False),
- ('module_without_license.groovy', '//', 'module_with_license.groovy'),
- ('module_with_license.groovy', '//', False),
- ),
+ ('license_file_path', 'src_file_path', 'comment_prefix', 'new_src_file_expected'),
+ map(lambda a: a[:1] + a[1], product( # combine license files with other args
+ ('LICENSE_with_trailing_newline.txt', 'LICENSE_without_trailing_newline.txt'),
+ (
+ ('module_without_license.py', '#', 'module_with_license.py'),
+ ('module_with_license.py', '#', False),
+ ('module_with_license_and_shebang.py', '#', False),
+ ('module_without_license.groovy', '//', 'module_with_license.groovy'),
+ ('module_with_license.groovy', '//', False),
+ ('module_without_license.css', '/*| *| */', 'module_with_license.css'),
+ ('module_with_license.css', '/*| *| */', False),
+ ),
+ )),
)
-def test_insert_license(src_file_path, comment_prefix, new_src_file_expected, tmpdir):
+def test_insert_license(license_file_path, src_file_path, comment_prefix, new_src_file_expected, tmpdir):
with chdir_to_test_resources():
path = tmpdir.join('src_file_path')
shutil.copy(src_file_path, path.strpath)
- assert insert_license(['--comment-style', comment_prefix, path.strpath]) == (1 if new_src_file_expected else 0)
+ args = ['--license-filepath', license_file_path, '--comment-style', comment_prefix, path.strpath]
+ assert insert_license(args) == (1 if new_src_file_expected else 0)
if new_src_file_expected:
with open(new_src_file_expected) as expected_content_file:
expected_content = expected_content_file.read()
@@ -42,18 +49,21 @@ def test_is_license_present(src_file_content, expected_index):
@pytest.mark.parametrize(
- ('src_file_path', 'is_python', 'new_src_file_expected'),
- (
- ('module_with_license.css', False, 'module_without_license.css'),
- ('module_without_license.css', False, False),
- ('module_with_license_and_shebang.py', True, 'module_without_license_and_shebang.py'),
- ),
+ ('license_file_path', 'src_file_path', 'is_python', 'new_src_file_expected'),
+ map(lambda a: a[:1] + a[1], product( # combine license files with other args
+ ('LICENSE_with_trailing_newline.txt', 'LICENSE_without_trailing_newline.txt'),
+ (
+ ('module_with_license.css', False, 'module_without_license.css'),
+ ('module_without_license.css', False, False),
+ ('module_with_license_and_shebang.py', True, 'module_without_license_and_shebang.py'),
+ ),
+ )),
)
-def test_remove_license(src_file_path, is_python, new_src_file_expected, tmpdir):
+def test_remove_license(license_file_path, src_file_path, is_python, new_src_file_expected, tmpdir):
with chdir_to_test_resources():
path = tmpdir.join('src_file_path')
shutil.copy(src_file_path, path.strpath)
- argv = ['--remove-header', path.strpath]
+ argv = ['--license-filepath', license_file_path, '--remove-header', path.strpath]
if is_python:
argv = ['--comment-style', '#'] + argv
else:
diff --git a/tests/resources/LICENSE_with_trailing_newline.txt b/tests/resources/LICENSE_with_trailing_newline.txt
new file mode 100755
index 0000000..cbbe3c0
--- /dev/null
+++ b/tests/resources/LICENSE_with_trailing_newline.txt
@@ -0,0 +1,3 @@
+Copyright (C) 2017 Teela O'Malley
+
+Licensed under the Apache License, Version 2.0 (the "License");
| Trailing newline handling in insert-license
Currently there is a rather annoying issue in how a LICENSE.txt file with a trailing newline is handled.
Our `LICENSE.txt` contains, in abstract, with newlines made explicit:
```
line1\n
line2\n
line3\n
```
so the last line ends in a newline. Nothing unusual here.
But this causes the hook to want to insert license headers **twice** when using a three-element comment style header, like `/*| *| */`.
That's because the above `LICENSE.txt` content is then transformed to the `prefixed_license` list:
```python
['/*\n',
' * line1\n',
' * line2\n',
' * line3\n',
'\n */']
```
Note the `\n` character on the last line. The first time you add a license header, things go sort-of fine, and the files end up with
```
/*\n
* line1\n
* line2\n
* line3\n
\n
*/\n
File contents start here
```
Note the extra, empty newline in the header. That by itself is a little irksome; we didn't need an extra newline there.
But the next time such a file is modified and the commit hook runs again, you can't detect that the header is *already present*, because the `find_license_header_index()` function fails to account for the extra newline. There isn't an extra element in `prefixed_license` for that line; it's a newline prefixed in the wrong place, at the start of an entry.
That's because the source lines `\n` (the empty line inserted by the script), and `\n #}` (the line in the `prefixed_license` list) can never line up. The newline exists in `prefixed_license`, but not on its own.
The work-around is to commit a LICENSE.txt file with no trailing newline.
A proper fix would append the newline to the last license line only if it isn't already there, rather than prefixing it to `comment_end`. `comment_end` itself also needs a newline character, though:
```python
if not prefixed_license[-1].endswith(eol):
prefixed_license[-1] += eol # make sure the license text ends in a newline
if comment_start:
prefixed_license = [comment_start + eol] + prefixed_license
if comment_end:
prefixed_license = prefixed_license + [comment_end + eol]
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_without_license.py-#-module_with_license.py]",
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_without_license.groovy-//-module_with_license.groovy]",
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_without_license.css-/*|"
] | [
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_with_license.py-#-False]",
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_with_license_and_shebang.py-#-False]",
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_with_license.groovy-//-False]",
"tests/insert_license_test.py::test_insert_license[LICENSE_with_trailing_newline.txt-module_with_license.css-/*|",
"tests/insert_license_test.py::test_is_license_present[src_file_content0-None]",
"tests/insert_license_test.py::test_is_license_present[src_file_content1-0]",
"tests/insert_license_test.py::test_is_license_present[src_file_content2-1]",
"tests/insert_license_test.py::test_remove_license[LICENSE_with_trailing_newline.txt-module_with_license.css-False-module_without_license.css]",
"tests/insert_license_test.py::test_remove_license[LICENSE_with_trailing_newline.txt-module_without_license.css-False-False]",
"tests/insert_license_test.py::test_remove_license[LICENSE_with_trailing_newline.txt-module_with_license_and_shebang.py-True-module_without_license_and_shebang.py]"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2018-08-14T17:26:04Z" | mit |
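To make the fixed behaviour concrete, here is a self-contained sketch of the header construction after the patch above, using a stand-in license that ends with a trailing newline (the previously problematic case):

```python
# Stand-in license text that ends with a trailing newline.
license_lines = ["line1\n", "line2\n", "line3\n"]
comment_start, comment_prefix, comment_end = "/*", " *", " */"
eol = "\n"

prefixed_license = ["{}{}{}".format(comment_prefix, " " if line.strip() else "", line)
                    for line in license_lines]
# Fixed logic: make sure the license text itself ends with a newline ...
if not prefixed_license[-1].endswith(eol):
    prefixed_license[-1] += eol
# ... and give comment_end its own trailing newline instead of a leading one.
if comment_start:
    prefixed_license = [comment_start + eol] + prefixed_license
if comment_end:
    prefixed_license = prefixed_license + [comment_end + eol]

print("".join(prefixed_license), end="")
# /*
#  * line1
#  * line2
#  * line3
#  */
```

With the old ordering, the stray `"\n */"` element produced the extra blank line and broke re-detection of the header; with the newline appended to the license text and to `comment_end`, the header round-trips cleanly.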
|
LukeCarrier__mkdocs-drawio-exporter-15 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2b50461..43a45bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
## 0.6.0: ease containerisation
* New `drawio_args` option allows passing additional args to the Draw.io CLI
+* Improve handling of cases where Draw.io reports a successful export but doesn't write an output file
## 0.5.0: support MkDocs 1.1
diff --git a/mkdocsdrawioexporter/exporter.py b/mkdocsdrawioexporter/exporter.py
index 465e621..74352bd 100644
--- a/mkdocsdrawioexporter/exporter.py
+++ b/mkdocsdrawioexporter/exporter.py
@@ -199,13 +199,15 @@ class DrawIoExporter:
:param list(str) drawio_args: Additional arguments to append to the Draw.io export command.
:param str cache_dir: Export cache directory.
:param str format: Desired export format.
- :return str: Cached export filename.
+ :return tuple(str, int): Cached export filename.
"""
+ cache_filename = self.make_cache_filename(source_rel, page_index, cache_dir)
+ exit_status = None
+
if not drawio_executable:
self.log.warn('Skipping build of "{}" as Draw.io executable not available'.format(source))
- return
+ return (cache_filename, exit_status)
- cache_filename = self.make_cache_filename(source_rel, page_index, cache_dir)
if self.use_cached_file(source, cache_filename):
self.log.debug('Source file appears unchanged; using cached copy from "{}"'.format(cache_filename))
else:
@@ -213,11 +215,8 @@ class DrawIoExporter:
exit_status = self.export_file(
source, page_index, cache_filename,
drawio_executable, drawio_args, format)
- if exit_status != 0:
- self.log.error('Export failed with exit status {}'.format(exit_status))
- return
- return cache_filename
+ return (cache_filename, exit_status)
def make_cache_filename(self, source, page_index, cache_dir):
"""Make the cached filename.
diff --git a/mkdocsdrawioexporter/plugin.py b/mkdocsdrawioexporter/plugin.py
index ae02db1..6ebaae5 100644
--- a/mkdocsdrawioexporter/plugin.py
+++ b/mkdocsdrawioexporter/plugin.py
@@ -78,12 +78,16 @@ class DrawIoExporterPlugin(mkdocs.plugins.BasePlugin):
source.source_rel, source.page_index, self.config['format'])
abs_src_path = os.path.join(config['docs_dir'], source.source_rel)
abs_dest_path = os.path.join(config['site_dir'], dest_rel_path)
- cache_filename = self.exporter.ensure_file_cached(
+ cache_filename, exit_status = self.exporter.ensure_file_cached(
abs_src_path, source.source_rel, source.page_index,
self.config['drawio_executable'], self.config['drawio_args'],
self.config['cache_dir'], self.config['format'])
+ if exit_status != 0:
+ log.error('Export failed with exit status {}; skipping copy'.format(exit_status))
+ continue
+
try:
copy_file(cache_filename, abs_dest_path)
except FileNotFoundError:
- log.exception('Output file not created in cache')
+ log.warn('Export successful, but wrote no output file')
| LukeCarrier/mkdocs-drawio-exporter | 87ba1bc5281b3297fb93c59403eb17526bfe2f7d | diff --git a/mkdocsdrawioexporter/tests/exporter.py b/mkdocsdrawioexporter/tests/exporter.py
index 40de740..d8ccf66 100644
--- a/mkdocsdrawioexporter/tests/exporter.py
+++ b/mkdocsdrawioexporter/tests/exporter.py
@@ -109,9 +109,10 @@ class ExporterTests(unittest.TestCase):
self.exporter.export_file = MagicMock()
self.exporter.export_file.return_value = 0
- result = self.exporter.ensure_file_cached(
+ cache_filename, exit_status = self.exporter.ensure_file_cached(
source, source_rel, 0, drawio_executable, [], cache_dir, 'svg')
- assert result == self.exporter.make_cache_filename.return_value
+ assert cache_filename == self.exporter.make_cache_filename.return_value
+ assert exit_status == 0
def test_ensure_file_cached_aborts_if_drawio_executable_unavailable(self):
source = sep + join('docs', 'diagram.drawio')
@@ -124,10 +125,10 @@ class ExporterTests(unittest.TestCase):
self.log.warn = MagicMock()
- result = self.exporter.ensure_file_cached(
+ cache_filename, exit_status = self.exporter.ensure_file_cached(
source, source_rel, 0, drawio_executable, [], cache_dir, 'svg')
- assert result == None
+ assert exit_status == None
self.log.warn.assert_called_once()
def test_ensure_file_cached_skips_export_if_cache_fresh(self):
@@ -145,14 +146,15 @@ class ExporterTests(unittest.TestCase):
self.exporter.export_file = MagicMock()
self.exporter.export_file.return_value = 0
- result = self.exporter.ensure_file_cached(
+ cache_filename, exit_status = self.exporter.ensure_file_cached(
source, source_rel, 0, drawio_executable, [], cache_dir, 'svg')
- assert result == self.exporter.make_cache_filename.return_value
+ assert cache_filename == self.exporter.make_cache_filename.return_value
+ assert exit_status == None
self.exporter.use_cached_file.assert_called_once()
assert not self.exporter.export_file.called
- def test_ensure_file_cached_logs_error_if_export_fails(self):
+ def test_ensure_file_cached_returns_exit_status_if_non_zero(self):
source = sep + join('docs', 'diagram.drawio')
source_rel = 'diagram.drawio'
drawio_executable = sep + join('bin', 'drawio')
@@ -169,11 +171,10 @@ class ExporterTests(unittest.TestCase):
self.log.error = MagicMock()
- result = self.exporter.ensure_file_cached(
+ cache_filename, exit_status = self.exporter.ensure_file_cached(
source, source_rel, 0, drawio_executable, [], cache_dir, 'svg')
- assert result == None
- self.log.error.assert_called_once()
+ assert exit_status == 1
def test_make_cache_filename(self):
cache_dir = sep + 'docs'
| Empty document crashes build
Hi, this might be by design currently, but referring to an empty tab in a multipage document causes a crash:
```
Error: Export failed: /docs/content/technical/network/site_design.drawio
ERROR - Output file not created in cache
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/mkdocsdrawioexporter/plugin.py", line 85, in on_post_build
copy_file(cache_filename, abs_dest_path)
File "/usr/local/lib/python3.7/site-packages/mkdocs/utils/__init__.py", line 100, in copy_file
shutil.copyfile(source_path, output_path)
File "/usr/lib64/python3.7/shutil.py", line 120, in copyfile
with open(src, 'rb') as fsrc:
FileNotFoundError: [Errno 2] No such file or directory: '/docs/content/cache/9a7d5d6802558d95399cdc2f0d36a830a92be749-3'
```
As a workaround I could just put some object in the tab but ... I think as a user I'd expect an empty tab to just produce empty output, somehow? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_ensure_file_cached",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_ensure_file_cached_aborts_if_drawio_executable_unavailable",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_ensure_file_cached_returns_exit_status_if_non_zero",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_ensure_file_cached_skips_export_if_cache_fresh"
] | [
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_drawio_executable_paths_warns_on_unknown_platform",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_export_file",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_export_file_honours_drawio_args",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_export_file_logs_exc_on_raise",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_filter_cache_files",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_make_cache_filename",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_cache_dir_defaults",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_cache_dir_resolves_relative_path",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_drawio_executable_aborts_on_missing_executable",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_drawio_executable_logs_error_on_failure",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_drawio_executable_on_path",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_prepare_drawio_executable_platform_specific",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_rewrite_image_embeds",
"mkdocsdrawioexporter/tests/exporter.py::ExporterTests::test_use_cached_file"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-04-19T00:10:35Z" | mit |
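The shape of the fix above is "return the exit status together with the cache path, and let the caller decide what to copy". A stubbed sketch of that pattern (function names follow the plugin, but the parameters and bodies here are assumptions, not the plugin's real signatures):

```python
import logging

log = logging.getLogger(__name__)

def ensure_file_cached(source, cache_filename, run_export):
    """Sketch: always return (cache_filename, exit_status) instead of a bare None."""
    exit_status = run_export(source, cache_filename)   # e.g. the Draw.io CLI return code
    return cache_filename, exit_status

def export_and_copy(source, cache_filename, dest, run_export, copy_file):
    cache_filename, exit_status = ensure_file_cached(source, cache_filename, run_export)
    if exit_status != 0:
        log.error('Export failed with exit status %s; skipping copy', exit_status)
        return
    try:
        copy_file(cache_filename, dest)
    except FileNotFoundError:
        # The CLI reported success but wrote no file (the empty-tab case reported above).
        log.warning('Export successful, but wrote no output file')
```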
|
M0r13n__pyais-108 | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index ca7d803..7e46af0 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,6 +1,11 @@
====================
pyais CHANGELOG
====================
+-------------------------------------------------------------------------------
+ Version 2.5.1 26 Feb 2023
+-------------------------------------------------------------------------------
+* closes https://github.com/M0r13n/pyais/issues/107
+ * fixes a rounding error for lat/lon values
-------------------------------------------------------------------------------
Version 2.5.0 14 Jan 2023
-------------------------------------------------------------------------------
diff --git a/README.md b/README.md
index eda8c0d..08dc62b 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
[![Documentation Status](https://readthedocs.org/projects/pyais/badge/?version=latest)](https://pyais.readthedocs.io/en/latest/?badge=latest)
AIS message encoding and decoding. 100% pure Python. Supports AIVDM/AIVDO messages. Supports single messages, files and
-TCP/UDP sockets.
+TCP/UDP sockets. This library has been used and tested extensively in representative real-world scenarios. This includes tests with live feeds from [Spire](https://spire.com/maritime/), the [Norwegian Coastal Administration](https://kystverket.no/navigasjonstjenester/ais/tilgang-pa-ais-data/) and others. I test each major release against a selection of public and non-public data sources to ensure the broadest possible compatibility.
You can find the full documentation on [readthedocs](https://pyais.readthedocs.io/en/latest/).
diff --git a/pyais/__init__.py b/pyais/__init__.py
index 6c0568e..48f0804 100644
--- a/pyais/__init__.py
+++ b/pyais/__init__.py
@@ -5,7 +5,7 @@ from pyais.decode import decode
from pyais.tracker import AISTracker, AISTrack
__license__ = 'MIT'
-__version__ = '2.5.0'
+__version__ = '2.5.1'
__author__ = 'Leon Morten Richter'
__all__ = (
diff --git a/pyais/messages.py b/pyais/messages.py
index 527ef6c..b7616e8 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -742,7 +742,7 @@ def to_speed(v: typing.Union[int, float]) -> float:
def from_lat_lon(v: typing.Union[int, float]) -> float:
- return float(v) * 600000.0
+ return round(float(v) * 600000.0)
def to_lat_lon(v: typing.Union[int, float]) -> float:
@@ -750,7 +750,7 @@ def to_lat_lon(v: typing.Union[int, float]) -> float:
def from_lat_lon_600(v: typing.Union[int, float]) -> float:
- return float(v) * 600.0
+ return round(float(v) * 600.0)
def to_lat_lon_600(v: typing.Union[int, float]) -> float:
| M0r13n/pyais | 3447a48c4be534ec2220db8ac216b47314182b65 | diff --git a/tests/test_decode.py b/tests/test_decode.py
index b4d6c32..cea212b 100644
--- a/tests/test_decode.py
+++ b/tests/test_decode.py
@@ -6,7 +6,7 @@ import textwrap
import typing
import unittest
-from pyais import NMEAMessage, encode_dict
+from pyais import NMEAMessage, encode_dict, encode_msg
from pyais.ais_types import AISType
from pyais.constants import (EpfdType, ManeuverIndicator, NavAid,
NavigationStatus, ShipType, StationType, SyncState,
@@ -1542,3 +1542,16 @@ class TestAIS(unittest.TestCase):
with self.assertRaises(UnknownMessageException):
decode_nmea_line(b",n:4,r:35435435435,foo bar 200")
+
+ def test_that_lat_and_long_are_rounded_correctly(self):
+ """Original Issue: https://github.com/M0r13n/pyais/issues/107
+ TL;DR: There was a rounding issue with certain values for lat and lon.
+ Decoding, encoding and then decoding again led to slight changes to lat/lon."""
+
+ orig = '!AIVDM,1,1,,A,100u3g@0291Q1>BW6uDUwDk00LE@,0*74'
+
+ first_decode = decode(orig)
+ encoded = encode_msg(first_decode)[0]
+ second_decode = decode(encoded)
+
+ self.assertEqual(first_decode, second_decode)
diff --git a/tests/test_tag_block.py b/tests/test_tag_block.py
index 2f19462..bc107e2 100644
--- a/tests/test_tag_block.py
+++ b/tests/test_tag_block.py
@@ -35,6 +35,29 @@ class TagBlockTestCase(unittest.TestCase):
self.assertEqual(tb.relative_time, None)
self.assertEqual(tb.text, None)
+ def test_spire_maritime_format(self):
+ """https://documentation.spire.com/tcp-stream-v2/the-nmea-message-encoding-format/"""
+ text = textwrap.dedent("""
+ \\c:1503079517*55\\!AIVDM,1,1,,B,C6:b0Kh09b3t1K4ChsS2FK008NL>`2CT@2N000000000S4h8S400,0*50
+ \\c:1503079517*53\\!AIVDM,1,1,,B,16:Vk1h00g8O=vRBDhNp0nKp0000,0*40
+ \\c:1503079517*53\\!AIVDM,1,1,,B,18155hh00u0DEU`N1F@Bg22R06@D,0*60
+ \\c:1503079517*53\\!AIVDM,1,1,,A,83aGFQ@j2ddtMH1b@g?b`7mL0,0*55
+ \\c:1503079517*53\\!AIVDM,2,1,9,A,53m@FJ400000hT5<0008E8q@TpF000000000000T2P3425rg0:53kThQDQh0,0*48
+ \\c:1503079517*53\\!AIVDM,2,2,9,A,00000000000,2*2D
+ \\c:1503079517*52\\!AIVDM,1,1,,A,13oP50Oi420UAtPgp@UPrP1d01,0*1A
+ \\c:1503079517*52\\!AIVDM,1,1,,B,B3mISo000H;wsB8SetMnww`5oP06,0*7C
+ \\c:1503079517*53\\!AIVDM,2,1,0,B,53aIjwh000010CSK7R04lu8F222222222222221?9@<297?o060@C51D`888,0*1B
+ """)
+
+ messages = [line.encode() for line in text.split() if line]
+
+ with IterMessages(messages) as s:
+ for msg in s:
+ msg.tag_block.init()
+ decoded = msg.decode()
+ self.assertIsNotNone(decoded.mmsi)
+ self.assertEqual(msg.tag_block.receiver_timestamp, '1503079517')
+
def test_multiple_messages(self):
text = textwrap.dedent("""
\\s:2573535,c:1671533231*08\\!BSVDM,2,2,8,B,00000000000,2*36
| lat lon converters float accuracy
For the example sentence:
!AIVDM,1,1,,A,100u3g@0291Q1>BW6uDUwDk00LE@,0*74
decoding and then encoding will result in a change to lat/lon.
The problem line is:
```
elif d_type == float:
val = int(val)
bits = int_to_bin(val, width, signed=signed)
```
Suggested resolution:
```
elif d_type == float:
val = int(round(val))
bits = int_to_bin(val, width, signed=signed)
```
Kind Regards
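
A quick worked example of the truncation-versus-rounding difference (plain Python; the raw value below is a made-up stand-in, not decoded from the sentence above):

```python
# A float just below an integer truncates down with int() but rounds up with round():
almost_three = 2.9999999999999996
print(int(almost_three), round(almost_three))   # 2 3

# After decoding, lat = raw / 600000 is only the *nearest* float, which can land a
# hair below the true value, so int(lat * 600000.0) may re-encode one unit too low,
# while round(lat * 600000.0) recovers the original integer.
raw = 15643070                  # hypothetical raw latitude field, in 1/600000 degree
lat = raw / 600000
assert round(lat * 600000.0) == raw
```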
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_decode.py::TestAIS::test_that_lat_and_long_are_rounded_correctly"
] | [
"tests/test_decode.py::TestAIS::test_b64encode_str",
"tests/test_decode.py::TestAIS::test_b64encode_str_empty",
"tests/test_decode.py::TestAIS::test_bits2bytes",
"tests/test_decode.py::TestAIS::test_broken_messages",
"tests/test_decode.py::TestAIS::test_byte_stream",
"tests/test_decode.py::TestAIS::test_bytes2bits",
"tests/test_decode.py::TestAIS::test_common_invalid_inputs_to_the_decode_function",
"tests/test_decode.py::TestAIS::test_decode_1_speed",
"tests/test_decode.py::TestAIS::test_decode_and_merge",
"tests/test_decode.py::TestAIS::test_decode_does_not_raise_an_error_by_default",
"tests/test_decode.py::TestAIS::test_decode_does_not_raise_an_error_if_error_if_checksum_invalid_is_false",
"tests/test_decode.py::TestAIS::test_decode_does_raise_an_error_if_error_if_checksum_invalid_is_true",
"tests/test_decode.py::TestAIS::test_decode_into_bit_array_with_non_printable_characters",
"tests/test_decode.py::TestAIS::test_decode_out_of_order",
"tests/test_decode.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_decode.py::TestAIS::test_empty_channel",
"tests/test_decode.py::TestAIS::test_get_comm_state_type_18_itdma_base_indirect",
"tests/test_decode.py::TestAIS::test_get_comm_state_type_18_sotdma_base_inidrect",
"tests/test_decode.py::TestAIS::test_get_comm_state_type_18_sotdma_utc_direct",
"tests/test_decode.py::TestAIS::test_get_sotdma_comm_state_utc_direct",
"tests/test_decode.py::TestAIS::test_get_sotdma_comm_state_utc_direct_slot_number",
"tests/test_decode.py::TestAIS::test_get_sotdma_comm_state_utc_direct_slot_timeout",
"tests/test_decode.py::TestAIS::test_gh_ais_message_decode",
"tests/test_decode.py::TestAIS::test_invalid_timestamp_message",
"tests/test_decode.py::TestAIS::test_issue_46_a",
"tests/test_decode.py::TestAIS::test_issue_46_b",
"tests/test_decode.py::TestAIS::test_issue_50",
"tests/test_decode.py::TestAIS::test_issue_88",
"tests/test_decode.py::TestAIS::test_messages_with_proprietary_suffix",
"tests/test_decode.py::TestAIS::test_msg_too_short_enum_is_none",
"tests/test_decode.py::TestAIS::test_msg_type",
"tests/test_decode.py::TestAIS::test_msg_type_10_a",
"tests/test_decode.py::TestAIS::test_msg_type_10_b",
"tests/test_decode.py::TestAIS::test_msg_type_11",
"tests/test_decode.py::TestAIS::test_msg_type_12_a",
"tests/test_decode.py::TestAIS::test_msg_type_12_b",
"tests/test_decode.py::TestAIS::test_msg_type_13",
"tests/test_decode.py::TestAIS::test_msg_type_14",
"tests/test_decode.py::TestAIS::test_msg_type_15_a",
"tests/test_decode.py::TestAIS::test_msg_type_15_b",
"tests/test_decode.py::TestAIS::test_msg_type_16",
"tests/test_decode.py::TestAIS::test_msg_type_17_a",
"tests/test_decode.py::TestAIS::test_msg_type_17_b",
"tests/test_decode.py::TestAIS::test_msg_type_17_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_18",
"tests/test_decode.py::TestAIS::test_msg_type_18_speed",
"tests/test_decode.py::TestAIS::test_msg_type_19",
"tests/test_decode.py::TestAIS::test_msg_type_1_a",
"tests/test_decode.py::TestAIS::test_msg_type_1_b",
"tests/test_decode.py::TestAIS::test_msg_type_1_c",
"tests/test_decode.py::TestAIS::test_msg_type_20",
"tests/test_decode.py::TestAIS::test_msg_type_21",
"tests/test_decode.py::TestAIS::test_msg_type_22_addressed",
"tests/test_decode.py::TestAIS::test_msg_type_22_broadcast",
"tests/test_decode.py::TestAIS::test_msg_type_23",
"tests/test_decode.py::TestAIS::test_msg_type_24",
"tests/test_decode.py::TestAIS::test_msg_type_25_a",
"tests/test_decode.py::TestAIS::test_msg_type_25_b",
"tests/test_decode.py::TestAIS::test_msg_type_25_c",
"tests/test_decode.py::TestAIS::test_msg_type_25_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_26_a",
"tests/test_decode.py::TestAIS::test_msg_type_26_b",
"tests/test_decode.py::TestAIS::test_msg_type_26_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_27",
"tests/test_decode.py::TestAIS::test_msg_type_27_signed",
"tests/test_decode.py::TestAIS::test_msg_type_3",
"tests/test_decode.py::TestAIS::test_msg_type_4_a",
"tests/test_decode.py::TestAIS::test_msg_type_4_b",
"tests/test_decode.py::TestAIS::test_msg_type_5",
"tests/test_decode.py::TestAIS::test_msg_type_6",
"tests/test_decode.py::TestAIS::test_msg_type_6_json_reverse",
"tests/test_decode.py::TestAIS::test_msg_type_6_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_7",
"tests/test_decode.py::TestAIS::test_msg_type_8",
"tests/test_decode.py::TestAIS::test_msg_type_8_multipart",
"tests/test_decode.py::TestAIS::test_msg_type_8_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_9",
"tests/test_decode.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_decode.py::TestAIS::test_multiline_message",
"tests/test_decode.py::TestAIS::test_nmea_decode",
"tests/test_decode.py::TestAIS::test_nmea_decode_unknown_msg",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_creation",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_decoding",
"tests/test_decode.py::TestAIS::test_none_values_converter_for_all_messages",
"tests/test_decode.py::TestAIS::test_rot_decode_yields_expected_values",
"tests/test_decode.py::TestAIS::test_rot_encode_yields_expected_values",
"tests/test_decode.py::TestAIS::test_special_position_report",
"tests/test_decode.py::TestAIS::test_static_data_report",
"tests/test_decode.py::TestAIS::test_that_the_payload_does_not_change_when_encoding_decoding",
"tests/test_decode.py::TestAIS::test_timestamp_message",
"tests/test_decode.py::TestAIS::test_to_dict_non_enum",
"tests/test_decode.py::TestAIS::test_to_json",
"tests/test_decode.py::TestAIS::test_type_22_very_short",
"tests/test_decode.py::TestAIS::test_type_25_very_short",
"tests/test_decode.py::TestAIS::test_type_26_very_short",
"tests/test_decode.py::TestAIS::test_types_for_messages",
"tests/test_tag_block.py::TagBlockTestCase::test_multiple_messages",
"tests/test_tag_block.py::TagBlockTestCase::test_spire_maritime_format",
"tests/test_tag_block.py::TagBlockTestCase::test_tag_block_with_line_count",
"tests/test_tag_block.py::TagBlockTestCase::test_tag_block_with_multiple_unknown_fields",
"tests/test_tag_block.py::TagBlockTestCase::test_that_a_tag_block_is_lazily_evaluated",
"tests/test_tag_block.py::TagBlockTestCase::test_that_the_factory_is_gentle_with_malformed_tag_blocks",
"tests/test_tag_block.py::TagBlockTestCase::test_that_the_factory_pre_processes_correctly",
"tests/test_tag_block.py::TagBlockTestCase::test_that_the_factory_removes_the_leading_tag_block",
"tests/test_tag_block.py::TagBlockTestCase::test_that_unknown_tag_blocks_are_ignored",
"tests/test_tag_block.py::TagBlockTestCase::test_that_unknown_tag_blocks_can_exported_as_dicts"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-02-26T10:35:09Z" | mit |
|
M0r13n__pyais-41 | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 38e9c26..019cc19 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,6 +1,15 @@
====================
pyais CHANGELOG
====================
+-------------------------------------------------------------------------------
+ Version 1.6.3 5 Dec 2021
+-------------------------------------------------------------------------------
+
+* Correctly handles variable length messages (https://github.com/M0r13n/pyais/issues/40)
+ - prior versions were missing required length checks
+ - therefore some messages were not correctly decoded
+ - this affects Type 7, 15, 16, 20, 22, 24, 25 and 26
+
-------------------------------------------------------------------------------
Version 1.6.2 2 May 2021
-------------------------------------------------------------------------------
diff --git a/pyais/decode.py b/pyais/decode.py
index c067c2b..f8e8357 100644
--- a/pyais/decode.py
+++ b/pyais/decode.py
@@ -15,7 +15,7 @@ from pyais.constants import (
NavAid
)
from pyais.exceptions import UnknownMessageException, MissingMultipartMessageException, TooManyMessagesException
-from pyais.util import get_int, encode_bin_as_ascii6, get_mmsi
+from pyais.util import get_int, encode_bin_as_ascii6, get_mmsi, binary_data
def decode_msg_1(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
@@ -128,7 +128,7 @@ def decode_msg_6(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'retransmit': bit_arr[70],
'dac': get_int_from_data(72, 82),
'fid': get_int_from_data(82, 88),
- 'data': bit_arr[88:].to01()
+ 'data': binary_data(bit_arr, 88)
}
@@ -138,19 +138,39 @@ def decode_msg_7(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_7_binary_acknowledge
"""
get_int_from_data = partial(get_int, bit_arr)
- return {
+ length = len(bit_arr)
+ # Total length varies between 72 and 168 bits depending on the number of addressed ships
+ # Each address requires 32 bit
+ data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_mmsi(bit_arr, 8, 38),
'mmsi1': get_mmsi(bit_arr, 40, 70),
'mmsiseq1': get_int_from_data(70, 72),
- 'mmsi2': get_mmsi(bit_arr, 72, 102),
- 'mmsiseq2': get_int_from_data(102, 104),
- 'mmsi3': get_mmsi(bit_arr, 104, 134),
- 'mmsiseq3': get_int_from_data(134, 136),
- 'mmsi4': get_mmsi(bit_arr, 136, 166),
- 'mmsiseq4': get_int_from_data(166, 168)
+ 'mmsi2': None,
+ 'mmsiseq2': None,
+ 'mmsi3': None,
+ 'mmsiseq3': None,
+ 'mmsi4': None,
+ 'mmsiseq4': None,
}
+ if 72 < length <= 104:
+ data['mmsi2'] = get_mmsi(bit_arr, 72, 102)
+ data['mmsiseq2'] = get_int_from_data(102, 104)
+ elif 104 < length <= 136:
+ data['mmsi2'] = get_mmsi(bit_arr, 72, 102)
+ data['mmsiseq2'] = get_int_from_data(102, 104)
+ data['mmsi3'] = get_mmsi(bit_arr, 104, 134)
+ data['mmsiseq3'] = get_int_from_data(134, 136)
+ if 136 < length:
+ data['mmsi2'] = get_mmsi(bit_arr, 72, 102)
+ data['mmsiseq2'] = get_int_from_data(102, 104)
+ data['mmsi3'] = get_mmsi(bit_arr, 104, 134)
+ data['mmsiseq3'] = get_int_from_data(134, 136)
+ data['mmsi4'] = get_mmsi(bit_arr, 136, 166)
+ data['mmsiseq4'] = get_int_from_data(166, 168)
+
+ return data
def decode_msg_8(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
@@ -165,7 +185,7 @@ def decode_msg_8(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'mmsi': get_mmsi(bit_arr, 8, 38),
'dac': get_int_from_data(40, 50),
'fid': get_int_from_data(50, 56),
- 'data': bit_arr[56:].to01()
+ 'data': binary_data(bit_arr, 56)
}
@@ -259,7 +279,7 @@ def decode_msg_15(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_15_interrogation
"""
get_int_from_data = partial(get_int, bit_arr)
- return {
+ data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_mmsi(bit_arr, 8, 38),
@@ -268,30 +288,46 @@ def decode_msg_15(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'offset1_1': get_int_from_data(76, 88),
'type1_2': get_int_from_data(90, 96),
'offset1_2': get_int_from_data(96, 108),
- 'mmsi2': get_mmsi(bit_arr, 110, 140),
- 'type2_1': get_int_from_data(140, 146),
- 'offset2_1': get_int_from_data(146, 157),
+ 'mmsi2': None,
+ 'type2_1': None,
+ 'offset2_1': None,
}
+ if len(bit_arr) > 88:
+ # TODO (richter): there are more edge cases
+ data['mmsi2'] = get_mmsi(bit_arr, 110, 140)
+ data['type2_1'] = get_int_from_data(140, 146)
+ data['offset2_1'] = get_int_from_data(146, 157)
+
+ return data
+
def decode_msg_16(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
"""
Assignment Mode Command
- Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_16_assignment_mode_command
+ Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_16_assignment_mode_command
"""
get_int_from_data = partial(get_int, bit_arr)
- return {
+ data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_mmsi(bit_arr, 8, 38),
'mmsi1': get_mmsi(bit_arr, 40, 70),
'offset1': get_int_from_data(70, 82),
'increment1': get_int_from_data(82, 92),
- 'mmsi2': get_mmsi(bit_arr, 92, 122),
- 'offset2': get_int_from_data(122, 134),
- 'increment2': get_int_from_data(134, 144)
+ 'mmsi2': None,
+ 'offset2': None,
+ 'increment2': None
}
+ if len(data) > 96:
+ # If the message is 96 bits long it should be interpreted as a channel assignment for two stations
+ data['mmsi2'] = get_mmsi(bit_arr, 92, 122)
+ data['offset2'] = get_int_from_data(122, 134)
+ data['increment2'] = get_int_from_data(134, 144)
+
+ return data
+
def decode_msg_17(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
"""
@@ -305,7 +341,7 @@ def decode_msg_17(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'mmsi': get_mmsi(bit_arr, 8, 38),
'lon': get_int_from_data(40, 58, signed=True),
'lat': get_int_from_data(58, 75, signed=True),
- 'data': get_int_from_data(80, 816)
+ 'data': binary_data(bit_arr, 80)
}
@@ -375,7 +411,7 @@ def decode_msg_20(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_20_data_link_management_message
"""
get_int_from_data = partial(get_int, bit_arr)
- return {
+ data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_mmsi(bit_arr, 8, 38),
@@ -385,22 +421,41 @@ def decode_msg_20(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'timeout1': get_int_from_data(56, 59),
'increment1': get_int_from_data(59, 70),
- 'offset2': get_int_from_data(70, 82),
- 'number2': get_int_from_data(82, 86),
- 'timeout2': get_int_from_data(86, 89),
- 'increment2': get_int_from_data(89, 100),
+ 'offset2': None,
+ 'number2': None,
+ 'timeout2': None,
+ 'increment2': None,
- 'offset3': get_int_from_data(100, 112),
- 'number3': get_int_from_data(112, 116),
- 'timeout3': get_int_from_data(116, 119),
- 'increment3': get_int_from_data(110, 130),
+ 'offset3': None,
+ 'number3': None,
+ 'timeout3': None,
+ 'increment3': None,
- 'offset4': get_int_from_data(130, 142),
- 'number4': get_int_from_data(142, 146),
- 'timeout4': get_int_from_data(146, 149),
- 'increment4': get_int_from_data(149, 160),
+ 'offset4': None,
+ 'number4': None,
+ 'timeout4': None,
+ 'increment4': None,
}
+ length = len(bit_arr)
+ if 100 <= length:
+ data['offset2'] = get_int_from_data(70, 82)
+ data['number2'] = get_int_from_data(82, 86)
+ data['timeout2'] = get_int_from_data(86, 89)
+ data['increment2'] = get_int_from_data(89, 100)
+ if 130 <= length:
+ data['offset3'] = get_int_from_data(100, 112)
+ data['number3'] = get_int_from_data(112, 116)
+ data['timeout3'] = get_int_from_data(116, 119)
+ data['increment3'] = get_int_from_data(119, 130)
+ if 160 <= length:
+ data['offset4'] = get_int_from_data(130, 142)
+ data['number4'] = get_int_from_data(142, 146)
+ data['timeout4'] = get_int_from_data(146, 149)
+ data['increment4'] = get_int_from_data(149, 160)
+
+ return data
+
def decode_msg_21(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
"""
@@ -446,7 +501,6 @@ def decode_msg_22(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_mmsi(bit_arr, 8, 38),
-
'channel_a': get_int_from_data(40, 52),
'channel_b': get_int_from_data(52, 64),
'txrx': get_int_from_data(64, 68),
@@ -570,11 +624,11 @@ def decode_msg_25(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
- 'data': bit_arr[hi_ix:].to01()
+ 'data': binary_data(bit_arr, hi_ix)
}
else:
d = {
- 'data': bit_arr[lo_ix:].to01()
+ 'data': binary_data(bit_arr, lo_ix)
}
data.update(d)
return data
@@ -615,11 +669,11 @@ def decode_msg_26(bit_arr: bitarray.bitarray) -> Dict[str, Any]:
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
- 'data': bit_arr[hi_ix:radio_status_offset].to01()
+ 'data': binary_data(bit_arr, hi_ix, radio_status_offset)
}
else:
d = {
- 'data': bit_arr[lo_ix:radio_status_offset].to01()
+ 'data': binary_data(bit_arr, lo_ix, radio_status_offset)
}
data.update(d)
diff --git a/pyais/util.py b/pyais/util.py
index 5ef6529..a7cefd9 100644
--- a/pyais/util.py
+++ b/pyais/util.py
@@ -2,7 +2,7 @@ import warnings
from collections import OrderedDict
from functools import partial, reduce
from operator import xor
-from typing import Any, Generator, Hashable, TYPE_CHECKING, Callable
+from typing import Any, Generator, Hashable, TYPE_CHECKING, Callable, Optional
from bitarray import bitarray
@@ -88,7 +88,7 @@ def get_int(data: bitarray, ix_low: int, ix_high: int, signed: bool = False) ->
:param ix_low: the lower index of the sub-array
:param ix_high: the upper index of the sub-array
:param signed: True if the value should be interpreted as a signed integer
- :return: a normal integer (int)
+ :return: The integer value of the sub-array data[ix_low:ix_high]
"""
shift: int = (8 - ((ix_high - ix_low) % 8)) % 8
data = data[ix_low:ix_high]
@@ -96,13 +96,40 @@ def get_int(data: bitarray, ix_low: int, ix_high: int, signed: bool = False) ->
return i >> shift
+def binary_data(data: bitarray, ix_low: int, ix_high: Optional[int] = None) -> Optional[str]:
+ """
+ Get a sub_array of a bitarray as bitstring.
+
+ :param data: some bitarray
+ :param ix_low: the lower index of the sub-array
+ :param ix_high: the upper index of the sub-array
+ :return: The integer value of the sub-array data[ix_low:ix_high]
+ """
+ length = len(data)
+ if ix_high is None:
+ ix_high = length
+ if ix_low >= length or ix_high > length:
+ # Indices out of bounds
+ return None
+
+ return data[ix_low:ix_high].to01()
+
+
def get_mmsi(data: bitarray, ix_low: int, ix_high: int) -> str:
"""
A Maritime Mobile Service Identity (MMSI) is a series of nine digits.
Every digit is required and therefore we can NOT use a int.
See: issue #6
"""
+
mmsi_int: int = get_int(data, ix_low, ix_high)
+ if len(data) < ix_high:
+ # Remove padding from MMSIs shorter than 30 bits
+ mask = 0x3fffffff
+ d = ix_high - len(data)
+ mask ^= int(d * "1", 2)
+ mmsi_int &= mask
+
return str(mmsi_int).zfill(9)
| M0r13n/pyais | 552783096e0758df2e5f4b3f01af18bb94422c68 | diff --git a/tests/test_ais.py b/tests/test_ais.py
index 940a09a..9b355a4 100644
--- a/tests/test_ais.py
+++ b/tests/test_ais.py
@@ -197,6 +197,24 @@ class TestAIS(unittest.TestCase):
assert msg['destination'] == "NEW YORK"
assert msg['dte'] == 0
+ def test_msg_type_5_a(self):
+ content = decode_msg(
+ "!AIVDM,2,1,9,B,53nFBv01SJ<thHp6220H4heHTf2222222222221?50:454o<`9QSlUDp,0*09",
+ "!AIVDM,2,2,9,B,888888888888880,2*2E"
+ )
+
+ assert content['type'] == 5
+ assert content['mmsi'] == "258315000"
+ assert content['imo'] == 6514895
+ assert content['callsign'] == "LFNA"
+ assert content['shipname'] == "FALKVIK"
+ assert content['shiptype'].value == 79
+ assert content['to_bow'] == 40
+ assert content['to_stern'] == 10
+ assert content['to_port'] == 4
+ assert content['to_starboard'] == 5
+ assert content['destination'] == "FORUS"
+
def test_msg_type_6(self):
msg = NMEAMessage(b"!AIVDM,1,1,,B,6B?n;be:cbapalgc;i6?Ow4,2*4A").decode()
assert msg['seqno'] == 3
@@ -204,9 +222,27 @@ class TestAIS(unittest.TestCase):
assert msg['dac'] == 669
assert msg['fid'] == 11
- def test_msg_type_7(self):
+ def test_msg_type_7_a(self):
msg = NMEAMessage(b"!AIVDM,1,1,,A,702R5`hwCjq8,0*6B").decode()
+ assert msg['mmsi'] == "002655651"
assert msg['mmsi1'] == "265538450"
+ assert msg['mmsi2'] is None
+ assert msg['mmsi3'] is None
+ assert msg['mmsi4'] is None
+
+ def test_msg_type_7_b(self):
+ msg = NMEAMessage(b"!AIVDM,1,1,,B,7`0Pv1L:Ac8rbgPKHA8`P,2*56").decode()
+ assert msg['mmsi'] == "537411077"
+ assert msg['mmsi1'] == "043101326"
+ assert msg['mmsi2'] == "717096664"
+
+ def test_msg_type_7_c(self):
+ msg = NMEAMessage(b"!AIVDM,1,1,,A,7IiQ4T`UjA9lC;b:M<MWE@,4*01").decode()
+ assert msg['mmsi'] == "655901842"
+ assert msg['mmsi1'] == "158483613"
+ assert msg['mmsi2'] == "321823389"
+ assert msg['mmsi3'] == "836359488"
+ assert msg['mmsi4'] is None
def test_msg_type_8(self):
msg = NMEAMessage(b"!AIVDM,1,1,,A,85Mwp`1Kf3aCnsNvBWLi=wQuNhA5t43N`5nCuI=p<IBfVqnMgPGs,0*47").decode()
@@ -307,7 +343,7 @@ class TestAIS(unittest.TestCase):
assert msg['offset1_2'] == 617
assert msg['offset1_1'] == 516
- def test_msg_type_16(self):
+ def test_msg_type_16_b(self):
msg = NMEAMessage(b"!AIVDM,1,1,,A,@01uEO@mMk7P<P00,0*18").decode()
assert msg['type'] == 16
assert msg['repeat'] == 0
@@ -316,21 +352,30 @@ class TestAIS(unittest.TestCase):
assert msg['offset1'] == 200
assert msg['increment1'] == 0
- assert msg['offset2'] == 0
+ assert msg['offset2'] is None
assert msg['increment1'] == 0
+ def test_msg_type_16_a(self):
+ msg = NMEAMessage(b"!AIVDM,1,1,,A,@TFtghNJ4G5?C7mV,0*3D").decode()
+ assert msg['type'] == 16
+ assert msg['mmsi'] == "292499393"
+ assert msg['increment1'] == 982
+
def test_msg_type_17(self):
msg = NMEAMessage.assemble_from_iterable(messages=[
NMEAMessage(b"!AIVDM,2,1,5,A,A02VqLPA4I6C07h5Ed1h<OrsuBTTwS?r:C?w`?la<gno1RTRwSP9:BcurA8a,0*3A"),
NMEAMessage(b"!AIVDM,2,2,5,A,:Oko02TSwu8<:Jbb,0*11")
]).decode()
- n = 0x7c0556c07031febbf52924fe33fa2933ffa0fd2932fdb7062922fe3809292afde9122929fcf7002923ffd20c29aaaa
assert msg['type'] == 17
assert msg['repeat'] == 0
assert msg['mmsi'] == "002734450"
assert msg['lon'] == 17478
assert msg['lat'] == 35992
- assert msg['data'] == n
+ assert msg['data'] == "0111110000000101010101101100000001110000001100011111111010111011111101010010100" \
+ "1001001001111111000110011111110100010100100110011111111111010000011111101001010" \
+ "0100110010111111011011011100000110001010010010001011111110001110000000100100101" \
+ "0010010101011111101111010010001001000101001001010011111110011110111000000000010" \
+ "100100100011111111111101001000001100001010011010101010101010"
msg = NMEAMessage(b"!AIVDM,1,1,,A,A0476BQ>J8`<h2JpH:4P0?j@2mTEw8`=DP1DEnqvj0,0*79").decode()
assert msg['type'] == 17
@@ -338,7 +383,9 @@ class TestAIS(unittest.TestCase):
assert msg['mmsi'] == "004310602"
assert msg['lat'] == 20582
assert msg['lon'] == 80290
- assert msg['data'] == 14486955885545814640451754168044205828166539334830080
+ assert msg['data'] == "001001101011100001100000101000010010000000000000111111001001000000001011010110" \
+ "010001010111111100100010100000110101010010000000000101010001010111011011100111" \
+ "1110110010000000"
def test_msg_type_18(self):
msg = NMEAMessage(b"!AIVDM,1,1,,A,B5NJ;PP005l4ot5Isbl03wsUkP06,0*76").decode()
@@ -390,6 +437,27 @@ class TestAIS(unittest.TestCase):
if k not in ('type', 'mmsi', 'offset1', 'number1', 'timeout1', 'increment1'):
assert not v
+ def test_msg_type_20_a(self):
+ msg = NMEAMessage(b"!AIVDM,1,1,,B,D030p8@2tN?b<`O6DmQO6D0,2*5D").decode()
+ assert msg['type'] == 20
+ assert msg['mmsi'] == "003160097"
+ assert msg['offset1'] == 47
+ assert msg['number1'] == 1
+ assert msg['timeout1'] == 7
+ assert msg['increment1'] == 250
+
+ assert msg['offset2'] == 2250
+ assert msg['number2'] == 1
+ assert msg['timeout2'] == 7
+ assert msg['increment2'] == 1125
+
+ assert msg['offset3'] == 856
+ assert msg['number3'] == 5
+ assert msg['timeout3'] == 7
+ assert msg['increment3'] == 1125
+
+ assert msg['offset4'] is None
+
def test_msg_type_21(self):
msg = NMEAMessage.assemble_from_iterable(messages=[
NMEAMessage(b"!AIVDM,2,1,7,B,E4eHJhPR37q0000000000000000KUOSc=rq4h00000a,0*4A"),
@@ -632,3 +700,13 @@ class TestAIS(unittest.TestCase):
self.assertEqual(content["minute"], 0)
self.assertEqual(content["draught"], 4.7)
self.assertEqual(content["destination"], "VIANA DO CASTELO")
+
+ def test_misc_messages(self):
+ content = decode_msg(
+ "!AIVDM,1,1,,A,13aEOK?P00PD2wVMdLDRhgvL289?,0*26"
+ )
+
+ assert content['type'] == 1
+ assert content['mmsi'] == "244670316"
+ assert content['lon'] == 4.379285
+ assert content['lat'] == 51.89475
diff --git a/tests/test_decode_raw.py b/tests/test_decode_raw.py
index 781e2c5..da24b93 100644
--- a/tests/test_decode_raw.py
+++ b/tests/test_decode_raw.py
@@ -1,7 +1,10 @@
import unittest
+from bitarray import bitarray
+
from pyais import decode_msg
from pyais.exceptions import InvalidNMEAMessageException, MissingMultipartMessageException, TooManyMessagesException
+from pyais.util import binary_data
class TestDecode(unittest.TestCase):
@@ -97,3 +100,20 @@ class TestDecode(unittest.TestCase):
msg_1,
msg_2
)
+
+ def test_binary_data_out_of_bounds(self):
+ b = bitarray('010100000000000010001101101001101111000000101110110100101110101110111000')
+
+ self.assertEqual("10000", binary_data(b, 35, 40))
+
+ # Lower index out of bounds
+ self.assertIsNone(binary_data(b, 72, 73))
+
+ # Upper index out of bounds
+ self.assertIsNone(binary_data(b, 0, 73))
+
+ # Lower and upper index out of bounds
+ self.assertIsNone(binary_data(b, 72, 72))
+
+ # Lower and upper index in bound
+ self.assertIsNotNone(binary_data(b, 71, 71))
diff --git a/tests/test_file_stream.py b/tests/test_file_stream.py
index 45bdb36..fc0bca4 100644
--- a/tests/test_file_stream.py
+++ b/tests/test_file_stream.py
@@ -6,7 +6,7 @@ from pyais.messages import NMEAMessage
class TestFileReaderStream(unittest.TestCase):
- FILENAME = "tests/ais_test_messages"
+ FILENAME = pathlib.Path(__file__).parent.joinpath("ais_test_messages")
def test_reader(self):
with FileReaderStream(self.FILENAME) as stream:
diff --git a/tests/test_main.py b/tests/test_main.py
index 0280554..3e3dedf 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,3 +1,4 @@
+import pathlib
import sys
import unittest
@@ -27,7 +28,7 @@ class TestMainApp(unittest.TestCase):
def test_decode_from_file(self):
class DemoNamespace:
- in_file = open("tests/ais_test_messages", "rb")
+ in_file = open(pathlib.Path(__file__).parent.joinpath("ais_test_messages"), "rb")
out_file = None
assert decode_from_file(DemoNamespace()) == 0
@@ -41,9 +42,10 @@ class TestMainApp(unittest.TestCase):
assert ns.in_file is None
# But this can be overwritten to any file that exists
- ns = parser.parse_args(["-f", "tests/ais_test_messages"])
+ file = str(pathlib.Path(__file__).parent.joinpath("ais_test_messages"))
+ ns = parser.parse_args(["-f", file])
assert ns.func == decode_from_file
- assert ns.in_file.name == "tests/ais_test_messages"
+ assert ns.in_file.name == file
ns.in_file.close()
# If the file does not exist an error is thrown
| Non-deterministic results from 'decode_msg'
I am seeing some non-determinism in `decode_msg` output, e.g.:
```
>>> raw = b'!AIVDM,1,1,,A,D02=ag0flffp,0*11'
>>> pyais.decode_msg(raw)['offset2']
2
>>> pyais.decode_msg(raw)['offset2']
0
```
Memory overflow? This AIS message itself seems short, but I would expect an error if it were an unexpected length.
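For illustration only (a simplified sketch with an invented helper name, not the pyais API; the accepted patch above instead adds a `binary_data` helper plus length-gated field decoding), the underlying problem is reading fixed bit offsets from a payload that is shorter than the full-length message, and a bounds check avoids it:
```
# Simplified sketch; the function and its name are my own, not part of pyais.
from bitarray import bitarray

def safe_int_field(bits: bitarray, start: int, width: int):
    """Decode bits[start:start+width] as an unsigned int, or None if the payload is too short."""
    if len(bits) < start + width:
        return None                      # the field is absent in this shorter message variant
    return int(bits[start:start + width].to01(), 2)

# The 12-character payload 'D02=ag0flffp' is only 72 bits long, so e.g. the
# second offset block of a full-length Type 20 message (bits 70..82) is absent:
# safe_int_field(payload_bits, 70, 12) -> None
```
The accepted patch follows the same idea, returning `None` for the optional offset/number/timeout blocks whenever the bit array ends before the block does.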
python: 3.9
pyais: 1.6.2
bitarray: 2.3.4 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_ais.py::TestAIS::test_broken_messages",
"tests/test_ais.py::TestAIS::test_byte_stream",
"tests/test_ais.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_ais.py::TestAIS::test_empty_channel",
"tests/test_ais.py::TestAIS::test_fail_silently",
"tests/test_ais.py::TestAIS::test_misc_messages",
"tests/test_ais.py::TestAIS::test_msg_getitem",
"tests/test_ais.py::TestAIS::test_msg_type",
"tests/test_ais.py::TestAIS::test_msg_type_1",
"tests/test_ais.py::TestAIS::test_msg_type_10",
"tests/test_ais.py::TestAIS::test_msg_type_11",
"tests/test_ais.py::TestAIS::test_msg_type_12",
"tests/test_ais.py::TestAIS::test_msg_type_13",
"tests/test_ais.py::TestAIS::test_msg_type_14",
"tests/test_ais.py::TestAIS::test_msg_type_15",
"tests/test_ais.py::TestAIS::test_msg_type_16_a",
"tests/test_ais.py::TestAIS::test_msg_type_16_b",
"tests/test_ais.py::TestAIS::test_msg_type_17",
"tests/test_ais.py::TestAIS::test_msg_type_18",
"tests/test_ais.py::TestAIS::test_msg_type_19",
"tests/test_ais.py::TestAIS::test_msg_type_20",
"tests/test_ais.py::TestAIS::test_msg_type_20_a",
"tests/test_ais.py::TestAIS::test_msg_type_21",
"tests/test_ais.py::TestAIS::test_msg_type_22",
"tests/test_ais.py::TestAIS::test_msg_type_23",
"tests/test_ais.py::TestAIS::test_msg_type_24",
"tests/test_ais.py::TestAIS::test_msg_type_25",
"tests/test_ais.py::TestAIS::test_msg_type_26",
"tests/test_ais.py::TestAIS::test_msg_type_27",
"tests/test_ais.py::TestAIS::test_msg_type_3",
"tests/test_ais.py::TestAIS::test_msg_type_4",
"tests/test_ais.py::TestAIS::test_msg_type_5",
"tests/test_ais.py::TestAIS::test_msg_type_5_a",
"tests/test_ais.py::TestAIS::test_msg_type_6",
"tests/test_ais.py::TestAIS::test_msg_type_7_a",
"tests/test_ais.py::TestAIS::test_msg_type_7_b",
"tests/test_ais.py::TestAIS::test_msg_type_7_c",
"tests/test_ais.py::TestAIS::test_msg_type_8",
"tests/test_ais.py::TestAIS::test_msg_type_9",
"tests/test_ais.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_ais.py::TestAIS::test_multiline_message",
"tests/test_ais.py::TestAIS::test_nmea",
"tests/test_ais.py::TestAIS::test_to_json",
"tests/test_decode_raw.py::TestDecode::test_binary_data_out_of_bounds",
"tests/test_decode_raw.py::TestDecode::test_bytes_invalid",
"tests/test_decode_raw.py::TestDecode::test_bytes_valid",
"tests/test_decode_raw.py::TestDecode::test_decode_multiline_message",
"tests/test_decode_raw.py::TestDecode::test_decode_total_garbage",
"tests/test_decode_raw.py::TestDecode::test_multipart_error_message",
"tests/test_decode_raw.py::TestDecode::test_str_invalid",
"tests/test_decode_raw.py::TestDecode::test_str_valid",
"tests/test_decode_raw.py::TestDecode::test_too_many_messages",
"tests/test_file_stream.py::TestFileReaderStream::test_invalid_filename",
"tests/test_file_stream.py::TestFileReaderStream::test_marine_traffic_sample",
"tests/test_file_stream.py::TestFileReaderStream::test_mixed_content",
"tests/test_file_stream.py::TestFileReaderStream::test_reader",
"tests/test_file_stream.py::TestFileReaderStream::test_reader_with_open",
"tests/test_file_stream.py::TestFileReaderStream::test_should_parse",
"tests/test_main.py::TestMainApp::test_decode_from_file",
"tests/test_main.py::TestMainApp::test_decode_single",
"tests/test_main.py::TestMainApp::test_decode_single_multi",
"tests/test_main.py::TestMainApp::test_parser"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-12-03T15:34:35Z" | mit |
|
M0r13n__pyais-56 | diff --git a/pyais/messages.py b/pyais/messages.py
index 07478b9..3cdd34d 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -563,7 +563,7 @@ class MessageType4(Payload):
lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value)
spare = bit_field(10, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
radio = bit_field(19, int, default=0)
@@ -669,7 +669,7 @@ class MessageType9(Payload):
dte = bit_field(1, int, default=0)
spare = bit_field(3, int, default=0)
assigned = bit_field(1, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
radio = bit_field(20, int, default=0)
@@ -1297,7 +1297,7 @@ class MessageType27(Payload):
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
accuracy = bit_field(1, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
status = bit_field(4, int, default=0, from_converter=NavigationStatus, to_converter=NavigationStatus)
lon = bit_field(18, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
lat = bit_field(17, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
| M0r13n/pyais | cc47fb1bf56eaabceb1657770d986a3172bac19a | diff --git a/tests/test_decode.py b/tests/test_decode.py
index 8442ca3..c1ac917 100644
--- a/tests/test_decode.py
+++ b/tests/test_decode.py
@@ -101,6 +101,7 @@ class TestAIS(unittest.TestCase):
assert msg['second'] == 34
assert msg['maneuver'] == ManeuverIndicator.NotAvailable
assert msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_1_c(self):
msg = decode(b"!AIVDM,1,1,,B,181:Kjh01ewHFRPDK1s3IRcn06sd,0*08").asdict()
@@ -226,6 +227,7 @@ class TestAIS(unittest.TestCase):
assert msg['dte'] == 1
assert msg['radio'] == 33392
assert not msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_10_a(self):
msg = decode(b"!AIVDM,1,1,,B,:5MlU41GMK6@,0*6C").asdict()
@@ -360,6 +362,7 @@ class TestAIS(unittest.TestCase):
assert msg['msg22'] == 1
assert not msg['assigned']
assert not msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_19(self):
msg = decode(b"!AIVDM,1,1,,B,C5N3SRgPEnJGEBT>NhWAwwo862PaLELTBJ:V00000000S0D:R220,0*0B").asdict()
| RAIM field integer in message type 4, otherwise boolean?
I've found that in messages of type 4 the RAIM flag is an integer, while for other messages it is a boolean. I'm not sure if this is a bug or by design? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_decode.py::TestAIS::test_msg_type_9"
] | [
"tests/test_decode.py::TestAIS::test_broken_messages",
"tests/test_decode.py::TestAIS::test_byte_stream",
"tests/test_decode.py::TestAIS::test_decode_and_merge",
"tests/test_decode.py::TestAIS::test_decode_out_of_order",
"tests/test_decode.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_decode.py::TestAIS::test_empty_channel",
"tests/test_decode.py::TestAIS::test_issue_46_a",
"tests/test_decode.py::TestAIS::test_issue_46_b",
"tests/test_decode.py::TestAIS::test_issue_50",
"tests/test_decode.py::TestAIS::test_msg_too_short_enum_is_none",
"tests/test_decode.py::TestAIS::test_msg_type_10_a",
"tests/test_decode.py::TestAIS::test_msg_type_10_b",
"tests/test_decode.py::TestAIS::test_msg_type_11",
"tests/test_decode.py::TestAIS::test_msg_type_12_a",
"tests/test_decode.py::TestAIS::test_msg_type_12_b",
"tests/test_decode.py::TestAIS::test_msg_type_13",
"tests/test_decode.py::TestAIS::test_msg_type_14",
"tests/test_decode.py::TestAIS::test_msg_type_15_a",
"tests/test_decode.py::TestAIS::test_msg_type_15_b",
"tests/test_decode.py::TestAIS::test_msg_type_16",
"tests/test_decode.py::TestAIS::test_msg_type_17_a",
"tests/test_decode.py::TestAIS::test_msg_type_17_b",
"tests/test_decode.py::TestAIS::test_msg_type_18",
"tests/test_decode.py::TestAIS::test_msg_type_19",
"tests/test_decode.py::TestAIS::test_msg_type_1_a",
"tests/test_decode.py::TestAIS::test_msg_type_1_b",
"tests/test_decode.py::TestAIS::test_msg_type_1_c",
"tests/test_decode.py::TestAIS::test_msg_type_20",
"tests/test_decode.py::TestAIS::test_msg_type_21",
"tests/test_decode.py::TestAIS::test_msg_type_22_addressed",
"tests/test_decode.py::TestAIS::test_msg_type_22_broadcast",
"tests/test_decode.py::TestAIS::test_msg_type_23",
"tests/test_decode.py::TestAIS::test_msg_type_24",
"tests/test_decode.py::TestAIS::test_msg_type_25_a",
"tests/test_decode.py::TestAIS::test_msg_type_25_b",
"tests/test_decode.py::TestAIS::test_msg_type_25_c",
"tests/test_decode.py::TestAIS::test_msg_type_26_a",
"tests/test_decode.py::TestAIS::test_msg_type_26_b",
"tests/test_decode.py::TestAIS::test_msg_type_27",
"tests/test_decode.py::TestAIS::test_msg_type_3",
"tests/test_decode.py::TestAIS::test_msg_type_4_a",
"tests/test_decode.py::TestAIS::test_msg_type_4_b",
"tests/test_decode.py::TestAIS::test_msg_type_5",
"tests/test_decode.py::TestAIS::test_msg_type_6",
"tests/test_decode.py::TestAIS::test_msg_type_7",
"tests/test_decode.py::TestAIS::test_msg_type_8",
"tests/test_decode.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_decode.py::TestAIS::test_multiline_message",
"tests/test_decode.py::TestAIS::test_nmea_decode",
"tests/test_decode.py::TestAIS::test_nmea_decode_unknown_msg",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_creation",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_decoding",
"tests/test_decode.py::TestAIS::test_none_values_converter_for_all_messages",
"tests/test_decode.py::TestAIS::test_to_dict_non_enum",
"tests/test_decode.py::TestAIS::test_to_json",
"tests/test_decode.py::TestAIS::test_type_22_very_short",
"tests/test_decode.py::TestAIS::test_type_25_very_short",
"tests/test_decode.py::TestAIS::test_type_26_very_short"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2022-03-19T14:33:46Z" | mit |
|
M0r13n__pyais-57 | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 4272b0e..6fb71fe 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,6 +1,15 @@
====================
pyais CHANGELOG
====================
+-------------------------------------------------------------------------------
+ Version 2.0.2 20 Mar 2022
+-------------------------------------------------------------------------------
+* Closes https://github.com/M0r13n/pyais/issues/55
+ * Makes the attribute `raim` always an boolean
+
+* Closes https://github.com/M0r13n/pyais/issues/54
+ * Ensure that the attributes speed, lat, lon and course are always `float`
+
-------------------------------------------------------------------------------
Version 2.0.1 6 Feb 2022
-------------------------------------------------------------------------------
diff --git a/Makefile b/Makefile
index 47a5acf..34ed535 100644
--- a/Makefile
+++ b/Makefile
@@ -21,4 +21,8 @@ clean:
rm coverage.xml
rm .coverage
-test: run_tests flake type-check
\ No newline at end of file
+test: run_tests flake type-check
+
+install:
+ pip install wheel
+ pip install -U .[dev]
diff --git a/docs/messages.rst b/docs/messages.rst
index 5bc0787..dfcc5ee 100644
--- a/docs/messages.rst
+++ b/docs/messages.rst
@@ -536,7 +536,7 @@ MessageType5
* default: 0
MessageType6
Binary Addresses Message
- Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_4_base_station_report
+ Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_6_binary_addressed_message
Attributes:
diff --git a/mypy.ini b/mypy.ini
index 04ebe32..848c9d0 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -13,6 +13,7 @@ warn_unused_ignores = True
warn_return_any = True
no_implicit_reexport = True
strict_equality = True
+strict_optional=True
[mypy-tests.*]
disallow_any_generics = False
diff --git a/pyais/__init__.py b/pyais/__init__.py
index 98652c3..ba747d1 100644
--- a/pyais/__init__.py
+++ b/pyais/__init__.py
@@ -4,7 +4,7 @@ from pyais.encode import encode_dict, encode_msg, ais_to_nmea_0183
from pyais.decode import decode
__license__ = 'MIT'
-__version__ = '2.0.1'
+__version__ = '2.0.2'
__author__ = 'Leon Morten Richter'
__all__ = (
diff --git a/pyais/messages.py b/pyais/messages.py
index 2deb72e..4c5f68c 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -354,6 +354,9 @@ class Payload(abc.ABC):
if d_type == int or d_type == bool:
bits = int_to_bin(val, width, signed=signed)
+ elif d_type == float:
+ val = int(val)
+ bits = int_to_bin(val, width, signed=signed)
elif d_type == str:
bits = str_to_bin(val, width)
else:
@@ -417,7 +420,7 @@ class Payload(abc.ABC):
val: typing.Any
# Get the correct data type and decoding function
- if d_type == int or d_type == bool:
+ if d_type == int or d_type == bool or d_type == float:
shift = (8 - ((end - cur) % 8)) % 8
if field.metadata['signed']:
val = from_bytes_signed(bits) >> shift
@@ -465,35 +468,35 @@ class Payload(abc.ABC):
# Conversion functions
#
-def from_speed(v: int) -> NavigationStatus:
- return NavigationStatus(int(v * 10.0))
+def from_speed(v: typing.Union[int, float]) -> float:
+ return v * 10.0
-def to_speed(v: int) -> float:
+def to_speed(v: typing.Union[int, float]) -> float:
return v / 10.0
-def from_lat_lon(v: int) -> float:
+def from_lat_lon(v: typing.Union[int, float]) -> float:
return float(v) * 600000.0
-def to_lat_lon(v: int) -> float:
+def to_lat_lon(v: typing.Union[int, float]) -> float:
return round(float(v) / 600000.0, 6)
-def from_lat_lon_600(v: int) -> float:
+def from_lat_lon_600(v: typing.Union[int, float]) -> float:
return float(v) * 600.0
-def to_lat_lon_600(v: int) -> float:
+def to_lat_lon_600(v: typing.Union[int, float]) -> float:
return round(float(v) / 600.0, 6)
-def from_course(v: int) -> float:
+def from_10th(v: typing.Union[int, float]) -> float:
return float(v) * 10.0
-def to_course(v: int) -> float:
+def to_10th(v: typing.Union[int, float]) -> float:
return v / 10.0
@@ -514,17 +517,17 @@ class MessageType1(Payload):
msg_type = bit_field(6, int, default=1, signed=False)
repeat = bit_field(2, int, default=0, signed=False)
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
- status = bit_field(4, int, default=0, converter=NavigationStatus.from_value)
+ status = bit_field(4, int, default=0, converter=NavigationStatus.from_value, signed=False)
turn = bit_field(8, int, default=0, signed=True)
- speed = bit_field(10, int, from_converter=from_speed, to_converter=to_speed, default=0, signed=False)
- accuracy = bit_field(1, int, default=0)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, default=0, signed=True)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, default=0, signed=True)
- course = bit_field(12, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
+ speed = bit_field(10, float, from_converter=from_speed, to_converter=to_speed, default=0, signed=False)
+ accuracy = bit_field(1, int, default=0, signed=False)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, default=0, signed=True)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, default=0, signed=True)
+ course = bit_field(12, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=False)
heading = bit_field(9, int, default=0, signed=False)
second = bit_field(6, int, default=0, signed=False)
maneuver = bit_field(2, int, default=0, from_converter=ManeuverIndicator.from_value,
- to_converter=ManeuverIndicator.from_value)
+ to_converter=ManeuverIndicator.from_value, signed=False)
spare = bit_field(3, int, default=0)
raim = bit_field(1, bool, default=0)
radio = bit_field(19, int, default=0, signed=False)
@@ -562,9 +565,10 @@ class MessageType4(Payload):
minute = bit_field(6, int, default=0, signed=False)
second = bit_field(6, int, default=0, signed=False)
accuracy = bit_field(1, int, default=0, signed=False)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value,
+ signed=False)
spare = bit_field(10, int, default=0)
raim = bit_field(1, bool, default=0)
radio = bit_field(19, int, default=0, signed=False)
@@ -593,10 +597,10 @@ class MessageType5(Payload):
day = bit_field(5, int, default=0, signed=False)
hour = bit_field(5, int, default=0, signed=False)
minute = bit_field(6, int, default=0, signed=False)
- draught = bit_field(8, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
+ draught = bit_field(8, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=False)
destination = bit_field(120, str, default='')
- dte = bit_field(1, int, default=0)
- spare = bit_field(1, int, default=0)
+ dte = bit_field(1, bool, default=0, signed=False)
+ spare = bit_field(1, bool, default=0)
@attr.s(slots=True)
@@ -661,17 +665,20 @@ class MessageType9(Payload):
msg_type = bit_field(6, int, default=9, signed=False)
repeat = bit_field(2, int, default=0, signed=False)
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
+
alt = bit_field(12, int, default=0, signed=False)
+ # speed over ground is in knots, not deciknots
speed = bit_field(10, int, default=0, signed=False)
accuracy = bit_field(1, int, default=0, signed=False)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- course = bit_field(12, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ course = bit_field(12, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=False)
second = bit_field(6, int, default=0, signed=False)
+
reserved = bit_field(8, int, default=0)
- dte = bit_field(1, int, default=0)
+ dte = bit_field(1, bool, default=0)
spare = bit_field(3, int, default=0)
- assigned = bit_field(1, int, default=0)
+ assigned = bit_field(1, bool, default=0)
raim = bit_field(1, bool, default=0)
radio = bit_field(20, int, default=0, signed=False)
@@ -709,8 +716,8 @@ class MessageType12(Payload):
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
seqno = bit_field(2, int, default=0, signed=False)
dest_mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
- retransmit = bit_field(1, int, default=0, signed=False)
- spare = bit_field(1, int, default=0)
+ retransmit = bit_field(1, bool, default=False, signed=False)
+ spare = bit_field(1, int, default=0, signed=False)
text = bit_field(936, str, default='')
@@ -787,8 +794,9 @@ class MessageType17(Payload):
repeat = bit_field(2, int, default=0, signed=False)
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
spare_1 = bit_field(2, int, default=0)
- lon = bit_field(18, int, from_converter=from_course, to_converter=to_course, default=0)
- lat = bit_field(17, int, from_converter=from_course, to_converter=to_course, default=0)
+ # Note that latitude and longitude are in units of a tenth of a minute
+ lon = bit_field(18, float, from_converter=from_10th, to_converter=to_10th, default=0)
+ lat = bit_field(17, float, from_converter=from_10th, to_converter=to_10th, default=0)
spare_2 = bit_field(5, int, default=0)
data = bit_field(736, int, default=0, from_converter=int_to_bytes)
@@ -802,23 +810,24 @@ class MessageType18(Payload):
msg_type = bit_field(6, int, default=18, signed=False)
repeat = bit_field(2, int, default=0, signed=False)
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
+
reserved = bit_field(8, int, default=0, signed=False)
- speed = bit_field(10, int, default=0, signed=False)
- accuracy = bit_field(1, int, default=0)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- course = bit_field(12, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
+ speed = bit_field(10, float, from_converter=from_speed, to_converter=to_speed, default=0, signed=False)
+ accuracy = bit_field(1, int, default=0, signed=False)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ course = bit_field(12, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=False)
heading = bit_field(9, int, default=0, signed=False)
second = bit_field(6, int, default=0, signed=False)
reserved_2 = bit_field(2, int, default=0, signed=False)
cs = bit_field(1, bool, default=0, signed=False)
- display = bit_field(1, bool, default=0, signed=False)
- dsc = bit_field(1, bool, default=0, signed=False)
- band = bit_field(1, bool, default=0, signed=False)
- msg22 = bit_field(1, bool, default=0, signed=False)
- assigned = bit_field(1, bool, default=0, signed=False)
- raim = bit_field(1, bool, default=0, signed=False)
- radio = bit_field(20, int, default=0, signed=False)
+ display = bit_field(1, bool, default=0)
+ dsc = bit_field(1, bool, default=0)
+ band = bit_field(1, bool, default=0)
+ msg22 = bit_field(1, bool, default=0)
+ assigned = bit_field(1, bool, default=0)
+ raim = bit_field(1, bool, default=0)
+ radio = bit_field(20, int, default=0)
@attr.s(slots=True)
@@ -831,11 +840,12 @@ class MessageType19(Payload):
repeat = bit_field(2, int, default=0, signed=False)
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
reserved = bit_field(8, int, default=0)
- speed = bit_field(10, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
- accuracy = bit_field(1, int, default=0)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- course = bit_field(12, int, from_converter=from_course, to_converter=to_course, default=0, signed=False)
+
+ speed = bit_field(10, float, from_converter=from_speed, to_converter=to_speed, default=0, signed=False)
+ accuracy = bit_field(1, int, default=0, signed=False)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ course = bit_field(12, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=False)
heading = bit_field(9, int, default=0, signed=False)
second = bit_field(6, int, default=0, signed=False)
regional = bit_field(4, int, default=0, signed=False)
@@ -849,7 +859,7 @@ class MessageType19(Payload):
epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value)
raim = bit_field(1, bool, default=0)
dte = bit_field(1, bool, default=0)
- assigned = bit_field(1, int, default=0, signed=False)
+ assigned = bit_field(1, bool, default=0, signed=False)
spare = bit_field(4, int, default=0)
@@ -898,13 +908,15 @@ class MessageType21(Payload):
aid_type = bit_field(5, int, default=0, from_converter=NavAid.from_value, to_converter=NavAid.from_value,
signed=False)
name = bit_field(120, str, default='')
- accuracy = bit_field(1, bool, default=0, signed=False)
- lon = bit_field(28, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
- lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+
+ accuracy = bit_field(1, int, default=0, signed=False)
+ lon = bit_field(28, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
+ lat = bit_field(27, float, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
to_bow = bit_field(9, int, default=0, signed=False)
to_stern = bit_field(9, int, default=0, signed=False)
to_port = bit_field(6, int, default=0, signed=False)
to_starboard = bit_field(6, int, default=0, signed=False)
+
epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value)
second = bit_field(6, int, default=0, signed=False)
off_position = bit_field(1, bool, default=0)
@@ -966,10 +978,11 @@ class MessageType22Broadcast(Payload):
# If the message is broadcast (addressed field is 0),
# the ne_lon, ne_lat, sw_lon, and sw_lat fields are the
# corners of a rectangular jurisdiction area over which control parameter
- ne_lon = bit_field(18, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- ne_lat = bit_field(17, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- sw_lon = bit_field(18, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- sw_lat = bit_field(17, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
+ # ne_lon, ne_lat, sw_lon, and sw_lat fields are in 0.1 minutes
+ ne_lon = bit_field(18, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ ne_lat = bit_field(17, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ sw_lon = bit_field(18, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ sw_lat = bit_field(17, float, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
addressed = bit_field(1, bool, default=0)
band_a = bit_field(1, bool, default=0)
@@ -1015,10 +1028,10 @@ class MessageType23(Payload):
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
spare_1 = bit_field(2, int, default=0)
- ne_lon = bit_field(18, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- ne_lat = bit_field(17, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- sw_lon = bit_field(18, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
- sw_lat = bit_field(17, int, from_converter=from_course, to_converter=to_course, default=0, signed=True)
+ ne_lon = bit_field(18, int, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ ne_lat = bit_field(17, int, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ sw_lon = bit_field(18, int, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
+ sw_lat = bit_field(17, int, from_converter=from_10th, to_converter=to_10th, default=0, signed=True)
station_type = bit_field(4, int, default=0, from_converter=StationType.from_value,
to_converter=StationType.from_value)
@@ -1305,8 +1318,8 @@ class MessageType27(Payload):
accuracy = bit_field(1, int, default=0, signed=False)
raim = bit_field(1, bool, default=0, signed=False)
status = bit_field(4, int, default=0, from_converter=NavigationStatus, to_converter=NavigationStatus, signed=False)
- lon = bit_field(18, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
- lat = bit_field(17, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
+ lon = bit_field(18, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
+ lat = bit_field(17, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
speed = bit_field(6, int, default=0, signed=False)
course = bit_field(9, int, default=0, signed=False)
gnss = bit_field(1, int, default=0, signed=False)
diff --git a/setup.py b/setup.py
index 17825d4..b732e89 100644
--- a/setup.py
+++ b/setup.py
@@ -40,6 +40,9 @@ setuptools.setup(
"bitarray",
"attrs"
],
+ extras_require={
+ 'dev': ['nose', 'mypy', 'flake8', 'coverage', 'twine', 'sphinx']
+ },
entry_points={
"console_scripts": [
'ais-decode=pyais.main:main'
| M0r13n/pyais | e001f9472a1d7081230bef2398c037e5350e4125 | diff --git a/tests/test_decode.py b/tests/test_decode.py
index c1ac917..d2a1b6d 100644
--- a/tests/test_decode.py
+++ b/tests/test_decode.py
@@ -1,5 +1,6 @@
import textwrap
import unittest
+from pprint import pprint
from pyais import NMEAMessage, encode_dict
from pyais.ais_types import AISType
@@ -123,6 +124,12 @@ class TestAIS(unittest.TestCase):
assert decode(b"!AIVDM,1,1,,B,0S9edj0P03PecbBN`ja@0?w42cFC,0*7C").to_json()
+ def test_decode_1_speed(self):
+ content = decode(b"!AIVDM,1,1,,A,13@nePh01>PjcO4PGReoJEmL0HJg,0*67").asdict()
+
+ assert content['speed'] == 7.8
+ assert content['msg_type'] == 1
+
def test_msg_type_3(self):
msg = decode(b"!AIVDM,1,1,,A,35NSH95001G?wopE`beasVk@0E5:,0*6F").asdict()
assert msg['msg_type'] == 3
@@ -344,10 +351,9 @@ class TestAIS(unittest.TestCase):
def test_msg_type_18(self):
msg = decode(b"!AIVDM,1,1,,A,B5NJ;PP005l4ot5Isbl03wsUkP06,0*76").asdict()
- print(msg)
assert msg['msg_type'] == 18
assert msg['mmsi'] == "367430530"
- assert msg['speed'] == 0
+ assert msg['speed'] == 0.0
assert msg['accuracy'] == 0
assert round(msg['lat'], 2) == 37.79
assert round(msg['lon'], 2) == -122.27
@@ -364,6 +370,17 @@ class TestAIS(unittest.TestCase):
assert not msg['raim']
assert isinstance(msg['raim'], bool)
+ assert isinstance(msg['lat'], float)
+ assert isinstance(msg['lon'], float)
+ assert isinstance(msg['speed'], float)
+ assert isinstance(msg['course'], float)
+
+ def test_msg_type_18_speed(self):
+ msg = decode(b"!AIVDO,1,1,,A,B5NJ;PP2aUl4ot5Isbl6GwsUkP06,0*35").asdict()
+
+ assert msg['speed'] == 67.8
+ assert msg['course'] == 10.1
+
def test_msg_type_19(self):
msg = decode(b"!AIVDM,1,1,,B,C5N3SRgPEnJGEBT>NhWAwwo862PaLELTBJ:V00000000S0D:R220,0*0B").asdict()
assert msg['msg_type'] == 19
@@ -437,7 +454,7 @@ class TestAIS(unittest.TestCase):
assert msg['ne_lon'] == -7710.0
assert msg['ne_lat'] == 3300.0
assert msg['sw_lon'] == -8020.0
- assert msg['sw_lat'] == 3210
+ assert msg['sw_lat'] == 3210.0
assert msg['band_a'] == 0
assert msg['band_b'] == 0
@@ -446,6 +463,11 @@ class TestAIS(unittest.TestCase):
assert 'dest1' not in msg.keys()
assert 'dest2' not in msg.keys()
+ assert isinstance(msg['ne_lon'], float)
+ assert isinstance(msg['ne_lat'], float)
+ assert isinstance(msg['sw_lon'], float)
+ assert isinstance(msg['sw_lat'], float)
+
def test_msg_type_22_addressed(self):
# Addressed
msg = decode(b"!AIVDM,1,1,,A,F@@W>gOP00PH=JrN9l000?wB2HH;,0*44").asdict()
@@ -597,7 +619,7 @@ class TestAIS(unittest.TestCase):
self.assertEqual(content["msg_type"], 18)
self.assertEqual(content["repeat"], 0)
self.assertEqual(content["mmsi"], "1000000000")
- self.assertEqual(content["speed"], 1023)
+ self.assertEqual(content["speed"], 102.3)
self.assertEqual(content["accuracy"], 0)
self.assertEqual(str(content["lon"]), "181.0")
self.assertEqual(str(content["lat"]), "91.0")
@@ -906,3 +928,25 @@ class TestAIS(unittest.TestCase):
decoded = decode(short_msg)
self.assertEqual(decoded.mmsi, '000000001')
+
+ def test_types_for_messages(self):
+ """Make sure that the types are consistent for all messages"""
+ types = {}
+ for typ, msg in MSG_CLASS.items():
+ for field in msg.fields():
+ d_type = field.metadata['d_type']
+ f_name = field.name
+ if f_name in types:
+ if typ == 9 and f_name == 'speed' and d_type == int:
+ continue
+ if f_name == 'spare':
+ continue
+ if typ == 27 and f_name == 'speed' and d_type == int:
+ continue
+ if typ == 27 and f_name == 'course' and d_type == int:
+ continue
+ assert d_type == types[f_name], f"{typ}.{f_name}: {d_type} vs. {types[f_name]}"
+ else:
+ types[f_name] = d_type
+
+ pprint(types)
diff --git a/tests/test_encode.py b/tests/test_encode.py
index 16e4070..64c5105 100644
--- a/tests/test_encode.py
+++ b/tests/test_encode.py
@@ -517,6 +517,33 @@ def test_encode_type_19():
assert encoded[0] == "!AIVDO,1,1,,A,C5N3SRP0=nJGEBT>NhWAwwo862PaLELTBJ:V0000000000D:R220,0*46"
+def test_encode_type_18_with_speed_and_course():
+ data = {
+ 'accuracy': 0,
+ 'assigned': 0,
+ 'band': 1,
+ 'course': 10.1,
+ 'cs': 1,
+ 'display': 0,
+ 'dsc': 1,
+ 'heading': 511,
+ 'lat': 37.785035,
+ 'lon': -122.26732,
+ 'mmsi': '367430530',
+ 'msg22': 1,
+ 'radio': 917510,
+ 'raim': 0,
+ 'regional': 0,
+ 'repeat': 0,
+ 'second': 55,
+ 'speed': 67.85,
+ 'type': 18
+ }
+
+ encoded = encode_dict(data)
+ assert encoded[0] == "!AIVDO,1,1,,A,B5NJ;PP2aUl4ot5Isbl6GwsUkP06,0*35"
+
+
def test_encode_type_18():
data = {
'accuracy': 0,
@@ -953,17 +980,17 @@ def test_encode_type_1():
'raim': 0,
'repeat': 0,
'second': 59,
- 'speed': 0.0,
+ 'speed': 7.8,
'status': 3,
'turn': 0,
'type': 1
}
encoded = encode_dict(data, radio_channel="B", talker_id="AIVDM")[0]
- assert encoded == "!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C"
+ assert encoded == "!AIVDM,1,1,,B,15M67FC01>G?ufbE`FepT@3n00Sa,0*53"
encoded = encode_dict(data, radio_channel="B")[0]
- assert encoded == "!AIVDO,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5E"
+ assert encoded == "!AIVDO,1,1,,B,15M67FC01>G?ufbE`FepT@3n00Sa,0*51"
def test_mmsi_too_long():
| Speed and other fields returning unexpected values
For some fields I'm getting speed and other field values as an integer rather than a float.
Looking at this segment https://github.com/M0r13n/pyais/blob/cc47fb1bf56eaabceb1657770d986a3172bac19a/pyais/messages.py#L794-L818
I'm somewhat confused, as the type given to `bit_field` is `int` for everything, while properties like speed/lat/lon etc. should all be floats.
Also shouldn't the defaults be `None` for missing values, or am I misinterpreting what the default property represents?
If it is meant to be 0 and not None, then at the very least it should be 0.0 for floats and not 0.
Please can you explain the use of `default` in the `bit_field` function. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_decode.py::TestAIS::test_empty_channel",
"tests/test_decode.py::TestAIS::test_msg_type_18",
"tests/test_decode.py::TestAIS::test_msg_type_18_speed",
"tests/test_decode.py::TestAIS::test_types_for_messages",
"tests/test_encode.py::test_encode_type_18_with_speed_and_course",
"tests/test_encode.py::test_encode_type_1"
] | [
"tests/test_decode.py::TestAIS::test_broken_messages",
"tests/test_decode.py::TestAIS::test_byte_stream",
"tests/test_decode.py::TestAIS::test_decode_1_speed",
"tests/test_decode.py::TestAIS::test_decode_and_merge",
"tests/test_decode.py::TestAIS::test_decode_out_of_order",
"tests/test_decode.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_decode.py::TestAIS::test_issue_46_a",
"tests/test_decode.py::TestAIS::test_issue_46_b",
"tests/test_decode.py::TestAIS::test_issue_50",
"tests/test_decode.py::TestAIS::test_msg_too_short_enum_is_none",
"tests/test_decode.py::TestAIS::test_msg_type_10_a",
"tests/test_decode.py::TestAIS::test_msg_type_10_b",
"tests/test_decode.py::TestAIS::test_msg_type_11",
"tests/test_decode.py::TestAIS::test_msg_type_12_a",
"tests/test_decode.py::TestAIS::test_msg_type_12_b",
"tests/test_decode.py::TestAIS::test_msg_type_13",
"tests/test_decode.py::TestAIS::test_msg_type_14",
"tests/test_decode.py::TestAIS::test_msg_type_15_a",
"tests/test_decode.py::TestAIS::test_msg_type_15_b",
"tests/test_decode.py::TestAIS::test_msg_type_16",
"tests/test_decode.py::TestAIS::test_msg_type_17_a",
"tests/test_decode.py::TestAIS::test_msg_type_17_b",
"tests/test_decode.py::TestAIS::test_msg_type_19",
"tests/test_decode.py::TestAIS::test_msg_type_1_a",
"tests/test_decode.py::TestAIS::test_msg_type_1_b",
"tests/test_decode.py::TestAIS::test_msg_type_1_c",
"tests/test_decode.py::TestAIS::test_msg_type_20",
"tests/test_decode.py::TestAIS::test_msg_type_21",
"tests/test_decode.py::TestAIS::test_msg_type_22_addressed",
"tests/test_decode.py::TestAIS::test_msg_type_22_broadcast",
"tests/test_decode.py::TestAIS::test_msg_type_23",
"tests/test_decode.py::TestAIS::test_msg_type_24",
"tests/test_decode.py::TestAIS::test_msg_type_25_a",
"tests/test_decode.py::TestAIS::test_msg_type_25_b",
"tests/test_decode.py::TestAIS::test_msg_type_25_c",
"tests/test_decode.py::TestAIS::test_msg_type_26_a",
"tests/test_decode.py::TestAIS::test_msg_type_26_b",
"tests/test_decode.py::TestAIS::test_msg_type_27",
"tests/test_decode.py::TestAIS::test_msg_type_3",
"tests/test_decode.py::TestAIS::test_msg_type_4_a",
"tests/test_decode.py::TestAIS::test_msg_type_4_b",
"tests/test_decode.py::TestAIS::test_msg_type_5",
"tests/test_decode.py::TestAIS::test_msg_type_6",
"tests/test_decode.py::TestAIS::test_msg_type_7",
"tests/test_decode.py::TestAIS::test_msg_type_8",
"tests/test_decode.py::TestAIS::test_msg_type_9",
"tests/test_decode.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_decode.py::TestAIS::test_multiline_message",
"tests/test_decode.py::TestAIS::test_nmea_decode",
"tests/test_decode.py::TestAIS::test_nmea_decode_unknown_msg",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_creation",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_decoding",
"tests/test_decode.py::TestAIS::test_none_values_converter_for_all_messages",
"tests/test_decode.py::TestAIS::test_to_dict_non_enum",
"tests/test_decode.py::TestAIS::test_to_json",
"tests/test_decode.py::TestAIS::test_type_22_very_short",
"tests/test_decode.py::TestAIS::test_type_25_very_short",
"tests/test_decode.py::TestAIS::test_type_26_very_short",
"tests/test_encode.py::test_widths",
"tests/test_encode.py::test_variable_message_length_width",
"tests/test_encode.py::test_encode_msg_table",
"tests/test_encode.py::test_invalid_talker_id",
"tests/test_encode.py::test_encode_payload_invalid_talker_id",
"tests/test_encode.py::test_invalid_radio_channel",
"tests/test_encode.py::test_encode_payload_error_radio",
"tests/test_encode.py::test_data_to_payload",
"tests/test_encode.py::test_get_ais_type",
"tests/test_encode.py::test_str_to_bin",
"tests/test_encode.py::test_int_to_bin",
"tests/test_encode.py::test_encode_type_27",
"tests/test_encode.py::test_encode_type_26",
"tests/test_encode.py::test_encode_type_25_b",
"tests/test_encode.py::test_encode_type_25_a",
"tests/test_encode.py::test_encode_type_24_partno_invalid",
"tests/test_encode.py::test_encode_type_24_a",
"tests/test_encode.py::test_encode_type_24_b",
"tests/test_encode.py::test_encode_type_23",
"tests/test_encode.py::test_encode_type_22_b",
"tests/test_encode.py::test_encode_type_22_a",
"tests/test_encode.py::test_encode_type_21",
"tests/test_encode.py::test_encode_type_20",
"tests/test_encode.py::test_encode_type_19",
"tests/test_encode.py::test_encode_type_18",
"tests/test_encode.py::test_encode_type_17_b",
"tests/test_encode.py::test_encode_type_17_a",
"tests/test_encode.py::test_encode_type_16",
"tests/test_encode.py::test_encode_type_15_a",
"tests/test_encode.py::test_encode_type_15",
"tests/test_encode.py::test_encode_type_14",
"tests/test_encode.py::test_encode_type_13",
"tests/test_encode.py::test_encode_type_12",
"tests/test_encode.py::test_encode_type_11",
"tests/test_encode.py::test_encode_type_10",
"tests/test_encode.py::test_encode_type_9",
"tests/test_encode.py::test_encode_type_8",
"tests/test_encode.py::test_encode_type_7",
"tests/test_encode.py::test_encode_type_6_bytes",
"tests/test_encode.py::test_encode_type_6",
"tests/test_encode.py::test_encode_type_4",
"tests/test_encode.py::test_encode_type_5_issue_59",
"tests/test_encode.py::test_encode_type_5",
"tests/test_encode.py::test_encode_type_5_default",
"tests/test_encode.py::test_encode_msg_type2",
"tests/test_encode.py::test_encode_msg_type_3",
"tests/test_encode.py::test_encode_type_1_default",
"tests/test_encode.py::test_mmsi_too_long",
"tests/test_encode.py::test_lon_too_large",
"tests/test_encode.py::test_ship_name_too_lon",
"tests/test_encode.py::test_int_to_bytes",
"tests/test_encode.py::test_to_six_bit",
"tests/test_encode.py::test_encode_ascii_6_bit"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-03-20T11:53:24Z" | mit |
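
For readers skimming the speed/float report above, here is a minimal decoding sketch of the intended post-fix behaviour. It uses only the payload and expected values that already appear in this record's test patch; nothing outside the record is assumed.

```python
# Minimal sketch: after the fix, speed and course decode as floats.
# Payload and expected values are copied from this record's test patch.
from pyais.decode import decode

msg = decode(b"!AIVDO,1,1,,A,B5NJ;PP2aUl4ot5Isbl6GwsUkP06,0*35").asdict()

assert msg['msg_type'] == 18
assert isinstance(msg['speed'], float) and msg['speed'] == 67.8
assert isinstance(msg['course'], float) and msg['course'] == 10.1
```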
|
M0r13n__pyais-60 | diff --git a/pyais/messages.py b/pyais/messages.py
index 3cdd34d..6154e5a 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -11,7 +11,7 @@ from pyais.constants import TalkerID, NavigationStatus, ManeuverIndicator, EpfdT
from pyais.exceptions import InvalidNMEAMessageException, UnknownMessageException, UnknownPartNoException, \
InvalidDataTypeException
from pyais.util import decode_into_bit_array, compute_checksum, int_to_bin, str_to_bin, \
- encode_ascii_6, from_bytes, int_to_bytes, from_bytes_signed, decode_bin_as_ascii6, get_int
+ encode_ascii_6, from_bytes, int_to_bytes, from_bytes_signed, decode_bin_as_ascii6, get_int, chk_to_int
NMEA_VALUE = typing.Union[str, float, int, bool, bytes]
@@ -195,10 +195,12 @@ class NMEAMessage(object):
self.channel: str = channel.decode('ascii')
# Decoded message payload as byte string
self.payload: bytes = payload
+
+ fill, check = chk_to_int(checksum)
# Fill bits (0 to 5)
- self.fill_bits: int = int(chr(checksum[0]))
+ self.fill_bits: int = fill
# Message Checksum (hex value)
- self.checksum = int(checksum[2:], 16)
+ self.checksum = check
# Finally decode bytes into bits
self.bit_array: bitarray = decode_into_bit_array(self.payload, self.fill_bits)
diff --git a/pyais/util.py b/pyais/util.py
index 846c89d..06147bc 100644
--- a/pyais/util.py
+++ b/pyais/util.py
@@ -229,3 +229,20 @@ def str_to_bin(val: str, width: int) -> bitarray:
out += bitarray(txt)
return out
+
+
+def chk_to_int(chk_str: bytes) -> typing.Tuple[int, int]:
+ """
+ Converts a checksum string to a tuple of (fillbits, checksum).
+ >>> chk_to_int(b"0*1B")
+ (0, 27)
+ """
+ if not len(chk_str):
+ return 0, -1
+
+ fill_bits: int = int(chr(chk_str[0]))
+ try:
+ checksum = int(chk_str[2:], 16)
+ except (IndexError, ValueError):
+ checksum = -1
+ return fill_bits, checksum
| M0r13n/pyais | e833f46cbaa157aa022113c10a5630c002104459 | diff --git a/tests/test_nmea.py b/tests/test_nmea.py
index bacc0fb..e274ed4 100644
--- a/tests/test_nmea.py
+++ b/tests/test_nmea.py
@@ -5,6 +5,7 @@ from bitarray import bitarray
from pyais.exceptions import InvalidNMEAMessageException
from pyais.messages import NMEAMessage
+from pyais.util import chk_to_int
class TestNMEA(unittest.TestCase):
@@ -173,3 +174,25 @@ class TestNMEA(unittest.TestCase):
with self.assertRaises(TypeError):
_ = msg[1:3]
+
+ def test_missing_checksum(self):
+ msg = b"!AIVDM,1,1,,A,100u3FP04r28t0<WcshcQI<H0H79,0"
+ NMEAMessage(msg)
+
+ def test_chk_to_int_with_valid_checksum(self):
+ self.assertEqual(chk_to_int(b"0*1B"), (0, 27))
+ self.assertEqual(chk_to_int(b"0*FF"), (0, 255))
+ self.assertEqual(chk_to_int(b"0*00"), (0, 0))
+
+ def test_chk_to_int_with_fill_bits(self):
+ self.assertEqual(chk_to_int(b"1*1B"), (1, 27))
+ self.assertEqual(chk_to_int(b"5*1B"), (5, 27))
+
+ def test_chk_to_int_with_missing_checksum(self):
+ self.assertEqual(chk_to_int(b"1"), (1, -1))
+ self.assertEqual(chk_to_int(b"5*"), (5, -1))
+
+ def test_chk_to_int_with_missing_fill_bits(self):
+ self.assertEqual(chk_to_int(b""), (0, -1))
+ with self.assertRaises(ValueError):
+ self.assertEqual(chk_to_int(b"*1B"), (0, 24))
| ValueError: invalid literal for int() with base 16: b''
Maybe this is not working as intended?
I am parsing a file of all sorts of bad data and decoding it. I handle the exceptions, but this seems like a bad way to handle a missing checksum:
!AIVDM,1,1,,A,100u3FP04r28t0<WcshcQI<H0H79,0
```
Traceback (most recent call last):
  File "pyais_test.py", line 14, in <module>
    decoded = pyais.decode(i)
  File "c:\Python38\lib\site-packages\pyais\decode.py", line 34, in decode
    nmea = _assemble_messages(*parts)
  File "c:\Python38\lib\site-packages\pyais\decode.py", line 13, in _assemble_messages
    nmea = NMEAMessage(msg)
  File "c:\Python38\lib\site-packages\pyais\messages.py", line 201, in __init__
    self.checksum = int(checksum[2:], 16)
ValueError: invalid literal for int() with base 16: b''
```
All the best | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_nmea.py::TestNMEA::test_attrs",
"tests/test_nmea.py::TestNMEA::test_chk_to_int_with_fill_bits",
"tests/test_nmea.py::TestNMEA::test_chk_to_int_with_missing_checksum",
"tests/test_nmea.py::TestNMEA::test_chk_to_int_with_missing_fill_bits",
"tests/test_nmea.py::TestNMEA::test_chk_to_int_with_valid_checksum",
"tests/test_nmea.py::TestNMEA::test_dict",
"tests/test_nmea.py::TestNMEA::test_from_bytes",
"tests/test_nmea.py::TestNMEA::test_from_str",
"tests/test_nmea.py::TestNMEA::test_get_item",
"tests/test_nmea.py::TestNMEA::test_get_item_raises_key_error",
"tests/test_nmea.py::TestNMEA::test_get_item_raises_type_error",
"tests/test_nmea.py::TestNMEA::test_message_assembling",
"tests/test_nmea.py::TestNMEA::test_message_eq_method",
"tests/test_nmea.py::TestNMEA::test_missing_checksum",
"tests/test_nmea.py::TestNMEA::test_single",
"tests/test_nmea.py::TestNMEA::test_talker",
"tests/test_nmea.py::TestNMEA::test_type",
"tests/test_nmea.py::TestNMEA::test_validity",
"tests/test_nmea.py::TestNMEA::test_values",
"tests/test_nmea.py::TestNMEA::test_wrong_type"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-03-26T12:45:17Z" | mit |
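
A hedged sketch of the checksum handling added by this record's patch: `chk_to_int` splits the trailing `<fill>*<checksum>` field and falls back to `-1` when the checksum part is missing, so the truncated sentence from the issue no longer raises `ValueError`. Every call and expected value below is taken from the patch and its tests.

```python
# Sketch based on this record's patch/tests: a missing or malformed checksum
# now yields -1 instead of raising ValueError during NMEA parsing.
from pyais.messages import NMEAMessage
from pyais.util import chk_to_int

assert chk_to_int(b"0*1B") == (0, 27)   # fill bits 0, checksum 0x1B
assert chk_to_int(b"5*") == (5, -1)     # checksum missing -> sentinel -1

# The sentence from the issue (no checksum) now parses without an exception.
NMEAMessage(b"!AIVDM,1,1,,A,100u3FP04r28t0<WcshcQI<H0H79,0")
```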
|
M0r13n__pyais-67 | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index a76ed59..93ccff4 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,6 +1,14 @@
====================
pyais CHANGELOG
====================
+-------------------------------------------------------------------------------
+ Version 2.1.1 24 Apr 2022
+-------------------------------------------------------------------------------
+* Closes https://github.com/M0r13n/pyais/issues/65
+ * makes lat and lon signed for type 27 messages
+* drops nosetest as a development dependency
+
+
-------------------------------------------------------------------------------
Version 2.1.0 14 Apr 2022
-------------------------------------------------------------------------------
diff --git a/pyais/__init__.py b/pyais/__init__.py
index 47ae7a0..ee21463 100644
--- a/pyais/__init__.py
+++ b/pyais/__init__.py
@@ -4,7 +4,7 @@ from pyais.encode import encode_dict, encode_msg, ais_to_nmea_0183
from pyais.decode import decode
__license__ = 'MIT'
-__version__ = '2.1.0'
+__version__ = '2.1.1'
__author__ = 'Leon Morten Richter'
__all__ = (
diff --git a/pyais/messages.py b/pyais/messages.py
index 03233c5..a370668 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -1359,8 +1359,8 @@ class MessageType27(Payload):
accuracy = bit_field(1, bool, default=0, signed=False)
raim = bit_field(1, bool, default=0, signed=False)
status = bit_field(4, int, default=0, from_converter=NavigationStatus, to_converter=NavigationStatus, signed=False)
- lon = bit_field(18, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
- lat = bit_field(17, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
+ lon = bit_field(18, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0, signed=True)
+ lat = bit_field(17, float, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0, signed=True)
speed = bit_field(6, float, default=0, signed=False)
course = bit_field(9, float, default=0, signed=False)
gnss = bit_field(1, bool, default=0, signed=False)
| M0r13n/pyais | 57b1f8f880e97475775d75b9ae94e89a71759a43 | diff --git a/tests/test_decode.py b/tests/test_decode.py
index 196f458..1f670b2 100644
--- a/tests/test_decode.py
+++ b/tests/test_decode.py
@@ -8,15 +8,25 @@ from pprint import pprint
from pyais import NMEAMessage, encode_dict
from pyais.ais_types import AISType
-from pyais.constants import ManeuverIndicator, NavigationStatus, ShipType, NavAid, EpfdType, StationType, TransmitMode
+from pyais.constants import (EpfdType, ManeuverIndicator, NavAid,
+ NavigationStatus, ShipType, StationType,
+ TransmitMode)
from pyais.decode import decode
from pyais.exceptions import UnknownMessageException
-from pyais.messages import MessageType18, MessageType5, MessageType6, MSG_CLASS, MessageType24PartA, MessageType24PartB, \
- MessageType25AddressedStructured, MessageType25BroadcastStructured, MessageType25AddressedUnstructured, \
- MessageType25BroadcastUnstructured, MessageType26AddressedStructured, MessageType26BroadcastStructured, \
- MessageType26BroadcastUnstructured, MessageType22Addressed, MessageType22Broadcast, to_turn, from_turn
+from pyais.messages import (MSG_CLASS, MessageType5, MessageType6,
+ MessageType18, MessageType22Addressed,
+ MessageType22Broadcast, MessageType24PartA,
+ MessageType24PartB,
+ MessageType25AddressedStructured,
+ MessageType25AddressedUnstructured,
+ MessageType25BroadcastStructured,
+ MessageType25BroadcastUnstructured,
+ MessageType26AddressedStructured,
+ MessageType26BroadcastStructured,
+ MessageType26BroadcastUnstructured, from_turn,
+ to_turn)
from pyais.stream import ByteStream
-from pyais.util import bytes2bits, bits2bytes, b64encode_str
+from pyais.util import b64encode_str, bits2bytes, bytes2bits
def ensure_type_for_msg_dict(msg_dict: typing.Dict[str, typing.Any]) -> None:
@@ -679,6 +689,12 @@ class TestAIS(unittest.TestCase):
ensure_type_for_msg_dict(msg)
+ def test_msg_type_27_signed(self):
+ msg = decode('!AIVDO,1,1,,A,K01;FQh?PbtE3P00,0*75').asdict()
+ assert msg['mmsi'] == 1234567
+ assert msg['lon'] == -13.368333
+ assert msg['lat'] == -50.121667
+
def test_broken_messages(self):
# Undefined epfd
assert decode(b"!AIVDM,1,1,,B,4>O7m7Iu@<9qUfbtm`vSnwvH20S8,0*46").asdict()['epfd'] == EpfdType.Undefined
| MessageType27 lat/long should be signed
https://github.com/M0r13n/pyais/blob/21da419607a512256e3cfe9abc852738aa8c2599/pyais/messages.py#L1362-L1363
Discovered this while testing; it seemed my numbers were off by half and never negative. Doing more testing today. I'm no AIS expert, but it seems like a bug. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_decode.py::TestAIS::test_msg_type_27_signed"
] | [
"tests/test_decode.py::TestAIS::test_b64encode_str",
"tests/test_decode.py::TestAIS::test_b64encode_str_empty",
"tests/test_decode.py::TestAIS::test_bits2bytes",
"tests/test_decode.py::TestAIS::test_broken_messages",
"tests/test_decode.py::TestAIS::test_byte_stream",
"tests/test_decode.py::TestAIS::test_bytes2bits",
"tests/test_decode.py::TestAIS::test_decode_1_speed",
"tests/test_decode.py::TestAIS::test_decode_and_merge",
"tests/test_decode.py::TestAIS::test_decode_out_of_order",
"tests/test_decode.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_decode.py::TestAIS::test_empty_channel",
"tests/test_decode.py::TestAIS::test_issue_46_a",
"tests/test_decode.py::TestAIS::test_issue_46_b",
"tests/test_decode.py::TestAIS::test_issue_50",
"tests/test_decode.py::TestAIS::test_msg_too_short_enum_is_none",
"tests/test_decode.py::TestAIS::test_msg_type",
"tests/test_decode.py::TestAIS::test_msg_type_10_a",
"tests/test_decode.py::TestAIS::test_msg_type_10_b",
"tests/test_decode.py::TestAIS::test_msg_type_11",
"tests/test_decode.py::TestAIS::test_msg_type_12_a",
"tests/test_decode.py::TestAIS::test_msg_type_12_b",
"tests/test_decode.py::TestAIS::test_msg_type_13",
"tests/test_decode.py::TestAIS::test_msg_type_14",
"tests/test_decode.py::TestAIS::test_msg_type_15_a",
"tests/test_decode.py::TestAIS::test_msg_type_15_b",
"tests/test_decode.py::TestAIS::test_msg_type_16",
"tests/test_decode.py::TestAIS::test_msg_type_17_a",
"tests/test_decode.py::TestAIS::test_msg_type_17_b",
"tests/test_decode.py::TestAIS::test_msg_type_17_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_18",
"tests/test_decode.py::TestAIS::test_msg_type_18_speed",
"tests/test_decode.py::TestAIS::test_msg_type_19",
"tests/test_decode.py::TestAIS::test_msg_type_1_a",
"tests/test_decode.py::TestAIS::test_msg_type_1_b",
"tests/test_decode.py::TestAIS::test_msg_type_1_c",
"tests/test_decode.py::TestAIS::test_msg_type_20",
"tests/test_decode.py::TestAIS::test_msg_type_21",
"tests/test_decode.py::TestAIS::test_msg_type_22_addressed",
"tests/test_decode.py::TestAIS::test_msg_type_22_broadcast",
"tests/test_decode.py::TestAIS::test_msg_type_23",
"tests/test_decode.py::TestAIS::test_msg_type_24",
"tests/test_decode.py::TestAIS::test_msg_type_25_a",
"tests/test_decode.py::TestAIS::test_msg_type_25_b",
"tests/test_decode.py::TestAIS::test_msg_type_25_c",
"tests/test_decode.py::TestAIS::test_msg_type_25_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_26_a",
"tests/test_decode.py::TestAIS::test_msg_type_26_b",
"tests/test_decode.py::TestAIS::test_msg_type_26_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_27",
"tests/test_decode.py::TestAIS::test_msg_type_3",
"tests/test_decode.py::TestAIS::test_msg_type_4_a",
"tests/test_decode.py::TestAIS::test_msg_type_4_b",
"tests/test_decode.py::TestAIS::test_msg_type_5",
"tests/test_decode.py::TestAIS::test_msg_type_6",
"tests/test_decode.py::TestAIS::test_msg_type_6_json_reverse",
"tests/test_decode.py::TestAIS::test_msg_type_6_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_7",
"tests/test_decode.py::TestAIS::test_msg_type_8",
"tests/test_decode.py::TestAIS::test_msg_type_8_to_json",
"tests/test_decode.py::TestAIS::test_msg_type_9",
"tests/test_decode.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_decode.py::TestAIS::test_multiline_message",
"tests/test_decode.py::TestAIS::test_nmea_decode",
"tests/test_decode.py::TestAIS::test_nmea_decode_unknown_msg",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_creation",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_decoding",
"tests/test_decode.py::TestAIS::test_none_values_converter_for_all_messages",
"tests/test_decode.py::TestAIS::test_rot_decode_yields_expected_values",
"tests/test_decode.py::TestAIS::test_rot_encode_yields_expected_values",
"tests/test_decode.py::TestAIS::test_to_dict_non_enum",
"tests/test_decode.py::TestAIS::test_to_json",
"tests/test_decode.py::TestAIS::test_turn_is_none_for_127_or_128",
"tests/test_decode.py::TestAIS::test_type_22_very_short",
"tests/test_decode.py::TestAIS::test_type_25_very_short",
"tests/test_decode.py::TestAIS::test_type_26_very_short",
"tests/test_decode.py::TestAIS::test_types_for_messages"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-24T12:16:39Z" | mit |
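
A short sketch tied to the type-27 issue above: once `lon`/`lat` are decoded as signed fields, western and southern coordinates come out negative. The payload and expected numbers are copied from this record's test patch.

```python
# Sketch of the corrected type-27 decoding; values come from the test patch.
from pyais.decode import decode

msg = decode('!AIVDO,1,1,,A,K01;FQh?PbtE3P00,0*75').asdict()

assert msg['mmsi'] == 1234567
assert msg['lon'] == -13.368333   # negative: west of the prime meridian
assert msg['lat'] == -50.121667   # negative: southern hemisphere
```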
|
M3t0r__tpl-12 | diff --git a/tpl/__init__.py b/tpl/__init__.py
index 96b6c41..32297a0 100644
--- a/tpl/__init__.py
+++ b/tpl/__init__.py
@@ -61,15 +61,19 @@ def main(*args):
for data in loaded_data:
collated_data = merge_data(collated_data, data)
+ # set up Jinja2 environment
+ j_env = jinja2.Environment(
+ keep_trailing_newline=True
+ )
+
# create template
with open_file(arguments[0]) as template_stream:
- template = jinja2.Template(template_stream.read())
+ template = j_env.from_string(template_stream.read())
template.filename = arguments[0]
# and render to output
with open_file(arguments[1], "w") as output:
template.stream(collated_data).dump(output)
- output.write("\n") # does the template eat this or the dump call?
return os.EX_OK
| M3t0r/tpl | b534fa59fb808b10031869fe51f5e6382a1055dd | diff --git a/tests/cli/test_faulty_invocations.py b/tests/cli/test_faulty_invocations.py
index d81fbd8..d538ec6 100644
--- a/tests/cli/test_faulty_invocations.py
+++ b/tests/cli/test_faulty_invocations.py
@@ -9,7 +9,7 @@ def test_key_does_not_exist(cli):
cli.path_for_content("{{FOO}}"),
env={}
)
- assert p.stdout == "\n"
+ assert p.stdout == ""
def test_corrupt_yaml(cli):
diff --git a/tests/cli/test_standard_usecases.py b/tests/cli/test_standard_usecases.py
index c3c9426..90d0c68 100644
--- a/tests/cli/test_standard_usecases.py
+++ b/tests/cli/test_standard_usecases.py
@@ -3,12 +3,12 @@ from . import cli
def test_source_environment(cli):
p = cli("-e", cli.path_for_content("{{FOO}}"), env={"FOO": "bar"})
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_unicode_var(cli):
p = cli("-e", cli.path_for_content("{{FOO}}"), env={"FOO": "🐍"})
- assert p.stdout == "🐍\n"
+ assert p.stdout == "🐍"
def test_shadowing_json_env(cli):
@@ -18,7 +18,7 @@ def test_shadowing_json_env(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "env"}
)
- assert p.stdout == "env\n"
+ assert p.stdout == "env"
def test_shadowing_yaml_env(cli):
@@ -28,7 +28,7 @@ def test_shadowing_yaml_env(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "env"}
)
- assert p.stdout == "env\n"
+ assert p.stdout == "env"
def test_yaml_flow_style(cli):
@@ -36,7 +36,7 @@ def test_yaml_flow_style(cli):
"--yaml", cli.path_for_content('{"FOO": "yaml"}'),
cli.path_for_content("{{FOO}}")
)
- assert p.stdout == "yaml\n"
+ assert p.stdout == "yaml"
def test_environment_by_default(cli):
@@ -44,7 +44,7 @@ def test_environment_by_default(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "bar"}
)
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_sub_dict_shadowing(cli):
@@ -53,7 +53,7 @@ def test_sub_dict_shadowing(cli):
"--json", cli.path_for_json({"FOO": {"BAR": "second"}}),
cli.path_for_content("{{FOO['BAR']}}")
)
- assert p.stdout == "second\n"
+ assert p.stdout == "second"
def test_sub_dict_merging(cli):
@@ -62,7 +62,7 @@ def test_sub_dict_merging(cli):
"--json", cli.path_for_json({"merge": {"BAR": "bar"}}),
cli.path_for_content("{{merge['FOO']}}{{merge['BAR']}}")
)
- assert p.stdout == "foobar\n"
+ assert p.stdout == "foobar"
def test_second_sub_dict_shadowing(cli):
@@ -71,7 +71,7 @@ def test_second_sub_dict_shadowing(cli):
"--json", cli.path_for_json({"merge": {"deeper": {"overwritten": "bar"}}}),
cli.path_for_content("{{merge.deeper.overwritten}}")
)
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_second_sub_dict_merging(cli):
@@ -80,7 +80,7 @@ def test_second_sub_dict_merging(cli):
"--json", cli.path_for_json({"merge": {"deeper": {"BAR": "bar"}}}),
cli.path_for_content("{{merge.deeper.FOO}}{{merge.deeper.BAR}}")
)
- assert p.stdout == "foobar\n"
+ assert p.stdout == "foobar"
def test_shadowing_of_dict(cli):
@@ -89,4 +89,29 @@ def test_shadowing_of_dict(cli):
"--json", cli.path_for_json({"merge": 'bar'}),
cli.path_for_content("{{merge}}")
)
+ assert p.stdout == "bar"
+
+
+def test_keep_no_newline_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "bar"
+
+
+def test_keep_one_newline_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}\n"), env={"FOO": "bar"})
assert p.stdout == "bar\n"
+
+
+def test_keep_two_newlines_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}\n\n"), env={"FOO": "bar"})
+ assert p.stdout == "bar\n\n"
+
+
+def test_keep_one_newline_at_beginning(cli):
+ p = cli(cli.path_for_content("\n{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "\nbar"
+
+
+def test_keep_two_newlines_at_beginning(cli):
+ p = cli(cli.path_for_content("\n\n{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "\n\nbar"
| Fix trailing newline issue
In https://github.com/M3t0r/tpl/blob/feceeed182f1c2553b827d8f431f6be800204250/tpl/__init__.py#L72 `tpl` always adds a trailing newline instead of respecting what the template has. I added this to have nicer output on the command line, but it turns out Jinja has a setting that controls whether it should keep the last newline, if any. It's called `keep_trailing_newline` and is part of the [`jinja2.Environment`](http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment).
We should add tests to see if `tpl` only prints a newline if one is actually present in the template. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/cli/test_faulty_invocations.py::test_key_does_not_exist",
"tests/cli/test_standard_usecases.py::test_source_environment",
"tests/cli/test_standard_usecases.py::test_unicode_var",
"tests/cli/test_standard_usecases.py::test_shadowing_json_env",
"tests/cli/test_standard_usecases.py::test_shadowing_yaml_env",
"tests/cli/test_standard_usecases.py::test_yaml_flow_style",
"tests/cli/test_standard_usecases.py::test_environment_by_default",
"tests/cli/test_standard_usecases.py::test_sub_dict_shadowing",
"tests/cli/test_standard_usecases.py::test_sub_dict_merging",
"tests/cli/test_standard_usecases.py::test_second_sub_dict_shadowing",
"tests/cli/test_standard_usecases.py::test_second_sub_dict_merging",
"tests/cli/test_standard_usecases.py::test_shadowing_of_dict",
"tests/cli/test_standard_usecases.py::test_keep_no_newline_at_end",
"tests/cli/test_standard_usecases.py::test_keep_one_newline_at_beginning",
"tests/cli/test_standard_usecases.py::test_keep_two_newlines_at_beginning"
] | [
"tests/cli/test_faulty_invocations.py::test_corrupt_yaml",
"tests/cli/test_faulty_invocations.py::test_corrupt_json",
"tests/cli/test_standard_usecases.py::test_keep_one_newline_at_end",
"tests/cli/test_standard_usecases.py::test_keep_two_newlines_at_end"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2018-10-12T19:00:06Z" | mit |
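
A minimal sketch of the Jinja2 behaviour the `tpl` patch relies on, shown with plain Jinja2 calls rather than `tpl` itself (the `from_string`/`render` usage is standard Jinja2, not taken from this record): with `keep_trailing_newline=True` the rendered output mirrors whatever trailing newlines the template source has.

```python
# Sketch of keep_trailing_newline: Jinja2 normally strips one trailing
# newline from a template; this flag keeps the template's ending intact.
import jinja2

env = jinja2.Environment(keep_trailing_newline=True)

assert env.from_string("{{FOO}}").render(FOO="bar") == "bar"
assert env.from_string("{{FOO}}\n").render(FOO="bar") == "bar\n"
assert env.from_string("{{FOO}}\n\n").render(FOO="bar") == "bar\n\n"
```

This is the same expectation the new CLI tests encode: no newline is added that the template did not contain.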
|
MGough__sensorhub-2 | diff --git a/sensorhub/hub.py b/sensorhub/hub.py
index c5b0f31..d1c72d8 100644
--- a/sensorhub/hub.py
+++ b/sensorhub/hub.py
@@ -29,13 +29,13 @@ class StatusRegisterErrorCode(Enum):
class SensorHub:
- bus: SMBus
+ _bus: SMBus
- def __init__(self):
- self.bus = SMBus(DEVICE_BUS)
+ def __init__(self, system_management_bus: SMBus):
+ self._bus = system_management_bus or SMBus(DEVICE_BUS)
def _read_sensor_board_register(self, buffer: SensorRegister) -> int:
- return self.bus.read_byte_data(DEVICE_ADDR, buffer.value)
+ return self._bus.read_byte_data(DEVICE_ADDR, buffer.value)
def _get_error_codes(self) -> int:
"""
diff --git a/setup.py b/setup.py
index 581900e..5b1420c 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="sensorhub",
- version="1.0.3",
+ version="2.0.0",
author="Merlin Gough",
author_email="[email protected]",
description="A simple library to use with the DockerPi SensorHub (EP-0106)",
@@ -14,7 +14,7 @@ setuptools.setup(
url="https://github.com/MGough/sensorhub",
packages=["sensorhub"],
install_requires=["smbus2>=0.3.0"],
- tests_require=["pytest>=5.3.5"],
+ tests_require=["pytest>=5.4.3"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
| MGough/sensorhub | 6b871396bd96541a55a07789f62ba5ab6dbf2ae5 | diff --git a/test/test_hub.py b/test/test_hub.py
index 03e45a2..4d639e6 100644
--- a/test/test_hub.py
+++ b/test/test_hub.py
@@ -1,11 +1,9 @@
from pytest import fixture, raises, mark
-from unittest.mock import patch, call
+from unittest.mock import call, Mock
-from sensorhub.hub import SensorHub, SensorRegister
+from smbus2 import SMBus
-@fixture
-def device_bus():
- return 1
+from sensorhub.hub import SensorHub, SensorRegister
@fixture
@@ -14,17 +12,13 @@ def device_address():
@fixture
-def sensor_hub():
- with patch("sensorhub.hub.SMBus", autospec=True):
- return SensorHub()
-
+def bus():
+ return Mock(SMBus, autospec=True)
-def test_correct_bus_is_created(device_bus):
- with patch("sensorhub.hub.SMBus", autospec=True) as bus:
- sensor_hub = SensorHub()
- bus.assert_called_once_with(device_bus)
- assert sensor_hub.bus == bus()
+@fixture
+def sensor_hub(bus):
+ return SensorHub(bus)
@mark.parametrize("error_code", [
@@ -33,12 +27,12 @@ def test_correct_bus_is_created(device_bus):
0b1001,
0b1101
])
-def test_off_board_temperature_out_of_range_returns_minus_1(error_code, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1
+def test_off_board_temperature_out_of_range_returns_minus_1(error_code, sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1
temperature = sensor_hub.get_off_board_temperature()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
assert temperature == -1
@@ -48,13 +42,13 @@ def test_off_board_temperature_out_of_range_returns_minus_1(error_code, sensor_h
0b1110,
0b1010
])
-def test_off_board_temperature_sensor_io_error(error_code, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = error_code
+def test_off_board_temperature_sensor_io_error(error_code, sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = error_code
with raises(IOError, match="Sensor Missing"):
sensor_hub.get_off_board_temperature()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
@mark.parametrize("error_code", [
@@ -63,67 +57,72 @@ def test_off_board_temperature_sensor_io_error(error_code, sensor_hub, device_ad
0b1000, # brightness sensor error
0b1100 # brightness out of range AND sensor error (just in case...)
])
-def test_off_board_temperature_sensor_returns_temperature(error_code, sensor_hub, device_address):
+def test_off_board_temperature_sensor_returns_temperature(error_code, sensor_hub, bus, device_address):
expected_temperature = 9001
- sensor_hub.bus.read_byte_data.side_effect = [error_code, 9001]
+ bus.read_byte_data.side_effect = [error_code, 9001]
temperature = sensor_hub.get_off_board_temperature()
assert temperature == expected_temperature
- sensor_hub.bus.read_byte_data.assert_has_calls([
+ bus.read_byte_data.assert_has_calls([
call(device_address, SensorRegister.STATUS.value),
call(device_address, SensorRegister.OFF_BOARD_TEMPERATURE.value)
])
-def test_humidity_is_not_up_to_date(sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1
+def test_humidity_is_not_up_to_date(sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1
humidity = sensor_hub.get_humidity()
assert humidity == -1
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value)
-def test_humidity_returned_when_it_is_up_to_date(sensor_hub, device_address):
+def test_humidity_returned_when_it_is_up_to_date(sensor_hub, bus, device_address):
expected_humidity = 33
- sensor_hub.bus.read_byte_data.side_effect = [0, expected_humidity]
+ bus.read_byte_data.side_effect = [0, expected_humidity]
humidity = sensor_hub.get_humidity()
assert humidity == expected_humidity
- sensor_hub.bus.read_byte_data.assert_has_calls([call(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value),
- call(device_address, SensorRegister.ON_BOARD_HUMIDITY.value)])
+ bus.read_byte_data.assert_has_calls([
+ call(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value),
+ call(device_address, SensorRegister.ON_BOARD_HUMIDITY.value)
+ ])
-def test_on_board_temperature_is_not_up_to_date(sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1
+def test_on_board_temperature_is_not_up_to_date(sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1
temperature = sensor_hub.get_temperature()
assert temperature == -1
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value)
+ bus.read_byte_data.assert_called_once_with(
+ device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value
+ )
-def test_on_board_temperature_returned_when_it_is_up_to_date(sensor_hub, device_address):
+def test_on_board_temperature_returned_when_it_is_up_to_date(sensor_hub, bus, device_address):
expected_temperature = 33
- sensor_hub.bus.read_byte_data.side_effect = [0, expected_temperature]
+ bus.read_byte_data.side_effect = [0, expected_temperature]
temperature = sensor_hub.get_temperature()
assert temperature == expected_temperature
- sensor_hub.bus.read_byte_data.assert_has_calls(
- [call(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value),
- call(device_address, SensorRegister.ON_BOARD_TEMPERATURE.value)])
+ bus.read_byte_data.assert_has_calls([
+ call(device_address, SensorRegister.ON_BOARD_SENSOR_OUT_OF_DATE.value),
+ call(device_address, SensorRegister.ON_BOARD_TEMPERATURE.value)
+ ])
@mark.parametrize("motion_detected, register_reading", [(True, 1), (False, 0)])
-def test_motion_detection(motion_detected, register_reading, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = register_reading
+def test_motion_detection(motion_detected, register_reading, sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = register_reading
assert sensor_hub.is_motion_detected() is motion_detected
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.MOTION.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.MOTION.value)
@mark.parametrize("error_code", [
@@ -132,13 +131,13 @@ def test_motion_detection(motion_detected, register_reading, sensor_hub, device_
0b1001,
0b1011
])
-def test_brightness_sensor_error(error_code, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1000
+def test_brightness_sensor_error(error_code, sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1000
with raises(IOError, match="Error accessing light sensor"):
sensor_hub.get_brightness()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
@mark.parametrize("error_code", [
@@ -147,12 +146,12 @@ def test_brightness_sensor_error(error_code, sensor_hub, device_address):
0b0111,
0b0101
])
-def test_brightness_out_of_range_returns_minus_1(error_code, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 100
+def test_brightness_out_of_range_returns_minus_1(error_code, sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 100
brightness = sensor_hub.get_brightness()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.STATUS.value)
assert brightness == -1
@@ -162,56 +161,56 @@ def test_brightness_out_of_range_returns_minus_1(error_code, sensor_hub, device_
0b0010, # temperature sensor error
0b0011 # temperature out of range AND sensor error (just in case...)
])
-def test_brightness_is_returned(error_code, sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.side_effect = [error_code, 1, 39]
+def test_brightness_is_returned(error_code, sensor_hub, bus, device_address):
+ bus.read_byte_data.side_effect = [error_code, 1, 39]
brightness = sensor_hub.get_brightness()
assert brightness == 295
- sensor_hub.bus.read_byte_data.assert_has_calls([
+ bus.read_byte_data.assert_has_calls([
call(device_address, SensorRegister.LIGHT_HIGH.value),
call(device_address, SensorRegister.LIGHT_LOW.value)
])
-def test_barometer_temperature_hardware_error(sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1
+def test_barometer_temperature_hardware_error(sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1
with raises(IOError, match="Barometric Sensor Error"):
sensor_hub.get_barometer_temperature()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value)
-def test_barometer_temperature_returns_correct_reading(sensor_hub, device_address):
+def test_barometer_temperature_returns_correct_reading(sensor_hub, bus, device_address):
expected_temperature = 36
- sensor_hub.bus.read_byte_data.side_effect = [0, expected_temperature]
+ bus.read_byte_data.side_effect = [0, expected_temperature]
temperature = sensor_hub.get_barometer_temperature()
assert temperature == expected_temperature
- sensor_hub.bus.read_byte_data.assert_has_calls([
+ bus.read_byte_data.assert_has_calls([
call(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value),
call(device_address, SensorRegister.BAROMETRIC_TEMPERATURE.value)
])
-def test_barometer_pressure_hardware_error(sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.return_value = 1
+def test_barometer_pressure_hardware_error(sensor_hub, bus, device_address):
+ bus.read_byte_data.return_value = 1
with raises(IOError, match="Barometric Sensor Error"):
sensor_hub.get_barometer_pressure()
- sensor_hub.bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value)
+ bus.read_byte_data.assert_called_once_with(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value)
-def test_barometer_pressure_returns_expected_reading(sensor_hub, device_address):
- sensor_hub.bus.read_byte_data.side_effect = [0, 3, 5, 45]
+def test_barometer_pressure_returns_expected_reading(sensor_hub, bus, device_address):
+ bus.read_byte_data.side_effect = [0, 3, 5, 45]
pressure = sensor_hub.get_barometer_pressure()
assert pressure == 29504.03
- sensor_hub.bus.read_byte_data.assert_has_calls([
+ bus.read_byte_data.assert_has_calls([
call(device_address, SensorRegister.BAROMETRIC_SENSOR_STATUS.value),
call(device_address, SensorRegister.BAROMETRIC_PRESSURE_LOW.value),
call(device_address, SensorRegister.BAROMETRIC_PRESSURE_MIDDLE.value),
| Refactor class to use dependency injection & remove mock usage
This would be nicer with dependency injection of SMBus, and a dummy object used in the tests, rather than a mock. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[1]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[5]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[9]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[13]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[2]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[6]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[14]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[10]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[0]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[4]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[8]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[12]",
"test/test_hub.py::test_humidity_is_not_up_to_date",
"test/test_hub.py::test_humidity_returned_when_it_is_up_to_date",
"test/test_hub.py::test_on_board_temperature_is_not_up_to_date",
"test/test_hub.py::test_on_board_temperature_returned_when_it_is_up_to_date",
"test/test_hub.py::test_motion_detection[True-1]",
"test/test_hub.py::test_motion_detection[False-0]",
"test/test_hub.py::test_brightness_sensor_error[8]",
"test/test_hub.py::test_brightness_sensor_error[10]",
"test/test_hub.py::test_brightness_sensor_error[9]",
"test/test_hub.py::test_brightness_sensor_error[11]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[4]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[6]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[7]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[5]",
"test/test_hub.py::test_brightness_is_returned[0]",
"test/test_hub.py::test_brightness_is_returned[1]",
"test/test_hub.py::test_brightness_is_returned[2]",
"test/test_hub.py::test_brightness_is_returned[3]",
"test/test_hub.py::test_barometer_temperature_hardware_error",
"test/test_hub.py::test_barometer_temperature_returns_correct_reading",
"test/test_hub.py::test_barometer_pressure_hardware_error",
"test/test_hub.py::test_barometer_pressure_returns_expected_reading"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-06-13T17:09:43Z" | mit |
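
A hedged sketch of the constructor-injection style this record's patch introduces: the `SMBus` instance is handed to `SensorHub`, so tests and callers can substitute a test double instead of patching the module. The call shapes mirror the updated tests, and the motion-register behaviour is the one those tests assert.

```python
# Sketch of constructor injection as exercised by the updated tests: the bus
# is passed in, so no real I2C hardware is needed to drive SensorHub here.
from unittest.mock import Mock

from smbus2 import SMBus
from sensorhub.hub import SensorHub

bus = Mock(SMBus)                    # test double standing in for the real bus
bus.read_byte_data.return_value = 0  # every register read returns 0

hub = SensorHub(bus)
assert hub.is_motion_detected() is False   # motion register read as 0
```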
|
MGough__sensorhub-4 | diff --git a/sensorhub/hub.py b/sensorhub/hub.py
index d1c72d8..baf98d3 100644
--- a/sensorhub/hub.py
+++ b/sensorhub/hub.py
@@ -31,7 +31,7 @@ class StatusRegisterErrorCode(Enum):
class SensorHub:
_bus: SMBus
- def __init__(self, system_management_bus: SMBus):
+ def __init__(self, system_management_bus: SMBus = None):
self._bus = system_management_bus or SMBus(DEVICE_BUS)
def _read_sensor_board_register(self, buffer: SensorRegister) -> int:
diff --git a/setup.py b/setup.py
index 5b1420c..750c44f 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="sensorhub",
- version="2.0.0",
+ version="2.0.1",
author="Merlin Gough",
author_email="[email protected]",
description="A simple library to use with the DockerPi SensorHub (EP-0106)",
| MGough/sensorhub | 139eb4578136a92d6e9a8661afe383857c402c0e | diff --git a/test/test_hub.py b/test/test_hub.py
index 4d639e6..bf40723 100644
--- a/test/test_hub.py
+++ b/test/test_hub.py
@@ -1,9 +1,9 @@
from pytest import fixture, raises, mark
-from unittest.mock import call, Mock
+from unittest.mock import call, Mock, patch
from smbus2 import SMBus
-from sensorhub.hub import SensorHub, SensorRegister
+from sensorhub.hub import SensorHub, SensorRegister, DEVICE_BUS
@fixture
@@ -216,3 +216,12 @@ def test_barometer_pressure_returns_expected_reading(sensor_hub, bus, device_add
call(device_address, SensorRegister.BAROMETRIC_PRESSURE_MIDDLE.value),
call(device_address, SensorRegister.BAROMETRIC_PRESSURE_HIGH.value)
])
+
+
+@patch("sensorhub.hub.SMBus", autospec=True)
+def test_can_be_created_with_default_smbus(sm_bus):
+ hub = SensorHub()
+
+ assert hub._bus == sm_bus.return_value
+ sm_bus.assert_called_once_with(DEVICE_BUS)
+
| Constructor argument
Hi, thanks for this, it's nicer than the code sample provided by the manufacturer.
I've noticed that the constructor now requires a positional argument; perhaps you could update the README's "how to use" paragraph?
I'm using `hub = SensorHub(None)` but I'm sure there are better ways to handle that.
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_hub.py::test_can_be_created_with_default_smbus"
] | [
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[1]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[5]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[9]",
"test/test_hub.py::test_off_board_temperature_out_of_range_returns_minus_1[13]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[2]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[6]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[14]",
"test/test_hub.py::test_off_board_temperature_sensor_io_error[10]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[0]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[4]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[8]",
"test/test_hub.py::test_off_board_temperature_sensor_returns_temperature[12]",
"test/test_hub.py::test_humidity_is_not_up_to_date",
"test/test_hub.py::test_humidity_returned_when_it_is_up_to_date",
"test/test_hub.py::test_on_board_temperature_is_not_up_to_date",
"test/test_hub.py::test_on_board_temperature_returned_when_it_is_up_to_date",
"test/test_hub.py::test_motion_detection[True-1]",
"test/test_hub.py::test_motion_detection[False-0]",
"test/test_hub.py::test_brightness_sensor_error[8]",
"test/test_hub.py::test_brightness_sensor_error[10]",
"test/test_hub.py::test_brightness_sensor_error[9]",
"test/test_hub.py::test_brightness_sensor_error[11]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[4]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[6]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[7]",
"test/test_hub.py::test_brightness_out_of_range_returns_minus_1[5]",
"test/test_hub.py::test_brightness_is_returned[0]",
"test/test_hub.py::test_brightness_is_returned[1]",
"test/test_hub.py::test_brightness_is_returned[2]",
"test/test_hub.py::test_brightness_is_returned[3]",
"test/test_hub.py::test_barometer_temperature_hardware_error",
"test/test_hub.py::test_barometer_temperature_returns_correct_reading",
"test/test_hub.py::test_barometer_pressure_hardware_error",
"test/test_hub.py::test_barometer_pressure_returns_expected_reading"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-01-23T13:06:36Z" | mit |
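
A brief sketch of the two call styles available once this record's patch makes the bus argument optional. Note that both forms ultimately open the I2C bus, so the sketch only runs on hardware where `SMBus(DEVICE_BUS)` is available.

```python
# Sketch of both constructor forms after the fix; requires real I2C hardware,
# since either path ends up opening SMBus(DEVICE_BUS).
from smbus2 import SMBus
from sensorhub.hub import DEVICE_BUS, SensorHub

hub = SensorHub()                    # the library creates its own SMBus
hub = SensorHub(SMBus(DEVICE_BUS))   # or inject a bus you manage yourself
```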
|
Ma-r-co__savoia-11 | diff --git a/src/savoia/datafeed/datafeed.py b/src/savoia/datafeed/datafeed.py
index 1fd53f0..942ca41 100644
--- a/src/savoia/datafeed/datafeed.py
+++ b/src/savoia/datafeed/datafeed.py
@@ -11,7 +11,7 @@ from savoia.event.event import Event, TickEvent
from savoia.types.types import Pair
from logging import getLogger, Logger
-from typing import List, Dict, Iterator
+from typing import List, Iterator, Tuple
from queue import Queue
from abc import ABCMeta, abstractmethod
import time
@@ -57,7 +57,7 @@ class HistoricCSVDataFeeder(DataFeeder):
pairs: List[Pair]
feed_q: 'Queue[Event]'
csv_dir: str
- pair_frames: Dict[Pair, pd.DataFrame]
+ pair_frames: List[str]
file_dates: List[str]
cur_date_idx: int
cur_date_pairs: pd.DataFrame
@@ -82,7 +82,6 @@ class HistoricCSVDataFeeder(DataFeeder):
self.pairs = pairs
self.feed_q = feed_q
self.csv_dir = csv_dir
- self.pair_frames = {}
self.file_dates = self._list_all_file_dates()
self.continue_backtest = True
self.cur_date_idx = 0
@@ -111,7 +110,7 @@ class HistoricCSVDataFeeder(DataFeeder):
return de_dup_csv
def _open_convert_csv_files_for_day(self, date_str: str) \
- -> Iterator[pd.DataFrame]:
+ -> Iterator[Tuple[str, str, str, str]]:
"""
Opens the CSV files from the data directory, converting
them into pandas DataFrames within a pairs dictionary.
@@ -121,20 +120,40 @@ class HistoricCSVDataFeeder(DataFeeder):
ordered, allowing tick data events to be added to the queue
in a chronological fashion.
"""
+ # for p in self.pairs:
+ # pair_path = os.path.join(self.csv_dir, '%s_%s.csv' % (p, date_str))
+ # self.logger.info("start read: %s", str(pair_path))
+ # with open(pair_path, 'r') as f:
+
+ # self.pair_frames[p] = pd.read_csv(
+ # pair_path,
+ # header=0,
+ # index_col=0,
+ # parse_dates=["Time"],
+ # dayfirst=True,
+ # names=("Time", "Ask", "Bid", "AskVolume", "BidVolume")
+ # )
+ # self.pair_frames[p]["Pair"] = p
+ # self.logger.info("end read: %s", str(pair_path))
+ # return pd.concat(self.pair_frames.values()).sort_index().iterrows()
+ self.pair_frames = []
for p in self.pairs:
pair_path = os.path.join(self.csv_dir, '%s_%s.csv' % (p, date_str))
self.logger.info("start read: %s", str(pair_path))
- self.pair_frames[p] = pd.io.parsers.read_csv(
- pair_path,
- header=0,
- index_col=0,
- parse_dates=["Time"],
- dayfirst=True,
- names=("Time", "Ask", "Bid", "AskVolume", "BidVolume")
- )
+ with open(pair_path, 'r') as f:
+ f.__next__()
+ for line in f:
+ self.pair_frames.append(line + f',{p}')
self.logger.info("end read: %s", str(pair_path))
- self.pair_frames[p]["Pair"] = p
- return pd.concat(self.pair_frames.values()).sort_index().iterrows()
+ self.logger.info('start sort')
+ self.pair_frames.sort()
+
+ def _gen() -> Iterator[Tuple[str, str, str, str]]:
+ for row in self.pair_frames:
+ date, ask, bid, ask_volume, bid_volume, pair = row.split(',')
+ yield date, ask, bid, pair
+
+ return _gen()
def _update_csv_for_day(self) -> bool:
try:
@@ -148,19 +167,19 @@ class HistoricCSVDataFeeder(DataFeeder):
def _stream_next_tick(self) -> None:
try:
- index, row = next(self.cur_date_pairs)
+ date, ask, bid, pair = next(self.cur_date_pairs)
except StopIteration:
# End of the current days data
if self._update_csv_for_day():
- index, row = next(self.cur_date_pairs)
+ date, ask, bid, pair = next(self.cur_date_pairs)
else: # End of the data
self.continue_backtest = False
return
- pair = row["Pair"]
- bid = Decimal(str(row["Bid"])).quantize(DECIMAL_PLACES)
- ask = Decimal(str(row["Ask"])).quantize(DECIMAL_PLACES)
+ date = pd.Timestamp(date)
+ bid = Decimal(bid).quantize(DECIMAL_PLACES)
+ ask = Decimal(ask).quantize(DECIMAL_PLACES)
- tev = TickEvent(pair, index, bid, ask)
+ tev = TickEvent(pair, date, bid, ask)
self.feed_q.put(tev)
def run(self) -> None:
diff --git a/src/savoia/performance/performance.py b/src/savoia/performance/performance.py
index 49b1d68..a5dfa8b 100644
--- a/src/savoia/performance/performance.py
+++ b/src/savoia/performance/performance.py
@@ -26,7 +26,7 @@ def create_drawdowns(pnl: pd.Series) -> Tuple[pd.Series, float, int]:
# Loop over the index range
for t in range(1, len(idx)):
- hwm.append(max(hwm[t - 1], pnl.ix[t]))
- drawdown.ix[t] = (hwm[t] - pnl.ix[t])
- duration.ix[t] = (0 if drawdown.ix[t] == 0 else duration.ix[t - 1] + 1)
+ hwm.append(max(hwm[t - 1], pnl.iloc[t]))
+ drawdown.iloc[t] = (hwm[t] - pnl.iloc[t])
+ duration.iloc[t] = (0 if drawdown.iloc[t] == 0 else duration.iloc[t - 1] + 1)
return drawdown, drawdown.max(), duration.max()
diff --git a/src/savoia/portfolio/portfolio.py b/src/savoia/portfolio/portfolio.py
index 1d9ad1a..c7b668f 100644
--- a/src/savoia/portfolio/portfolio.py
+++ b/src/savoia/portfolio/portfolio.py
@@ -54,7 +54,7 @@ class Portfolio(object):
def _create_equity_file(self) -> TextIO:
filename: str = "backtest.csv"
out_file: TextIO = open(os.path.join(OUTPUT_RESULTS_DIR, filename), "w")
- header: str = "Timestamp,Balance"
+ header: str = "Timestamp,Equity"
for pair in self.ticker.pairs:
header += ",%s" % pair
header += "\n"
@@ -102,7 +102,7 @@ class Portfolio(object):
if self.isBacktest:
out_line = f'{event.time}, {self.equity}'
for pair in self.ticker.pairs:
- out_line += ",{self.positions[pair].upl}"
+ out_line += f",{self.positions[pair].upl}"
out_line += "\n"
self.backtest_file.write(out_line)
| Ma-r-co/savoia | f3fc5321dfa8b9626e6871ec47e606c497ccaa5e | diff --git a/tests/datafeed/datafeed_test.py b/tests/datafeed/datafeed_test.py
index 083bb66..847de37 100644
--- a/tests/datafeed/datafeed_test.py
+++ b/tests/datafeed/datafeed_test.py
@@ -54,18 +54,13 @@ def test_open_convert_csv_files_for_day(
frame including all the ticks of a specified date.
"""
ticker, df = setupDataFeeder
- expected_frame = pd.io.parsers.read_csv(
+ expected_frame = open(
os.path.join(df.csv_dir, 'expected_frame_%s.csv' % (date)),
- header=0,
- index_col=0,
- parse_dates=["Time"],
- dayfirst=True,
- names=("Time", "Ask", "Bid", "AskVolume", "BidVolume", "Pair")
+ 'r'
)
actual_frame = df._open_convert_csv_files_for_day(date)
- for (a0, a1), (e0, e1) in zip(actual_frame, expected_frame.iterrows()):
- assert a0 == e0
- pd.testing.assert_series_equal(a1, e1)
+ for a0, e0 in zip(actual_frame, expected_frame):
+ assert a0 == tuple(e0[:-1].split(','))
def test_update_csv_for_day(setupDataFeeder: Tuple[Ticker, DataFeeder]) -> None:
@@ -75,18 +70,12 @@ def test_update_csv_for_day(setupDataFeeder: Tuple[Ticker, DataFeeder]) -> None:
ticker, df = setupDataFeeder
assert df._update_csv_for_day() is True
- expected_frame = pd.io.parsers.read_csv(
+ expected_frame = open(
os.path.join(df.csv_dir, 'expected_frame_%s.csv' % (20140102)),
- header=0,
- index_col=0,
- parse_dates=["Time"],
- dayfirst=True,
- names=("Time", "Ask", "Bid", "AskVolume", "BidVolume", "Pair")
+ 'r'
)
- for (a0, a1), (e0, e1) in zip(df.cur_date_pairs,
- expected_frame.iterrows()):
- assert a0 == e0
- pd.testing.assert_series_equal(a1, e1)
+ for a0, e0 in zip(df.cur_date_pairs, expected_frame):
+ assert a0 == tuple(e0[:-1].split(','))
assert df._update_csv_for_day() is False
@@ -117,4 +106,4 @@ def test_run(setupDataFeeder: Tuple[Ticker, DataFeeder]) -> None:
ticker, df = setupDataFeeder
df.run()
assert df.continue_backtest is False
- assert df.feed_q.qsize() == 10
+ assert df.feed_q.qsize() == 11
diff --git a/tests/datafeed/expected_frame_20140101.csv b/tests/datafeed/expected_frame_20140101.csv
index 3cdb2e6..38e794a 100644
--- a/tests/datafeed/expected_frame_20140101.csv
+++ b/tests/datafeed/expected_frame_20140101.csv
@@ -1,6 +1,5 @@
-Time,Ask,Bid,AskVolume,BidVolume,Pair
-01.01.2014 00:02:24.967,1.50054,1.49854,2.46,2.2,GBPUSD
-01.01.2014 00:03:24.967,120.005,119.985,2.46,2.2,USDJPY
-01.01.2014 00:04:42.625,1.49979,1.49779,1.12,2.73,GBPUSD
-01.01.2014 00:06:42.625,119.998,119.978,1.12,2.73,USDJPY
-01.01.2014 00:07:18.417,1.50256,1.50056,1.04,2.94,GBPUSD
\ No newline at end of file
+01.01.2014 00:02:24.967,1.50054,1.49854,GBPUSD
+01.01.2014 00:03:24.967,120.005,119.985,USDJPY
+01.01.2014 00:04:42.625,1.49979,1.49779,GBPUSD
+01.01.2014 00:06:42.625,119.998,119.978,USDJPY
+01.01.2014 00:07:18.417,1.50256,1.50056,GBPUSD
diff --git a/tests/datafeed/expected_frame_20140101_old.csv b/tests/datafeed/expected_frame_20140101_old.csv
new file mode 100644
index 0000000..3cdb2e6
--- /dev/null
+++ b/tests/datafeed/expected_frame_20140101_old.csv
@@ -0,0 +1,6 @@
+Time,Ask,Bid,AskVolume,BidVolume,Pair
+01.01.2014 00:02:24.967,1.50054,1.49854,2.46,2.2,GBPUSD
+01.01.2014 00:03:24.967,120.005,119.985,2.46,2.2,USDJPY
+01.01.2014 00:04:42.625,1.49979,1.49779,1.12,2.73,GBPUSD
+01.01.2014 00:06:42.625,119.998,119.978,1.12,2.73,USDJPY
+01.01.2014 00:07:18.417,1.50256,1.50056,1.04,2.94,GBPUSD
\ No newline at end of file
diff --git a/tests/datafeed/expected_frame_20140102.csv b/tests/datafeed/expected_frame_20140102.csv
index 68e3163..3743930 100644
--- a/tests/datafeed/expected_frame_20140102.csv
+++ b/tests/datafeed/expected_frame_20140102.csv
@@ -1,6 +1,5 @@
-Time,Ask,Bid,AskVolume,BidVolume,Pair
-02.01.2014 00:02:24.967,1.50054,1.49854,2.46,2.2,GBPUSD
-02.01.2014 00:03:24.967,120.005,119.985,2.46,2.2,USDJPY
-02.01.2014 00:04:42.625,1.49979,1.49779,1.12,2.73,GBPUSD
-02.01.2014 00:06:42.625,119.998,119.978,1.12,2.73,USDJPY
-02.01.2014 00:07:18.417,1.50256,1.50056,1.04,2.94,GBPUSD
\ No newline at end of file
+02.01.2014 00:02:24.967,1.50054,1.49854,GBPUSD
+02.01.2014 00:03:24.967,120.005,119.985,USDJPY
+02.01.2014 00:04:42.625,1.49979,1.49779,GBPUSD
+02.01.2014 00:06:42.625,119.998,119.978,USDJPY
+02.01.2014 00:07:18.417,1.50256,1.50056,GBPUSD
diff --git a/tests/datafeed/expected_frame_20140102_old.csv b/tests/datafeed/expected_frame_20140102_old.csv
new file mode 100644
index 0000000..68e3163
--- /dev/null
+++ b/tests/datafeed/expected_frame_20140102_old.csv
@@ -0,0 +1,6 @@
+Time,Ask,Bid,AskVolume,BidVolume,Pair
+02.01.2014 00:02:24.967,1.50054,1.49854,2.46,2.2,GBPUSD
+02.01.2014 00:03:24.967,120.005,119.985,2.46,2.2,USDJPY
+02.01.2014 00:04:42.625,1.49979,1.49779,1.12,2.73,GBPUSD
+02.01.2014 00:06:42.625,119.998,119.978,1.12,2.73,USDJPY
+02.01.2014 00:07:18.417,1.50256,1.50056,1.04,2.94,GBPUSD
\ No newline at end of file
diff --git a/tests/engine/engine_test.py b/tests/engine/engine_test.py
index 9011228..a74e11e 100644
--- a/tests/engine/engine_test.py
+++ b/tests/engine/engine_test.py
@@ -7,53 +7,60 @@ import logging.config
import os
import json
+import pytest
-def setup_logging() -> None:
- """Setup logging configuration"""
- path = os.path.join('/Users/makoto/Pywork/savoia/src/savoia/config',
- 'logging.json')
- if os.path.exists(path):
- with open(path, 'rt') as f:
- config = json.load(f)
- logging.config.dictConfig(config)
- else:
- raise FileNotFoundError('Not exist : %s' % path)
-
-
-datafeed: datafeed_params = {
- 'module_name': 'HistoricCSVDataFeeder',
- 'params': {
- 'csv_dir': CSV_DATA_DIR,
+
[email protected]()
+def test_engine_run() -> None:
+ def setup_logging() -> None:
+ """Setup logging configuration"""
+ path = os.path.join('/Users/makoto/Pywork/savoia/src/savoia/config',
+ 'logging.json')
+ if os.path.exists(path):
+ with open(path, 'rt') as f:
+ config = json.load(f)
+ logging.config.dictConfig(config)
+ else:
+ raise FileNotFoundError('Not exist : %s' % path)
+
+ datafeed: datafeed_params = {
+ 'module_name': 'HistoricCSVDataFeeder',
+ 'params': {
+ 'csv_dir': CSV_DATA_DIR,
+ }
+ }
+
+ execution: execution_params = {
+ 'module_name': 'SimulatedExecution',
+ 'params': {
+ 'heartbeat': 0
+ }
+ }
+
+ strategy: strategy_params = {
+ 'module_name': 'DummyStrategy',
+ 'params': {}
}
-}
-execution: execution_params = {
- 'module_name': 'SimulatedExecution',
- 'params': {
- 'heartbeat': 0
+ engine: engine_params = {
+ 'pairs': ['GBPUSD', 'USDJPY'],
+ 'home_currency': 'JPY',
+ 'equity': Decimal(10 ** 6),
+ 'isBacktest': True,
+ 'max_iters': 10 ** 7,
+ 'heart_beat': 0
}
-}
-
-strategy: strategy_params = {
- 'module_name': 'DummyStrategy',
- 'params': {}
-}
-
-engine: engine_params = {
- 'pairs': ['GBPUSD', 'USDJPY'],
- 'home_currency': 'JPY',
- 'equity': Decimal(10 ** 6),
- 'isBacktest': True,
- 'max_iters': 10 ** 7,
- 'heart_beat': 0
-}
-
-eg = Engine(
- engine=engine,
- datafeed=datafeed,
- execution=execution,
- strategy=strategy
-)
-
-setup_logging()
-eg.run()
+
+ eg = Engine(
+ engine=engine,
+ datafeed=datafeed,
+ execution=execution,
+ strategy=strategy
+ )
+
+ setup_logging()
+ eg.run()
+
+
+if __name__ == '__main__':
+ test_engine_run()
diff --git a/tests/portfolio/portfolio_test.py b/tests/portfolio/portfolio_test.py
index 05939df..12f1982 100644
--- a/tests/portfolio/portfolio_test.py
+++ b/tests/portfolio/portfolio_test.py
@@ -64,7 +64,7 @@ def test_create_equity_file(port: Portfolio) -> None:
out_file.close()
with open(filepath, "r") as f:
- assert f.read() == "Timestamp,Balance,GBPUSD,USDJPY\n"
+ assert f.read() == "Timestamp,Equity,GBPUSD,USDJPY\n"
# ================================================================
| Poor performance of HistoricCSVDataFeeder
- Refine the usage of pandas | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/datafeed/datafeed_test.py::test_open_convert_csv_files_for_day[20140101]",
"tests/datafeed/datafeed_test.py::test_open_convert_csv_files_for_day[20140102]",
"tests/datafeed/datafeed_test.py::test_update_csv_for_day"
] | [
"tests/datafeed/datafeed_test.py::test_list_all_csv_files",
"tests/datafeed/datafeed_test.py::test_list_all_file_dates",
"tests/datafeed/datafeed_test.py::test_stream_next_tick",
"tests/datafeed/datafeed_test.py::test_run"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-07-12T18:45:36Z" | mit |
|
MagicStack__immutables-13 | diff --git a/immutables/_map.c b/immutables/_map.c
index f9c8772..7e63562 100644
--- a/immutables/_map.c
+++ b/immutables/_map.c
@@ -3363,6 +3363,12 @@ map_reduce(MapObject *self)
return tup;
}
+static PyObject *
+map_py_class_getitem(PyObject *type, PyObject *item)
+{
+ Py_INCREF(type);
+ return type;
+}
static PyMethodDef Map_methods[] = {
{"set", (PyCFunction)map_py_set, METH_VARARGS, NULL},
@@ -3375,6 +3381,12 @@ static PyMethodDef Map_methods[] = {
{"update", (PyCFunction)map_py_update, METH_VARARGS | METH_KEYWORDS, NULL},
{"__reduce__", (PyCFunction)map_reduce, METH_NOARGS, NULL},
{"__dump__", (PyCFunction)map_py_dump, METH_NOARGS, NULL},
+ {
+ "__class_getitem__",
+ (PyCFunction)map_py_class_getitem,
+ METH_O|METH_CLASS,
+ NULL
+ },
{NULL, NULL}
};
diff --git a/immutables/_map.pyi b/immutables/_map.pyi
new file mode 100644
index 0000000..863d911
--- /dev/null
+++ b/immutables/_map.pyi
@@ -0,0 +1,87 @@
+from typing import Any
+from typing import Generic
+from typing import Hashable
+from typing import Iterable
+from typing import Iterator
+from typing import Literal
+from typing import Mapping
+from typing import MutableMapping
+from typing import NoReturn
+from typing import Tuple
+from typing import Type
+from typing import TypeVar
+from typing import Union
+
+
+K = TypeVar('K', bound=Hashable)
+V = TypeVar('V', bound=Any)
+D = TypeVar('D', bound=Any)
+
+
+class BitmapNode: ...
+
+
+class MapKeys(Generic[K]):
+ def __init__(self, c: int, m: BitmapNode) -> None: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[K]: ...
+
+
+class MapValues(Generic[V]):
+ def __init__(self, c: int, m: BitmapNode) -> None: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[V]: ...
+
+
+class MapItems(Generic[K, V]):
+ def __init__(self, c: int, m: BitmapNode) -> None: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[Tuple[K, V]]: ...
+
+
+class Map(Mapping[K, V]):
+ def __init__(
+ self, col: Union[Mapping[K, V], Iterable[Tuple[K, V]]] = ..., **kw: V
+ ): ...
+ def __reduce__(self) -> NoReturn: ...
+ def __len__(self) -> int: ...
+ def __eq__(self, other: Any) -> bool: ...
+ def update(
+ self, col: Union[Mapping[K, V], Iterable[Tuple[K, V]]] = ..., **kw: V
+ ) -> Map[K, V]: ...
+ def mutate(self) -> MapMutation[K, V]: ...
+ def set(self, key: K, val: V) -> Map[K, V]: ...
+ def delete(self, key: K) -> Map[K, V]: ...
+ def get(self, key: K, default: D = ...) -> Union[V, D]: ...
+ def __getitem__(self, key: K) -> V: ...
+ def __contains__(self, key: object) -> bool: ...
+ def __iter__(self) -> Iterator[K]: ...
+ def keys(self) -> MapKeys[K]: ...
+ def values(self) -> MapValues[V]: ...
+ def items(self) -> MapItems[K, V]: ...
+ def __hash__(self) -> int: ...
+ def __dump__(self) -> str: ...
+ def __class_getitem__(cls, item: Any) -> Type[Map]: ...
+
+
+S = TypeVar('S', bound='MapMutation')
+
+
+class MapMutation(MutableMapping[K, V]):
+ def __init__(self, count: int, root: BitmapNode) -> None: ...
+ def set(self, key: K, val: V) -> None: ...
+ def __enter__(self: S) -> S: ...
+ def __exit__(self, *exc: Any) -> Literal[False]: ...
+ def __iter__(self) -> NoReturn: ...
+ def __delitem__(self, key: K) -> None: ...
+ def __setitem__(self, key: K, val: V) -> None: ...
+ def pop(self, __key: K, __default: D = ...) -> Union[V, D]: ...
+ def get(self, key: K, default: D = ...) -> Union[V, D]: ...
+ def __getitem__(self, key: K) -> V: ...
+ def __contains__(self, key: Any) -> bool: ...
+ def update(
+ self, col: Union[Mapping[K, V], Iterable[Tuple[K, V]]] = ..., **kw: V
+ ): ...
+ def finish(self) -> Map[K, V]: ...
+ def __len__(self) -> int: ...
+ def __eq__(self, other: Any) -> bool: ...
diff --git a/immutables/map.py b/immutables/map.py
index 4c5cd49..3ea4656 100644
--- a/immutables/map.py
+++ b/immutables/map.py
@@ -630,6 +630,9 @@ class Map:
self.__root.dump(buf, 0)
return '\n'.join(buf)
+ def __class_getitem__(cls, item):
+ return cls
+
class MapMutation:
diff --git a/immutables/py.typed b/immutables/py.typed
new file mode 100644
index 0000000..b648ac9
--- /dev/null
+++ b/immutables/py.typed
@@ -0,0 +1,1 @@
+partial
| MagicStack/immutables | 1a51a34154654285e017e6006d16c03cbee92637 | diff --git a/tests/test_map.py b/tests/test_map.py
index a99b856..8d629f8 100644
--- a/tests/test_map.py
+++ b/tests/test_map.py
@@ -2,6 +2,7 @@ import collections.abc
import gc
import pickle
import random
+import sys
import unittest
import weakref
@@ -1296,6 +1297,10 @@ class BaseMapTest:
with self.assertRaisesRegex(TypeError, "can't pickle"):
pickle.dumps(h.mutate())
+ @unittest.skipIf(sys.version_info < (3, 7, 0), "__class_getitem__ is not available")
+ def test_map_is_subscriptable(self):
+ self.assertIs(self.Map[int, str], self.Map)
+
class PyMapTest(BaseMapTest, unittest.TestCase):
| mypy integration
Collections should implement [mypy Mapping generic](https://mypy.readthedocs.io/en/stable/generics.html#defining-sub-classes-of-generic-classes) to be properly typed | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_map.py::PyMapTest::test_map_is_subscriptable"
] | [
"tests/test_map.py::PyMapTest::test_abc_1",
"tests/test_map.py::PyMapTest::test_hash_1",
"tests/test_map.py::PyMapTest::test_hash_2",
"tests/test_map.py::PyMapTest::test_hashkey_helper_1",
"tests/test_map.py::PyMapTest::test_map_basics_1",
"tests/test_map.py::PyMapTest::test_map_basics_2",
"tests/test_map.py::PyMapTest::test_map_basics_3",
"tests/test_map.py::PyMapTest::test_map_basics_4",
"tests/test_map.py::PyMapTest::test_map_collision_1",
"tests/test_map.py::PyMapTest::test_map_collision_2",
"tests/test_map.py::PyMapTest::test_map_delete_1",
"tests/test_map.py::PyMapTest::test_map_delete_2",
"tests/test_map.py::PyMapTest::test_map_delete_3",
"tests/test_map.py::PyMapTest::test_map_delete_4",
"tests/test_map.py::PyMapTest::test_map_delete_5",
"tests/test_map.py::PyMapTest::test_map_delete_6",
"tests/test_map.py::PyMapTest::test_map_eq_1",
"tests/test_map.py::PyMapTest::test_map_eq_2",
"tests/test_map.py::PyMapTest::test_map_eq_3",
"tests/test_map.py::PyMapTest::test_map_gc_1",
"tests/test_map.py::PyMapTest::test_map_gc_2",
"tests/test_map.py::PyMapTest::test_map_getitem_1",
"tests/test_map.py::PyMapTest::test_map_in_1",
"tests/test_map.py::PyMapTest::test_map_items_1",
"tests/test_map.py::PyMapTest::test_map_items_2",
"tests/test_map.py::PyMapTest::test_map_items_3",
"tests/test_map.py::PyMapTest::test_map_items_4",
"tests/test_map.py::PyMapTest::test_map_keys_1",
"tests/test_map.py::PyMapTest::test_map_keys_2",
"tests/test_map.py::PyMapTest::test_map_mut_1",
"tests/test_map.py::PyMapTest::test_map_mut_10",
"tests/test_map.py::PyMapTest::test_map_mut_11",
"tests/test_map.py::PyMapTest::test_map_mut_12",
"tests/test_map.py::PyMapTest::test_map_mut_13",
"tests/test_map.py::PyMapTest::test_map_mut_14",
"tests/test_map.py::PyMapTest::test_map_mut_15",
"tests/test_map.py::PyMapTest::test_map_mut_16",
"tests/test_map.py::PyMapTest::test_map_mut_17",
"tests/test_map.py::PyMapTest::test_map_mut_18",
"tests/test_map.py::PyMapTest::test_map_mut_19",
"tests/test_map.py::PyMapTest::test_map_mut_2",
"tests/test_map.py::PyMapTest::test_map_mut_3",
"tests/test_map.py::PyMapTest::test_map_mut_4",
"tests/test_map.py::PyMapTest::test_map_mut_5",
"tests/test_map.py::PyMapTest::test_map_mut_6",
"tests/test_map.py::PyMapTest::test_map_mut_7",
"tests/test_map.py::PyMapTest::test_map_mut_8",
"tests/test_map.py::PyMapTest::test_map_mut_9",
"tests/test_map.py::PyMapTest::test_map_mut_stress",
"tests/test_map.py::PyMapTest::test_map_pickle",
"tests/test_map.py::PyMapTest::test_map_stress",
"tests/test_map.py::PyMapTest::test_map_values_1",
"tests/test_map.py::PyMapTest::test_map_values_2",
"tests/test_map.py::PyMapTest::test_repr_1",
"tests/test_map.py::PyMapTest::test_repr_2",
"tests/test_map.py::PyMapTest::test_repr_3",
"tests/test_map.py::CMapTest::test_abc_1",
"tests/test_map.py::CMapTest::test_hash_1",
"tests/test_map.py::CMapTest::test_hash_2",
"tests/test_map.py::CMapTest::test_hashkey_helper_1",
"tests/test_map.py::CMapTest::test_map_basics_1",
"tests/test_map.py::CMapTest::test_map_basics_2",
"tests/test_map.py::CMapTest::test_map_basics_3",
"tests/test_map.py::CMapTest::test_map_basics_4",
"tests/test_map.py::CMapTest::test_map_collision_1",
"tests/test_map.py::CMapTest::test_map_collision_2",
"tests/test_map.py::CMapTest::test_map_delete_1",
"tests/test_map.py::CMapTest::test_map_delete_2",
"tests/test_map.py::CMapTest::test_map_delete_3",
"tests/test_map.py::CMapTest::test_map_delete_4",
"tests/test_map.py::CMapTest::test_map_delete_5",
"tests/test_map.py::CMapTest::test_map_delete_6",
"tests/test_map.py::CMapTest::test_map_eq_1",
"tests/test_map.py::CMapTest::test_map_eq_2",
"tests/test_map.py::CMapTest::test_map_eq_3",
"tests/test_map.py::CMapTest::test_map_gc_1",
"tests/test_map.py::CMapTest::test_map_gc_2",
"tests/test_map.py::CMapTest::test_map_getitem_1",
"tests/test_map.py::CMapTest::test_map_in_1",
"tests/test_map.py::CMapTest::test_map_items_1",
"tests/test_map.py::CMapTest::test_map_items_2",
"tests/test_map.py::CMapTest::test_map_items_3",
"tests/test_map.py::CMapTest::test_map_items_4",
"tests/test_map.py::CMapTest::test_map_keys_1",
"tests/test_map.py::CMapTest::test_map_keys_2",
"tests/test_map.py::CMapTest::test_map_mut_1",
"tests/test_map.py::CMapTest::test_map_mut_10",
"tests/test_map.py::CMapTest::test_map_mut_11",
"tests/test_map.py::CMapTest::test_map_mut_12",
"tests/test_map.py::CMapTest::test_map_mut_13",
"tests/test_map.py::CMapTest::test_map_mut_14",
"tests/test_map.py::CMapTest::test_map_mut_15",
"tests/test_map.py::CMapTest::test_map_mut_16",
"tests/test_map.py::CMapTest::test_map_mut_17",
"tests/test_map.py::CMapTest::test_map_mut_18",
"tests/test_map.py::CMapTest::test_map_mut_19",
"tests/test_map.py::CMapTest::test_map_mut_2",
"tests/test_map.py::CMapTest::test_map_mut_3",
"tests/test_map.py::CMapTest::test_map_mut_4",
"tests/test_map.py::CMapTest::test_map_mut_5",
"tests/test_map.py::CMapTest::test_map_mut_6",
"tests/test_map.py::CMapTest::test_map_mut_7",
"tests/test_map.py::CMapTest::test_map_mut_8",
"tests/test_map.py::CMapTest::test_map_mut_9",
"tests/test_map.py::CMapTest::test_map_mut_stress",
"tests/test_map.py::CMapTest::test_map_stress",
"tests/test_map.py::CMapTest::test_map_values_1",
"tests/test_map.py::CMapTest::test_map_values_2",
"tests/test_map.py::CMapTest::test_repr_1",
"tests/test_map.py::CMapTest::test_repr_2",
"tests/test_map.py::CMapTest::test_repr_3"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2019-07-13T22:30:37Z" | apache-2.0 |
|
MagicStack__immutables-58 | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c992bda..7e2c51b 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -45,13 +45,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
- name: Set up Python 3.7
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
with:
python-version: 3.7
@@ -70,7 +70,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
os: [ubuntu-16.04, macos-latest, windows-latest]
exclude:
# Python 3.5 is unable to properly
@@ -80,13 +80,13 @@ jobs:
python-version: 3.5
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
@@ -130,7 +130,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 5
submodules: false
diff --git a/README.rst b/README.rst
index e62545e..4132bc0 100644
--- a/README.rst
+++ b/README.rst
@@ -12,7 +12,8 @@ An immutable mapping type for Python.
The underlying datastructure is a Hash Array Mapped Trie (HAMT)
used in Clojure, Scala, Haskell, and other functional languages.
This implementation is used in CPython 3.7 in the ``contextvars``
-module (see PEP 550 and PEP 567 for more details).
+module (see `PEP 550 <https://www.python.org/dev/peps/pep-0550/>`_ and
+`PEP 567 <https://www.python.org/dev/peps/pep-0567/>`_ for more details).
Immutable mappings based on HAMT have O(log N) performance for both
``set()`` and ``get()`` operations, which is essentially O(1) for
diff --git a/immutables/_map.c b/immutables/_map.c
index 9f0a586..7e510fd 100644
--- a/immutables/_map.c
+++ b/immutables/_map.c
@@ -3194,14 +3194,14 @@ map_py_repr(BaseMapObject *m)
if (MapMutation_Check(m)) {
if (_PyUnicodeWriter_WriteASCIIString(
- &writer, "<immutables.MapMutation({", 25) < 0)
+ &writer, "immutables.MapMutation({", 24) < 0)
{
goto error;
}
}
else {
if (_PyUnicodeWriter_WriteASCIIString(
- &writer, "<immutables.Map({", 17) < 0)
+ &writer, "immutables.Map({", 16) < 0)
{
goto error;
}
@@ -3255,16 +3255,6 @@ map_py_repr(BaseMapObject *m)
goto error;
}
- PyObject *addr = PyUnicode_FromFormat(" at %p>", m);
- if (addr == NULL) {
- goto error;
- }
- if (_PyUnicodeWriter_WriteStr(&writer, addr) < 0) {
- Py_DECREF(addr);
- goto error;
- }
- Py_DECREF(addr);
-
Py_ReprLeave((PyObject *)m);
return _PyUnicodeWriter_Finish(&writer);
diff --git a/immutables/map.py b/immutables/map.py
index 7c16139..fe9dbaf 100644
--- a/immutables/map.py
+++ b/immutables/map.py
@@ -649,8 +649,7 @@ class Map:
items = []
for key, val in self.items():
items.append("{!r}: {!r}".format(key, val))
- return '<immutables.Map({{{}}}) at 0x{:0x}>'.format(
- ', '.join(items), id(self))
+ return 'immutables.Map({{{}}})'.format(', '.join(items))
def __dump__(self): # pragma: no cover
buf = []
@@ -818,8 +817,7 @@ class MapMutation:
items = []
for key, val in self.__root.items():
items.append("{!r}: {!r}".format(key, val))
- return '<immutables.MapMutation({{{}}}) at 0x{:0x}>'.format(
- ', '.join(items), id(self))
+ return 'immutables.MapMutation({{{}}})'.format(', '.join(items))
def __len__(self):
return self.__count
diff --git a/setup.py b/setup.py
index b54270d..cb31d2c 100644
--- a/setup.py
+++ b/setup.py
@@ -59,6 +59,7 @@ setuptools.setup(
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
| MagicStack/immutables | 45105ecd8b56a4d88dbcb380fcb8ff4b9cc7b19c | diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 45367be..019007f 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -15,7 +15,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
os: [windows-latest, ubuntu-18.04, macos-latest]
exclude:
# Python 3.5 is unable to properly
@@ -25,7 +25,7 @@ jobs:
python-version: 3.5
steps:
- - uses: actions/checkout@v1
+ - uses: actions/checkout@v2
with:
fetch-depth: 50
submodules: true
@@ -41,7 +41,7 @@ jobs:
__version__\s*=\s*(?:['"])([[:PEP440:]])(?:['"])
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ uses: actions/setup-python@v2
if: steps.release.outputs.version == 0
with:
python-version: ${{ matrix.python-version }}
diff --git a/tests/test_map.py b/tests/test_map.py
index b6ee7f1..4a473b3 100644
--- a/tests/test_map.py
+++ b/tests/test_map.py
@@ -845,11 +845,10 @@ class BaseMapTest:
def test_repr_1(self):
h = self.Map()
- self.assertTrue(repr(h).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({})')
h = h.set(1, 2).set(2, 3).set(3, 4)
- self.assertTrue(repr(h).startswith(
- '<immutables.Map({1: 2, 2: 3, 3: 4}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({1: 2, 2: 3, 3: 4})')
def test_repr_2(self):
h = self.Map()
@@ -879,8 +878,7 @@ class BaseMapTest:
h = h.set(k, 1)
k.val = h
- self.assertTrue(repr(h).startswith(
- '<immutables.Map({{...}: 1}) at 0x'))
+ self.assertEqual(repr(h), 'immutables.Map({{...}: 1})')
def test_hash_1(self):
h = self.Map()
@@ -964,8 +962,7 @@ class BaseMapTest:
h = h.set('a', 1)
hm1 = h.mutate()
- self.assertTrue(repr(hm1).startswith(
- "<immutables.MapMutation({'a': 1})"))
+ self.assertEqual(repr(hm1), "immutables.MapMutation({'a': 1})")
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(hm1)
diff --git a/tests/test_none_keys.py b/tests/test_none_keys.py
index f7969f3..92e7813 100644
--- a/tests/test_none_keys.py
+++ b/tests/test_none_keys.py
@@ -61,7 +61,7 @@ class BaseNoneTest:
self.assertEqual(len(m), 1)
self.assertTrue(None in m)
self.assertEqual(m[None], 1)
- self.assertTrue(repr(m).startswith('<immutables.Map({None: 1}) at 0x'))
+ self.assertEqual(repr(m), 'immutables.Map({None: 1})')
for level in range(7):
key = NoneCollision('a', level)
@@ -72,7 +72,7 @@ class BaseNoneTest:
m = m.delete(None)
self.assertEqual(len(m), 0)
self.assertFalse(None in m)
- self.assertTrue(repr(m).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m), 'immutables.Map({})')
self.assertEqual(m, self.Map())
@@ -125,7 +125,7 @@ class BaseNoneTest:
self.assertFalse(None in m3)
self.assertFalse(key in m3)
self.assertEqual(m3, self.Map())
- self.assertTrue(repr(m3).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m3), 'immutables.Map({})')
with self.assertRaises(KeyError):
m3.delete(None)
with self.assertRaises(KeyError):
@@ -144,7 +144,7 @@ class BaseNoneTest:
self.assertFalse(None in m4)
self.assertFalse(key in m4)
self.assertEqual(m4, self.Map())
- self.assertTrue(repr(m4).startswith('<immutables.Map({}) at 0x'))
+ self.assertEqual(repr(m4), 'immutables.Map({})')
with self.assertRaises(KeyError):
m4.delete(None)
with self.assertRaises(KeyError):
| `immutables.Map.__str__` doesn't match `dict.__str__`
```python
In [7]: d = {'a': 1}
In [8]: str(d)
Out[8]: "{'a': 1}"
In [9]: str(immutables.Map(d))
Out[9]: "<immutables.Map({'a': 1}) at 0x108ed56c0>"
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_map.py::PyMapTest::test_map_mut_3",
"tests/test_map.py::PyMapTest::test_repr_1",
"tests/test_map.py::PyMapTest::test_repr_3",
"tests/test_none_keys.py::PyMapNoneTest::test_none_as_key",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_1"
] | [
"tests/test_map.py::PyMapTest::test_abc_1",
"tests/test_map.py::PyMapTest::test_hash_1",
"tests/test_map.py::PyMapTest::test_hash_2",
"tests/test_map.py::PyMapTest::test_hashkey_helper_1",
"tests/test_map.py::PyMapTest::test_kwarg_named_col",
"tests/test_map.py::PyMapTest::test_map_basics_1",
"tests/test_map.py::PyMapTest::test_map_basics_2",
"tests/test_map.py::PyMapTest::test_map_basics_3",
"tests/test_map.py::PyMapTest::test_map_basics_4",
"tests/test_map.py::PyMapTest::test_map_collision_1",
"tests/test_map.py::PyMapTest::test_map_collision_2",
"tests/test_map.py::PyMapTest::test_map_delete_1",
"tests/test_map.py::PyMapTest::test_map_delete_2",
"tests/test_map.py::PyMapTest::test_map_delete_3",
"tests/test_map.py::PyMapTest::test_map_delete_4",
"tests/test_map.py::PyMapTest::test_map_delete_5",
"tests/test_map.py::PyMapTest::test_map_delete_6",
"tests/test_map.py::PyMapTest::test_map_eq_1",
"tests/test_map.py::PyMapTest::test_map_eq_2",
"tests/test_map.py::PyMapTest::test_map_eq_3",
"tests/test_map.py::PyMapTest::test_map_gc_1",
"tests/test_map.py::PyMapTest::test_map_gc_2",
"tests/test_map.py::PyMapTest::test_map_getitem_1",
"tests/test_map.py::PyMapTest::test_map_in_1",
"tests/test_map.py::PyMapTest::test_map_is_subscriptable",
"tests/test_map.py::PyMapTest::test_map_items_1",
"tests/test_map.py::PyMapTest::test_map_items_2",
"tests/test_map.py::PyMapTest::test_map_items_3",
"tests/test_map.py::PyMapTest::test_map_items_4",
"tests/test_map.py::PyMapTest::test_map_keys_1",
"tests/test_map.py::PyMapTest::test_map_keys_2",
"tests/test_map.py::PyMapTest::test_map_mut_1",
"tests/test_map.py::PyMapTest::test_map_mut_10",
"tests/test_map.py::PyMapTest::test_map_mut_11",
"tests/test_map.py::PyMapTest::test_map_mut_12",
"tests/test_map.py::PyMapTest::test_map_mut_13",
"tests/test_map.py::PyMapTest::test_map_mut_14",
"tests/test_map.py::PyMapTest::test_map_mut_15",
"tests/test_map.py::PyMapTest::test_map_mut_16",
"tests/test_map.py::PyMapTest::test_map_mut_17",
"tests/test_map.py::PyMapTest::test_map_mut_18",
"tests/test_map.py::PyMapTest::test_map_mut_19",
"tests/test_map.py::PyMapTest::test_map_mut_2",
"tests/test_map.py::PyMapTest::test_map_mut_20",
"tests/test_map.py::PyMapTest::test_map_mut_21",
"tests/test_map.py::PyMapTest::test_map_mut_4",
"tests/test_map.py::PyMapTest::test_map_mut_5",
"tests/test_map.py::PyMapTest::test_map_mut_6",
"tests/test_map.py::PyMapTest::test_map_mut_7",
"tests/test_map.py::PyMapTest::test_map_mut_8",
"tests/test_map.py::PyMapTest::test_map_mut_9",
"tests/test_map.py::PyMapTest::test_map_mut_stress",
"tests/test_map.py::PyMapTest::test_map_pickle",
"tests/test_map.py::PyMapTest::test_map_stress_01",
"tests/test_map.py::PyMapTest::test_map_stress_02",
"tests/test_map.py::PyMapTest::test_map_values_1",
"tests/test_map.py::PyMapTest::test_map_values_2",
"tests/test_map.py::PyMapTest::test_repr_2",
"tests/test_map.py::CMapTest::test_abc_1",
"tests/test_map.py::CMapTest::test_hash_1",
"tests/test_map.py::CMapTest::test_hash_2",
"tests/test_map.py::CMapTest::test_hashkey_helper_1",
"tests/test_map.py::CMapTest::test_kwarg_named_col",
"tests/test_map.py::CMapTest::test_map_basics_1",
"tests/test_map.py::CMapTest::test_map_basics_2",
"tests/test_map.py::CMapTest::test_map_basics_3",
"tests/test_map.py::CMapTest::test_map_basics_4",
"tests/test_map.py::CMapTest::test_map_collision_1",
"tests/test_map.py::CMapTest::test_map_collision_2",
"tests/test_map.py::CMapTest::test_map_delete_1",
"tests/test_map.py::CMapTest::test_map_delete_2",
"tests/test_map.py::CMapTest::test_map_delete_3",
"tests/test_map.py::CMapTest::test_map_delete_4",
"tests/test_map.py::CMapTest::test_map_delete_5",
"tests/test_map.py::CMapTest::test_map_delete_6",
"tests/test_map.py::CMapTest::test_map_eq_1",
"tests/test_map.py::CMapTest::test_map_eq_2",
"tests/test_map.py::CMapTest::test_map_eq_3",
"tests/test_map.py::CMapTest::test_map_gc_1",
"tests/test_map.py::CMapTest::test_map_gc_2",
"tests/test_map.py::CMapTest::test_map_getitem_1",
"tests/test_map.py::CMapTest::test_map_in_1",
"tests/test_map.py::CMapTest::test_map_is_subscriptable",
"tests/test_map.py::CMapTest::test_map_items_1",
"tests/test_map.py::CMapTest::test_map_items_2",
"tests/test_map.py::CMapTest::test_map_items_3",
"tests/test_map.py::CMapTest::test_map_items_4",
"tests/test_map.py::CMapTest::test_map_keys_1",
"tests/test_map.py::CMapTest::test_map_keys_2",
"tests/test_map.py::CMapTest::test_map_mut_1",
"tests/test_map.py::CMapTest::test_map_mut_10",
"tests/test_map.py::CMapTest::test_map_mut_11",
"tests/test_map.py::CMapTest::test_map_mut_12",
"tests/test_map.py::CMapTest::test_map_mut_13",
"tests/test_map.py::CMapTest::test_map_mut_14",
"tests/test_map.py::CMapTest::test_map_mut_15",
"tests/test_map.py::CMapTest::test_map_mut_16",
"tests/test_map.py::CMapTest::test_map_mut_17",
"tests/test_map.py::CMapTest::test_map_mut_18",
"tests/test_map.py::CMapTest::test_map_mut_19",
"tests/test_map.py::CMapTest::test_map_mut_2",
"tests/test_map.py::CMapTest::test_map_mut_20",
"tests/test_map.py::CMapTest::test_map_mut_21",
"tests/test_map.py::CMapTest::test_map_mut_4",
"tests/test_map.py::CMapTest::test_map_mut_5",
"tests/test_map.py::CMapTest::test_map_mut_6",
"tests/test_map.py::CMapTest::test_map_mut_7",
"tests/test_map.py::CMapTest::test_map_mut_8",
"tests/test_map.py::CMapTest::test_map_mut_9",
"tests/test_map.py::CMapTest::test_map_mut_stress",
"tests/test_map.py::CMapTest::test_map_pickle",
"tests/test_map.py::CMapTest::test_map_stress_01",
"tests/test_map.py::CMapTest::test_map_stress_02",
"tests/test_map.py::CMapTest::test_map_values_1",
"tests/test_map.py::CMapTest::test_map_values_2",
"tests/test_map.py::CMapTest::test_repr_2",
"tests/test_none_keys.py::PyMapNoneTest::test_collision_4",
"tests/test_none_keys.py::PyMapNoneTest::test_iterators",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_2",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collision_3",
"tests/test_none_keys.py::PyMapNoneTest::test_none_collisions",
"tests/test_none_keys.py::PyMapNoneTest::test_none_mutation",
"tests/test_none_keys.py::PyMapNoneTest::test_none_set",
"tests/test_none_keys.py::CMapNoneTest::test_collision_4",
"tests/test_none_keys.py::CMapNoneTest::test_iterators",
"tests/test_none_keys.py::CMapNoneTest::test_none_collision_2",
"tests/test_none_keys.py::CMapNoneTest::test_none_collision_3",
"tests/test_none_keys.py::CMapNoneTest::test_none_collisions",
"tests/test_none_keys.py::CMapNoneTest::test_none_mutation",
"tests/test_none_keys.py::CMapNoneTest::test_none_set"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-08T17:28:05Z" | apache-2.0 |
|
Materials-Consortia__optimade-python-tools-1134 | diff --git a/optimade/filtertransformers/mongo.py b/optimade/filtertransformers/mongo.py
index d808ae8f..beba0fcd 100755
--- a/optimade/filtertransformers/mongo.py
+++ b/optimade/filtertransformers/mongo.py
@@ -141,9 +141,9 @@ class MongoTransformer(BaseTransformer):
def constant_first_comparison(self, arg):
# constant_first_comparison: constant OPERATOR ( non_string_value | not_implemented_string )
- return {
- arg[2]: {self.operator_map[self._reversed_operator_map[arg[1]]]: arg[0]}
- }
+ return self.property_first_comparison(
+ arg[2], {self.operator_map[self._reversed_operator_map[arg[1]]]: arg[0]}
+ )
@v_args(inline=True)
def value_op_rhs(self, operator, value):
| Materials-Consortia/optimade-python-tools | 1408a7c43954ad6159c21ef98dedce4ce22452e4 | diff --git a/optimade/server/data/test_references.json b/optimade/server/data/test_references.json
index 9dd372ad..05cb7932 100644
--- a/optimade/server/data/test_references.json
+++ b/optimade/server/data/test_references.json
@@ -58,5 +58,25 @@
"title": "Dummy reference that should remain orphaned from all structures for testing purposes",
"journal": "JACS",
"doi": "10.1038/00000"
+ },
+ {
+ "_id": {
+ "$oid": "98fb441f053b1744107019e3"
+ },
+ "id": "dummy/2022",
+ "last_modified": {
+ "$date": "2022-01-23T14:24:37.332Z"
+ },
+ "authors": [
+ {
+ "name": "A Nother",
+ "firstname": "A",
+ "lastname": "Nother"
+ }
+ ],
+ "year": "2019",
+ "note": "Dummy reference",
+ "title": "Just another title",
+ "journal": "JACS"
}
]
diff --git a/tests/filtertransformers/test_mongo.py b/tests/filtertransformers/test_mongo.py
index cf40440e..fec8c2c8 100644
--- a/tests/filtertransformers/test_mongo.py
+++ b/tests/filtertransformers/test_mongo.py
@@ -888,3 +888,8 @@ class TestMongoTransformer:
{"number": {"$eq": 0.0}},
]
}
+
+ def test_constant_first_comparisson(self):
+ assert self.transform("nelements != 5") == self.transform("5 != nelements")
+ assert self.transform("nelements > 5") == self.transform("5 < nelements")
+ assert self.transform("nelements <= 5") == self.transform("5 >= nelements")
| Queries with the form: 'value != prop' return entries where 'prop == None'
For the trajectory endpoint, I added a few references without DOI values.
A query like [https://optimade-trajectories.herokuapp.com/v1/references?filter=%2210.1038/00000%22!=doi](https://optimade-trajectories.herokuapp.com/v1/references?filter=%2210.1038/00000%22!=doi) returned 4 entries, while the query
[https://optimade-trajectories.herokuapp.com/v1/references?filter=doi!=%2210.1038/00000%22](https://optimade-trajectories.herokuapp.com/v1/references?filter=doi!=%2210.1038/00000%22) returns 2 entries
This also applies to other reference properties like journal and year. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_constant_first_comparisson"
] | [
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_empty",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_property_names",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_provider_property_name",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_nested_property_names",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_string_values",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_number_values",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_simple_comparisons",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_id",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_operators",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_filtering_on_relationships",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_other_provider_fields",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_not_implemented",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_list_length_aliases",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_suspected_timestamp_fields",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_unaliased_length_operator",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_mongo_special_id",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_aliased_length_operator",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_aliases",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_list_properties",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_known_properties",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_precedence",
"tests/filtertransformers/test_mongo.py::TestMongoTransformer::test_special_cases"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-29T12:04:01Z" | mit |
|
Materials-Consortia__optimade-python-tools-903 | diff --git a/openapi/index_openapi.json b/openapi/index_openapi.json
index 50a372d8..d540e638 100644
--- a/openapi/index_openapi.json
+++ b/openapi/index_openapi.json
@@ -433,7 +433,7 @@
"description": {
"title": "Description",
"type": "string",
- "description": "OPTIONAL human-readable description of the relationship"
+ "description": "OPTIONAL human-readable description of the relationship."
}
},
"description": "Specific meta field for base relationship resource"
@@ -684,7 +684,7 @@
"$ref": "#/components/schemas/ResponseMeta"
}
],
- "description": "A meta object containing non-standard information"
+ "description": "A meta object containing non-standard information."
},
"errors": {
"title": "Errors",
@@ -958,7 +958,7 @@
"$ref": "#/components/schemas/IndexInfoResource"
}
],
- "description": "Index meta-database /info data"
+ "description": "Index meta-database /info data."
},
"meta": {
"title": "Meta",
@@ -1248,7 +1248,7 @@
}
}
],
- "description": "List of unique OPTIMADE links resource objects"
+ "description": "List of unique OPTIMADE links resource objects."
},
"meta": {
"title": "Meta",
@@ -1457,7 +1457,7 @@
"description": "a meta object that contains non-standard meta-information about the relationship."
}
},
- "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource"
+ "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource."
},
"RelatedLinksResource": {
"title": "RelatedLinksResource",
@@ -1757,7 +1757,7 @@
"description": "a meta object that contains non-standard meta-information about the relationship."
}
},
- "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource"
+ "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource."
},
"ToplevelLinks": {
"title": "ToplevelLinks",
diff --git a/openapi/openapi.json b/openapi/openapi.json
index 4984ea39..bcdf4aff 100644
--- a/openapi/openapi.json
+++ b/openapi/openapi.json
@@ -1528,7 +1528,7 @@
"description": {
"title": "Description",
"type": "string",
- "description": "OPTIONAL human-readable description of the relationship"
+ "description": "OPTIONAL human-readable description of the relationship."
}
},
"description": "Specific meta field for base relationship resource"
@@ -1669,7 +1669,7 @@
"$ref": "#/components/schemas/EntryInfoResource"
}
],
- "description": "OPTIMADE information for an entry endpoint"
+ "description": "OPTIMADE information for an entry endpoint."
},
"meta": {
"title": "Meta",
@@ -1935,7 +1935,7 @@
"$ref": "#/components/schemas/ResponseMeta"
}
],
- "description": "A meta object containing non-standard information"
+ "description": "A meta object containing non-standard information."
},
"errors": {
"title": "Errors",
@@ -2095,7 +2095,7 @@
"$ref": "#/components/schemas/BaseInfoResource"
}
],
- "description": "The implementations /info data"
+ "description": "The implementations /info data."
},
"meta": {
"title": "Meta",
@@ -2366,7 +2366,7 @@
}
}
],
- "description": "List of unique OPTIMADE links resource objects"
+ "description": "List of unique OPTIMADE links resource objects."
},
"meta": {
"title": "Meta",
@@ -2609,7 +2609,7 @@
"description": "a meta object that contains non-standard meta-information about the relationship."
}
},
- "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource"
+ "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource."
},
"ReferenceResource": {
"title": "ReferenceResource",
@@ -2849,7 +2849,7 @@
}
}
],
- "description": "List of unique OPTIMADE references entry resource objects"
+ "description": "List of unique OPTIMADE references entry resource objects."
},
"meta": {
"title": "Meta",
@@ -2926,7 +2926,7 @@
"type": "object"
}
],
- "description": "A single references entry resource"
+ "description": "A single references entry resource."
},
"meta": {
"title": "Meta",
@@ -3334,7 +3334,7 @@
"description": "a meta object that contains non-standard meta-information about the relationship."
}
},
- "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource"
+ "description": "Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource."
},
"StructureResource": {
"title": "StructureResource",
@@ -3587,7 +3587,7 @@
}
}
],
- "description": "List of unique OPTIMADE structures entry resource objects"
+ "description": "List of unique OPTIMADE structures entry resource objects."
},
"meta": {
"title": "Meta",
@@ -3664,7 +3664,7 @@
"type": "object"
}
],
- "description": "A single structures entry resource"
+ "description": "A single structures entry resource."
},
"meta": {
"title": "Meta",
diff --git a/optimade/models/jsonapi.py b/optimade/models/jsonapi.py
index 7938dffb..f6724e01 100644
--- a/optimade/models/jsonapi.py
+++ b/optimade/models/jsonapi.py
@@ -345,10 +345,12 @@ class Response(BaseModel):
@root_validator(pre=True)
def either_data_meta_or_errors_must_be_set(cls, values):
required_fields = ("data", "meta", "errors")
- if not any(values.get(field) for field in required_fields):
+ if not any(field in values for field in required_fields):
raise ValueError(
f"At least one of {required_fields} MUST be specified in the top-level response"
)
+ if "errors" in values and not values.get("errors"):
+ raise ValueError("Errors MUST NOT be an empty or 'null' value.")
return values
class Config:
diff --git a/optimade/models/optimade_json.py b/optimade/models/optimade_json.py
index 26a73999..92757fc7 100644
--- a/optimade/models/optimade_json.py
+++ b/optimade/models/optimade_json.py
@@ -340,16 +340,16 @@ class Success(jsonapi.Response):
@root_validator(pre=True)
def either_data_meta_or_errors_must_be_set(cls, values):
- """Overwriting the existing validation function, since 'errors' MUST NOT be set"""
+ """Overwriting the existing validation function, since 'errors' MUST NOT be set."""
required_fields = ("data", "meta")
- if not any(values.get(field) for field in required_fields):
+ if not any(field in values for field in required_fields):
raise ValueError(
- f"At least one of {required_fields} MUST be specified in the top-level response"
+ f"At least one of {required_fields} MUST be specified in the top-level response."
)
# errors MUST be skipped
- if values.get("errors", None) is not None:
- raise ValueError("'errors' MUST be skipped for a successful response")
+ if "errors" in values:
+ raise ValueError("'errors' MUST be skipped for a successful response.")
return values
@@ -358,7 +358,7 @@ class BaseRelationshipMeta(jsonapi.Meta):
"""Specific meta field for base relationship resource"""
description: str = StrictField(
- ..., description="OPTIONAL human-readable description of the relationship"
+ ..., description="OPTIONAL human-readable description of the relationship."
)
@@ -372,7 +372,7 @@ class BaseRelationshipResource(jsonapi.BaseResource):
class Relationship(jsonapi.Relationship):
- """Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource"""
+ """Similar to normal JSON API relationship, but with addition of OPTIONAL meta field for a resource."""
data: Optional[
Union[BaseRelationshipResource, List[BaseRelationshipResource]]
diff --git a/optimade/models/responses.py b/optimade/models/responses.py
index 6662fa26..c9959650 100644
--- a/optimade/models/responses.py
+++ b/optimade/models/responses.py
@@ -33,7 +33,7 @@ class ErrorResponse(Response):
"""errors MUST be present and data MUST be skipped"""
meta: ResponseMeta = StrictField(
- ..., description="A meta object containing non-standard information"
+ ..., description="A meta object containing non-standard information."
)
errors: List[OptimadeError] = StrictField(
...,
@@ -43,26 +43,26 @@ class ErrorResponse(Response):
@root_validator(pre=True)
def data_must_be_skipped(cls, values):
- if values.get("data", None) is not None:
- raise ValueError("data MUST be skipped for failures reporting errors")
+ if "data" in values:
+ raise ValueError("data MUST be skipped for failures reporting errors.")
return values
class IndexInfoResponse(Success):
data: IndexInfoResource = StrictField(
- ..., description="Index meta-database /info data"
+ ..., description="Index meta-database /info data."
)
class EntryInfoResponse(Success):
data: EntryInfoResource = StrictField(
- ..., description="OPTIMADE information for an entry endpoint"
+ ..., description="OPTIMADE information for an entry endpoint."
)
class InfoResponse(Success):
data: BaseInfoResource = StrictField(
- ..., description="The implementations /info data"
+ ..., description="The implementations /info data."
)
@@ -85,34 +85,34 @@ class EntryResponseMany(Success):
class LinksResponse(EntryResponseMany):
data: Union[List[LinksResource], List[Dict[str, Any]]] = StrictField(
...,
- description="List of unique OPTIMADE links resource objects",
+ description="List of unique OPTIMADE links resource objects.",
uniqueItems=True,
)
class StructureResponseOne(EntryResponseOne):
data: Union[StructureResource, Dict[str, Any], None] = StrictField(
- ..., description="A single structures entry resource"
+ ..., description="A single structures entry resource."
)
class StructureResponseMany(EntryResponseMany):
data: Union[List[StructureResource], List[Dict[str, Any]]] = StrictField(
...,
- description="List of unique OPTIMADE structures entry resource objects",
+ description="List of unique OPTIMADE structures entry resource objects.",
uniqueItems=True,
)
class ReferenceResponseOne(EntryResponseOne):
data: Union[ReferenceResource, Dict[str, Any], None] = StrictField(
- ..., description="A single references entry resource"
+ ..., description="A single references entry resource."
)
class ReferenceResponseMany(EntryResponseMany):
data: Union[List[ReferenceResource], List[Dict[str, Any]]] = StrictField(
...,
- description="List of unique OPTIMADE references entry resource objects",
+ description="List of unique OPTIMADE references entry resource objects.",
uniqueItems=True,
)
| Materials-Consortia/optimade-python-tools | 1d8fc8e29a21e78ecb933e97315fbe72b653bdec | diff --git a/tests/models/test_jsonapi.py b/tests/models/test_jsonapi.py
index 3d684666..9cb66d2e 100644
--- a/tests/models/test_jsonapi.py
+++ b/tests/models/test_jsonapi.py
@@ -1,9 +1,12 @@
from pydantic import ValidationError
import pytest
-from optimade.models.jsonapi import Error, ToplevelLinks
def test_hashability():
+ """Check a list of errors can be converted to a set,
+ i.e., check that Errors can be hashed."""
+ from optimade.models.jsonapi import Error
+
error = Error(id="test")
assert set([error])
@@ -14,6 +17,8 @@ def test_toplevel_links():
can be validated as a URL or a Links object too.
"""
+ from optimade.models.jsonapi import ToplevelLinks
+
test_links = {
"first": {"href": "http://example.org/structures?page_limit=3&page_offset=0"},
"last": {"href": "http://example.org/structures?page_limit=3&page_offset=10"},
@@ -55,3 +60,22 @@ def test_toplevel_links():
with pytest.raises(ValidationError):
ToplevelLinks(**{"base_url": {"href": "not a link"}})
+
+
+def test_response_top_level():
+ """Ensure a response with "null" values can be created."""
+ from optimade.models.jsonapi import Response
+
+ assert isinstance(Response(data=[]), Response)
+ assert isinstance(Response(data=None), Response)
+ assert isinstance(Response(meta={}), Response)
+ assert isinstance(Response(meta=None), Response)
+
+ # "errors" MUST NOT be an empty or `null` value if given.
+ with pytest.raises(ValidationError, match=r"Errors MUST NOT be an empty.*"):
+ assert isinstance(Response(errors=[]), Response)
+ with pytest.raises(ValidationError, match=r"Errors MUST NOT be an empty.*"):
+ assert isinstance(Response(errors=None), Response)
+
+ with pytest.raises(ValidationError, match=r"At least one of .*"):
+ Response(links={})
| Too strict validator for top-level fields
I think the root validator `either_data_meta_or_errors_must_be_set()` for `Response` is a bit too strict? While either of those MUST be set, they should be able to have a "null" value, either an empty dict, list or similar or `None`.
So basically the validator should be changed to just check for the existence of the top-level names as keys in the passed dict `values`, and not check their values - or did I misunderstand something here? Do they _need_ to have non-"null" content as well? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/models/test_jsonapi.py::test_response_top_level"
] | [
"tests/models/test_jsonapi.py::test_hashability",
"tests/models/test_jsonapi.py::test_toplevel_links"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-08-10T08:06:33Z" | mit |
|
MatterMiners__cobald-46 | diff --git a/src/cobald/controller/stepwise.py b/src/cobald/controller/stepwise.py
index 3b4241a..1e79f09 100644
--- a/src/cobald/controller/stepwise.py
+++ b/src/cobald/controller/stepwise.py
@@ -187,7 +187,7 @@ class UnboundStepwise(object):
:note: The partial rules are sealed, and :py:meth:`~.UnboundStepwise.add`
cannot be called on it.
"""
- return Partial(Stepwise, self.base, *self.rules, *args, **kwargs)
+ return Partial(Stepwise, self.base, *self.rules, *args, __leaf__=True, **kwargs)
def __call__(self, target: Pool, interval: float = None):
if interval is None:
diff --git a/src/cobald/interfaces/_controller.py b/src/cobald/interfaces/_controller.py
index 8dcd617..e9da6c3 100644
--- a/src/cobald/interfaces/_controller.py
+++ b/src/cobald/interfaces/_controller.py
@@ -28,4 +28,4 @@ class Controller(metaclass=abc.ABCMeta):
pipeline = controller(rate=10) >> pool
"""
- return Partial(cls, *args, **kwargs)
+ return Partial(cls, *args, __leaf__=False, **kwargs)
diff --git a/src/cobald/interfaces/_partial.py b/src/cobald/interfaces/_partial.py
index 9eaab6d..7057ba9 100644
--- a/src/cobald/interfaces/_partial.py
+++ b/src/cobald/interfaces/_partial.py
@@ -1,11 +1,12 @@
from inspect import Signature, BoundArguments
-from typing import Type, Generic, TypeVar, Tuple, Dict, TYPE_CHECKING, Union, overload
+from typing import Type, Generic, TypeVar, TYPE_CHECKING, Union, overload
-from ._pool import Pool
+from . import _pool
if TYPE_CHECKING:
from ._controller import Controller
from ._proxy import PoolDecorator
+ from ._pool import Pool
Owner = Union[Controller, PoolDecorator]
C_co = TypeVar('C_co', bound=Owner)
else:
@@ -31,20 +32,24 @@ class Partial(Generic[C_co]):
# apply target by chaining
pipeline = control >> Decorator() >> Pool()
+ :note: The keyword argument ``__leaf__`` is reserved for internal usage.
+
:note: Binding :py:class:`~.Controller`\ s and :py:class:`~.Decorator`\ s
creates a temporary :py:class:`~.PartialBind`. Only binding to a
:py:class:`~.Pool` as the last element creates a concrete binding.
"""
- __slots__ = ('ctor', 'args', 'kwargs')
+ __slots__ = ('ctor', 'args', 'kwargs', 'leaf')
- def __init__(self, ctor: Type[C_co], *args, **kwargs):
+ def __init__(self, ctor: Type[C_co], *args, __leaf__, **kwargs):
self.ctor = ctor
self.args = args
self.kwargs = kwargs
- self._check_signature(args, kwargs)
+ self.leaf = __leaf__
+ self._check_signature()
- def _check_signature(self, args: Tuple, kwargs: Dict):
- if 'target' in kwargs or (args and isinstance(args[0], Pool)):
+ def _check_signature(self):
+ args, kwargs = self.args, self.kwargs
+ if 'target' in kwargs or (args and isinstance(args[0], _pool.Pool)):
raise TypeError(
"%s[%s] cannot bind 'target' by calling. "
"Use `this >> target` instead." % (
@@ -52,8 +57,10 @@ class Partial(Generic[C_co]):
)
)
try:
+ if not self.leaf:
+ args = None, *args
_ = Signature.from_callable(self.ctor).bind_partial(
- None, *args, **kwargs
+ *args, **kwargs
) # type: BoundArguments
except TypeError as err:
message = err.args[0]
@@ -62,21 +69,38 @@ class Partial(Generic[C_co]):
) from err
def __call__(self, *args, **kwargs) -> 'Partial[C_co]':
- return Partial(self.ctor, *self.args, *args, **self.kwargs, **kwargs)
+ return Partial(
+ self.ctor,
+ *self.args, *args,
+ __leaf__=self.leaf,
+ **self.kwargs, **kwargs
+ )
+
+ def __construct__(self, *args, **kwargs):
+ return self.ctor(*args, *self.args, **kwargs, **self.kwargs)
@overload # noqa: F811
- def __rshift__(self, other: 'Union[Partial, PartialBind]') -> 'PartialBind[C_co]':
+ def __rshift__(self, other: 'Union[Owner, Pool, PartialBind[Pool]]') -> 'C_co':
...
@overload # noqa: F811
- def __rshift__(self, other: 'Union[Owner, Pool]') -> 'C_co':
+ def __rshift__(self, other: 'Union[Partial, PartialBind]') -> 'PartialBind[C_co]':
...
def __rshift__(self, other): # noqa: F811
- if isinstance(other, (Partial, PartialBind)):
+ if isinstance(other, PartialBind):
+ return PartialBind(self, other.parent, *other.targets)
+ elif isinstance(other, Partial):
+ if other.leaf:
+ return self >> other.__construct__()
return PartialBind(self, other)
else:
- return self.ctor(other, *self.args, **self.kwargs)
+ return self.__construct__(other)
+
+ def __repr__(self):
+ return '{self.__class__.__name__}(ctor={self.ctor.__name__}'.format(self=self)\
+ + ', args={self.args}, kwargs={self.kwargs}'.format(self=self) \
+ + ', leaf={self.leaf})'.format(self=self)
class PartialBind(Generic[C_co]):
@@ -89,7 +113,11 @@ class PartialBind(Generic[C_co]):
"""
__slots__ = ('parent', 'targets')
- def __init__(self, parent: Partial[C_co], *targets: Partial[Owner]):
+ def __init__(
+ self,
+ parent: Partial[C_co],
+ *targets: 'Union[Partial[Owner], PartialBind[Owner]]'
+ ):
self.parent = parent
self.targets = targets
@@ -98,14 +126,16 @@ class PartialBind(Generic[C_co]):
...
@overload # noqa: F811
- def __rshift__(self, other: Pool) -> 'C_co':
+ def __rshift__(self, other: 'Pool') -> 'C_co':
...
- def __rshift__(self, other: Union[Pool, Partial[Owner]]): # noqa: F811
- if isinstance(other, Pool):
+ def __rshift__(self, other: 'Union[Pool, Partial[Owner]]'): # noqa: F811
+ if isinstance(other, _pool.Pool):
pool = self.targets[-1] >> other
for owner in reversed(self.targets[:-1]):
pool = owner >> pool
return self.parent >> pool
+ elif isinstance(other, Partial) and other.leaf:
+ return self >> other.__construct__()
else:
return PartialBind(self.parent, *self.targets, other)
diff --git a/src/cobald/interfaces/_pool.py b/src/cobald/interfaces/_pool.py
index 4f957d3..e6612de 100644
--- a/src/cobald/interfaces/_pool.py
+++ b/src/cobald/interfaces/_pool.py
@@ -1,4 +1,10 @@
import abc
+from typing import TypeVar, Type
+
+from ._partial import Partial
+
+
+C = TypeVar('C', bound='Controller')
class Pool(metaclass=abc.ABCMeta):
@@ -33,3 +39,16 @@ class Pool(metaclass=abc.ABCMeta):
def allocation(self) -> float:
"""Fraction of the provided resources which are assigned for usage"""
raise NotImplementedError
+
+ @classmethod
+ def s(cls: Type[C], *args, **kwargs) -> Partial[C]:
+ """
+ Create an unbound prototype of this class, partially applying arguments
+
+ .. code:: python
+
+ pool = RemotePool.s(port=1337)
+
+ pipeline = controller >> pool(host='localhost')
+ """
+ return Partial(cls, *args, __leaf__=True, **kwargs)
diff --git a/src/cobald/interfaces/_proxy.py b/src/cobald/interfaces/_proxy.py
index 5b81d8a..64735e5 100644
--- a/src/cobald/interfaces/_proxy.py
+++ b/src/cobald/interfaces/_proxy.py
@@ -28,7 +28,7 @@ class PoolDecorator(Pool):
pipeline = controller >> decorator >> pool
"""
- return Partial(cls, *args, **kwargs)
+ return Partial(cls, *args, __leaf__=False, **kwargs)
@property
def supply(self):
| MatterMiners/cobald | 79788e7e172089b88ecce4ffab7bbda27fcccb55 | diff --git a/cobald_tests/interfaces/test_partial.py b/cobald_tests/interfaces/test_partial.py
index e8d42bf..d5eb642 100644
--- a/cobald_tests/interfaces/test_partial.py
+++ b/cobald_tests/interfaces/test_partial.py
@@ -45,6 +45,32 @@ class TestPartial(object):
pipeline = partial_control >> FullMockPool()
assert isinstance(pipeline, MockController)
+ def test_pool_curry_bind(self):
+ """Curry and bind the last element of a pipeline"""
+ partial_pool = FullMockPool.s()
+ assert isinstance(partial_pool, Partial)
+ partial_pool = partial_pool(demand=10)
+ assert isinstance(partial_pool, Partial)
+ partial_control = MockController.s()
+ pipeline = partial_control >> partial_pool
+ assert isinstance(pipeline, MockController)
+ assert isinstance(pipeline.target, FullMockPool)
+ assert pipeline.target.demand == 10
+
+ def test_pool_recursive(self):
+ """Curry and bind the last element of a long pipeline"""
+ partial_pool = FullMockPool.s(demand=10)
+ for _ in range(3):
+ partial_pool = partial_pool()
+ assert isinstance(partial_pool, Partial)
+ pipeline = MockController.s() >> MockDecorator.s() >> MockDecorator.s()\
+ >> partial_pool
+ assert isinstance(pipeline, MockController)
+ assert isinstance(pipeline.target, MockDecorator)
+ assert isinstance(pipeline.target.target, MockDecorator)
+ assert isinstance(pipeline.target.target.target, FullMockPool)
+ assert pipeline.target.target.target.demand == 10
+
def test_signature_check(self):
class ArgController(Controller):
def __init__(self, target, a, b, c=3, *, kwa=2, kwb=3):
| Allow `.s` syntax for pools as well
Controller and decorator types both have a ``.s`` classmethod to allow binding them with ``>>``. Pools do not, as they are always concrete and mark the end of a binding chain.
```python3
LinearController.s() >> Pool()
```
The use of ``.s`` or not might be confusing without understanding how this works. For usability, it might be advantageous to have a dummy ``.s`` method for pools as well.
```python3
LinearController.s() >> Pool.s()
```
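A minimal sketch of how such a dummy classmethod could look, mirroring the change in the patch above (the ``Partial`` import path and the ``__leaf__`` flag follow that patch and are otherwise assumptions):
```python3
from typing import Type, TypeVar

from cobald.interfaces._partial import Partial

C = TypeVar("C", bound="Pool")


class Pool:
    # sketch only: in practice this would live on the abstract Pool interface
    @classmethod
    def s(cls: Type[C], *args, **kwargs) -> "Partial[C]":
        """Create an unbound prototype of this pool, partially applying arguments"""
        return Partial(cls, *args, __leaf__=True, **kwargs)
```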
This might also be advantageous for "templating" Pools (as also intended for the others):
```python3
DemoPool = MyDronePool.s(collector="cobald.demo.edu")
LinearController.s() >> DemoPool(size=8)
LinearController.s() >> DemoPool(size=4)
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cobald_tests/interfaces/test_partial.py::TestPartial::test_pool_curry_bind",
"cobald_tests/interfaces/test_partial.py::TestPartial::test_pool_recursive"
] | [
"cobald_tests/interfaces/test_partial.py::TestPartial::test_bind",
"cobald_tests/interfaces/test_partial.py::TestPartial::test_recursive_bind",
"cobald_tests/interfaces/test_partial.py::TestPartial::test_recursive_curry",
"cobald_tests/interfaces/test_partial.py::TestPartial::test_signature_check"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-08-01T15:23:19Z" | mit |
|
MatterMiners__tardis-125 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 43ee6bb..66bed9d 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2020-01-16, command
+.. Created by changelog.py at 2020-01-17, command
'/Users/giffler/.cache/pre-commit/repont7o94ca/py_env-default/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2020-01-16
+[Unreleased] - 2020-01-17
=========================
Added
@@ -17,6 +17,7 @@ Added
* Add support for COBalD legacy object initialisation
* The machine name has been added as a default tag in the telegraf monitoring plugin, can be overwritten.
* An optional and per site configurable drone minimum lifetime has been added
+* Add the possibility to use an unified `COBalD` and `TARDIS` configuration
Fixed
-----
diff --git a/docs/source/changes/125.add_unified_configuration.yaml b/docs/source/changes/125.add_unified_configuration.yaml
new file mode 100644
index 0000000..74847ac
--- /dev/null
+++ b/docs/source/changes/125.add_unified_configuration.yaml
@@ -0,0 +1,6 @@
+category: added
+summary: Add the possibility to use an unified `COBalD` and `TARDIS` configuration
+pull requests:
+ - 125
+description: |
+ The possibility to combine the `COBalD` and the `TARDIS` configuration in one single `yaml` has been added.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index dd507f0..ba69e33 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -141,6 +141,75 @@ Configuration of TARDIS
Memory: 16
Disk: 160
+Unified Configuration
+=====================
+
+.. content-tabs:: left-col
+
+ Alternatively a unified ``COBalD`` and ``TARDIS`` configuration can be used. In this case, the ``TARDIS``
+ part of the configuration is represented by a ``tardis`` MappingNode.
+
+ .. warning::
+ In case of the unified configuration you can currently not use the yaml tag ``!TardisPoolFactory`` to initialize
+ the pool factory, please use the `COBalD` legacy object initialisation
+ ``__type__: tardis.resources.poolfactory.create_composite_pool`` instead!
+
+.. content-tabs:: right-col
+
+ .. rubric:: Example configuration
+ .. code-block:: yaml
+
+ pipeline:
+ # Makes decision to add remove resources based utilisation and allocation
+ - !LinearController
+ low_utilisation: 0.90
+ high_allocation: 0.90
+ rate: 1
+ # Limits the demand for a resource
+ - !Limiter
+ minimum: 1
+ # Log changes
+ - !Logger
+ name: 'changes'
+ # Factory function to create composite resource pool
+ - __type__: tardis.resources.poolfactory.create_composite_pool
+ tardis:
+ Plugins:
+ SqliteRegistry:
+ db_file: drone_registry.db
+
+ BatchSystem:
+ adapter: FakeBatchSystem
+ allocation: 1.0
+ utilization: !PeriodicValue
+ period: 3600
+ amplitude: 0.5
+ offset: 0.5
+ phase: 0.
+ machine_status: Available
+
+ Sites:
+ - name: Fake
+ adapter: FakeSite
+ quota: 8000 # CPU core quota
+
+ Fake:
+ api_response_delay: !RandomGauss
+ mu: 0.1
+ sigma: 0.01
+ resource_boot_time: !RandomGauss
+ mu: 60
+ sigma: 10
+ MachineTypes:
+ - m1.infinity
+ MachineTypeConfiguration:
+ m1.infinity:
+ MachineMetaData:
+ m1.infinity:
+ Cores: 8
+ Memory: 16
+ Disk: 160
+
Start-up your instance
======================
diff --git a/setup.py b/setup.py
index c96442b..b4005f5 100644
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,10 @@ setup(
entry_points={
"cobald.config.yaml_constructors": [
"TardisPoolFactory = tardis.resources.poolfactory:create_composite_pool"
- ]
+ ],
+ "cobald.config.sections": [
+ "tardis = tardis.configuration.configuration:Configuration"
+ ],
},
keywords=package_about["__keywords__"],
packages=find_packages(exclude=["tests"]),
diff --git a/tardis/configuration/configuration.py b/tardis/configuration/configuration.py
index f5ea52d..24abfd6 100644
--- a/tardis/configuration/configuration.py
+++ b/tardis/configuration/configuration.py
@@ -33,10 +33,13 @@ def translate_config(obj):
class Configuration(Borg):
_shared_state = AttributeDict()
- def __init__(self, config_file: str = None):
+ def __init__(self, configuration: [str, dict] = None):
super(Configuration, self).__init__()
- if config_file:
- self.load_config(config_file)
+ if configuration:
+ if isinstance(configuration, str): # interpret string as file name
+ self.load_config(configuration)
+ else:
+ self.update_config(configuration)
def load_config(self, config_file: str) -> None:
"""
@@ -45,6 +48,14 @@ class Configuration(Borg):
:type config_file: str
"""
with open(config_file, "r") as config_file:
- self._shared_state.update(
- translate_config(convert_to_attribute_dict(yaml.safe_load(config_file)))
- )
+ self.update_config(yaml.safe_load(config_file))
+
+ def update_config(self, configuration: dict):
+ """
+ Updates the shared state of the configuration borg
+ :param configuration: Dictionary containing the configuration
+ :type configuration: dict
+ """
+ self._shared_state.update(
+ translate_config(convert_to_attribute_dict(configuration))
+ )
diff --git a/tardis/resources/poolfactory.py b/tardis/resources/poolfactory.py
index a80a379..6b2aa2a 100644
--- a/tardis/resources/poolfactory.py
+++ b/tardis/resources/poolfactory.py
@@ -27,7 +27,7 @@ def str_to_state(resources):
return resources
-def create_composite_pool(configuration: str = "tardis.yml") -> WeightedComposite:
+def create_composite_pool(configuration: str = None) -> WeightedComposite:
configuration = Configuration(configuration)
composites = []
| MatterMiners/tardis | 2f569a2b421c08883f78e2ed17e53f40812e2231 | diff --git a/tests/configuration_t/OpenStack.yml b/tests/configuration_t/OpenStack.yml
new file mode 100644
index 0000000..61c323d
--- /dev/null
+++ b/tests/configuration_t/OpenStack.yml
@@ -0,0 +1,3 @@
+OpenStack:
+ api_key: qwertzuiop
+ api_secret: katze123
diff --git a/tests/configuration_t/test_configuration.py b/tests/configuration_t/test_configuration.py
index 81d1155..00a6c49 100644
--- a/tests/configuration_t/test_configuration.py
+++ b/tests/configuration_t/test_configuration.py
@@ -1,8 +1,10 @@
from tardis.configuration.configuration import Configuration
from tardis.utilities.executors.sshexecutor import SSHExecutor
+from tardis.utilities.attributedict import AttributeDict
from unittest import TestCase
import os
+import yaml
class TestConfiguration(TestCase):
@@ -36,6 +38,15 @@ class TestConfiguration(TestCase):
{"api_key": "asdfghjkl", "api_secret": "qwertzuiop"},
)
+ def test_update_configuration(self):
+ with open(os.path.join(self.test_path, "OpenStack.yml"), "r") as config_file:
+ config_file_content = yaml.safe_load(config_file)
+ self.configuration1 = Configuration(config_file_content)
+ self.assertEqual(
+ self.configuration1.OpenStack,
+ AttributeDict(api_key="qwertzuiop", api_secret="katze123"),
+ )
+
def test_translate_config(self):
b64_result = (
b"I2Nsb3VkLWNvbmZpZwoKd3JpdGVfZmlsZXM6CiAgLSBwYXRoOiAvZXRjL2huc2NpY2xvdWQvc2l0ZS1pZC5jZmcKICAgIGNvbn",
| Unified COBalD/TARDIS configuration
End user configuration would be easier if we had one *single* file used by both TARDIS and COBalD. We should embed one in the other or have a unified configuration that has both side-by-side. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/configuration_t/test_configuration.py::TestConfiguration::test_update_configuration"
] | [
"tests/configuration_t/test_configuration.py::TestConfiguration::test_access_missing_attribute",
"tests/configuration_t/test_configuration.py::TestConfiguration::test_configuration_instances",
"tests/configuration_t/test_configuration.py::TestConfiguration::test_load_configuration",
"tests/configuration_t/test_configuration.py::TestConfiguration::test_shared_state",
"tests/configuration_t/test_configuration.py::TestConfiguration::test_translate_config"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-01-17T15:21:50Z" | mit |
|
MatterMiners__tardis-145 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index d22bc81..fc03530 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2020-03-19, command
+.. Created by changelog.py at 2020-04-28, command
'/Users/giffler/.cache/pre-commit/repont7o94ca/py_env-python3.7/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -32,9 +32,14 @@ Fixed
* Fix state transitions for jobs retried by HTCondor
* Fix state transitions and refactoring of the SLURM site adapter
-[Unreleased] - 2020-03-19
+[Unreleased] - 2020-04-28
=========================
+Added
+-----
+
+* Add ssh connection sharing to `SSHExecutor` in order to re-use existing connection
+
Changed
-------
diff --git a/docs/source/changes/145.add_ssh_connection_sharing.yaml b/docs/source/changes/145.add_ssh_connection_sharing.yaml
new file mode 100644
index 0000000..e556827
--- /dev/null
+++ b/docs/source/changes/145.add_ssh_connection_sharing.yaml
@@ -0,0 +1,9 @@
+category: added
+summary: "Add ssh connection sharing to `SSHExecutor` in order to re-use existing connection"
+pull requests:
+ - 145
+issues:
+ - 135
+description: |
+ The `SSHExector` is now re-using existing connections. Closed connections are automatically reestablished. This will
+ avoid connection problems when bothering a remote ssh server with too many requests in too short intervals.
diff --git a/docs/source/executors/executors.rst b/docs/source/executors/executors.rst
index f0a1398..6169f8d 100644
--- a/docs/source/executors/executors.rst
+++ b/docs/source/executors/executors.rst
@@ -41,9 +41,10 @@ SSH Executor
.. content-tabs:: left-col
- The ssh executor is used to asynchronously execute shell commands remotely via ssh. All parameters specified in the
- configuration are directly passed as keyword arguments to `asyncssh` `connect` call. You can find all available
- parameters in the `asyncssh documentation`_
+ The ssh executor is used to asynchronously execute shell commands remotely via ssh. The actual ssh connection to
+ the host is preserved, recycled and automatically reestablished. All parameters specified in the configuration are
+ directly passed as keyword arguments to `asyncssh` `connect` call. You can find all available parameters in the
+ `asyncssh documentation`_
.. _asyncssh documentation: https://asyncssh.readthedocs.io/en/latest/api.html#connect
diff --git a/tardis/utilities/executors/sshexecutor.py b/tardis/utilities/executors/sshexecutor.py
index 7f79943..3003b0e 100644
--- a/tardis/utilities/executors/sshexecutor.py
+++ b/tardis/utilities/executors/sshexecutor.py
@@ -3,6 +3,7 @@ from ...exceptions.executorexceptions import CommandExecutionFailure
from ...interfaces.executor import Executor
from ..attributedict import AttributeDict
+import asyncio
import asyncssh
@@ -10,13 +11,45 @@ import asyncssh
class SSHExecutor(Executor):
def __init__(self, **parameters):
self._parameters = parameters
+ self._ssh_connection = None
+ self._lock = None
+
+ async def _establish_connection(self):
+ for retry in range(1, 10):
+ try:
+ return await asyncssh.connect(**self._parameters)
+ except (
+ ConnectionResetError,
+ asyncssh.DisconnectError,
+ asyncssh.ConnectionLost,
+ BrokenPipeError,
+ ):
+ await asyncio.sleep(retry * 10)
+ return await asyncssh.connect(**self._parameters)
+
+ @property
+ async def ssh_connection(self):
+ if self._ssh_connection is None:
+ async with self.lock:
+ # check that connection has not yet been initialize in a different task
+ while self._ssh_connection is None:
+ self._ssh_connection = await self._establish_connection()
+ return self._ssh_connection
+
+ @property
+ def lock(self):
+ # Create lock once tardis event loop is running.
+ # To avoid got Future <Future pending> attached to a different loop exception
+ if self._lock is None:
+ self._lock = asyncio.Lock()
+ return self._lock
async def run_command(self, command, stdin_input=None):
+ ssh_connection = await self.ssh_connection
try:
- async with asyncssh.connect(**self._parameters) as conn:
- response = await conn.run(
- command, check=True, input=stdin_input and stdin_input.encode()
- )
+ response = await ssh_connection.run(
+ command, check=True, input=stdin_input and stdin_input.encode()
+ )
except asyncssh.ProcessError as pe:
raise CommandExecutionFailure(
message=f"Run command {command} via SSHExecutor failed",
@@ -25,18 +58,15 @@ class SSHExecutor(Executor):
stdout=pe.stdout,
stderr=pe.stderr,
) from pe
- except (
- ConnectionResetError,
- asyncssh.misc.DisconnectError,
- asyncssh.misc.ConnectionLost,
- BrokenPipeError,
- ) as ce:
+ except asyncssh.ChannelOpenError as coe:
+ # Broken connection will be replaced by a new connection during next call
+ self._ssh_connection = None
raise CommandExecutionFailure(
- message=f"Could not run command {command} due to SSH failure: {ce}",
+ message=f"Could not run command {command} due to SSH failure: {coe}",
exit_code=255,
stdout="",
- stderr="SSH failure",
- ) from ce
+ stderr="SSH Broken Connection",
+ ) from coe
else:
return AttributeDict(
stdout=response.stdout,
| MatterMiners/tardis | d1a436ebdb48d7f415b9a26526d8f5e0f68e564a | diff --git a/tests/utilities_t/executors_t/test_sshexecutor.py b/tests/utilities_t/executors_t/test_sshexecutor.py
index d06fcd6..37a4476 100644
--- a/tests/utilities_t/executors_t/test_sshexecutor.py
+++ b/tests/utilities_t/executors_t/test_sshexecutor.py
@@ -1,55 +1,42 @@
-from tests.utilities.utilities import run_async
+from tests.utilities.utilities import async_return, run_async
from tardis.utilities.attributedict import AttributeDict
from tardis.utilities.executors.sshexecutor import SSHExecutor
from tardis.exceptions.executorexceptions import CommandExecutionFailure
-from asyncssh import ProcessError
-from asyncssh.misc import ConnectionLost, DisconnectError
-
-try:
- from contextlib import asynccontextmanager
-except ImportError:
- from aiotools import async_ctx_manager as asynccontextmanager
+from asyncssh import ChannelOpenError, ConnectionLost, DisconnectError, ProcessError
from unittest import TestCase
from unittest.mock import patch
+import asyncio
import yaml
-def generate_connect(response, exception=None):
- @asynccontextmanager
- async def connect(*args, **kwargs):
- class Connection(object):
- async def run(self, *args, input, **kwargs):
- if exception:
- raise exception
- self.stdout = input and input.decode()
- return self
-
- @property
- def exit_status(self):
- return response.exit_status
-
- @property
- def stderr(self):
- return response.stderr
-
- yield Connection()
+class MockConnection(object):
+ def __init__(self, exception=None, **kwargs):
+ self.exception = exception and exception(**kwargs)
- return connect
+ async def run(self, command, input=None, **kwargs):
+ if self.exception:
+ raise self.exception
+ return AttributeDict(
+ stdout=input and input.decode(), stderr="TestError", exit_status=0
+ )
class TestSSHExecutor(TestCase):
+ mock_asyncssh = None
+
@classmethod
def setUpClass(cls):
cls.mock_asyncssh_patcher = patch(
"tardis.utilities.executors.sshexecutor.asyncssh"
)
cls.mock_asyncssh = cls.mock_asyncssh_patcher.start()
+ cls.mock_asyncssh.ChannelOpenError = ChannelOpenError
+ cls.mock_asyncssh.ConnectionLost = ConnectionLost
+ cls.mock_asyncssh.DisconnectError = DisconnectError
cls.mock_asyncssh.ProcessError = ProcessError
- cls.mock_asyncssh.misc.ConnectionLost = ConnectionLost
- cls.mock_asyncssh.misc.DisconnectError = DisconnectError
@classmethod
def tearDownClass(cls):
@@ -57,82 +44,105 @@ class TestSSHExecutor(TestCase):
def setUp(self) -> None:
self.response = AttributeDict(stderr="", exit_status=0)
- self.mock_asyncssh.connect.side_effect = generate_connect(self.response)
- self.mock_asyncssh.reset_mock()
-
- def test_run_command(self):
- executor = SSHExecutor(
- host="test_host", username="test", client_keys=["TestKey"]
+ self.mock_asyncssh.connect.return_value = async_return(
+ return_value=MockConnection()
)
- self.assertIsNone(run_async(executor.run_command, command="Test").stdout)
- self.mock_asyncssh.connect.assert_called_with(
+ self.test_asyncssh_params = AttributeDict(
host="test_host", username="test", client_keys=["TestKey"]
)
+ self.executor = SSHExecutor(**self.test_asyncssh_params)
self.mock_asyncssh.reset_mock()
- executor = SSHExecutor(
- host="test_host", username="test", client_keys=("TestKey",)
- )
- self.assertIsNone(run_async(executor.run_command, command="Test").stdout)
- self.mock_asyncssh.connect.assert_called_with(
- host="test_host", username="test", client_keys=("TestKey",)
+ @patch("tardis.utilities.executors.sshexecutor.asyncio.sleep", async_return)
+ def test_establish_connection(self):
+ self.assertIsInstance(
+ run_async(self.executor._establish_connection), MockConnection
)
- self.mock_asyncssh.reset_mock()
+ self.mock_asyncssh.connect.assert_called_with(**self.test_asyncssh_params)
- executor = SSHExecutor(
- host="test_host", username="test", client_keys=("TestKey",)
- )
- self.assertEqual(
- run_async(executor.run_command, command="Test", stdin_input="Test").stdout,
- "Test",
- )
+ test_exceptions = [
+ ConnectionResetError(),
+ DisconnectError(reason="test_reason", code=255),
+ ConnectionLost(reason="test_reason"),
+ BrokenPipeError(),
+ ]
+
+ for exception in test_exceptions:
+ self.mock_asyncssh.reset_mock()
+ self.mock_asyncssh.connect.side_effect = exception
+
+ with self.assertRaises(type(exception)):
+ run_async(self.executor._establish_connection)
+
+ self.assertEqual(self.mock_asyncssh.connect.call_count, 10)
+
+ self.mock_asyncssh.connect.side_effect = None
+
+ def test_connection_property(self):
+ async def helper_coroutine():
+ return await self.executor.ssh_connection
+
+ self.assertIsNone(self.executor._ssh_connection)
+ run_async(helper_coroutine)
+
+ self.assertIsInstance(self.executor._ssh_connection, MockConnection)
+
+ current_ssh_connection = self.executor._ssh_connection
+
+ run_async(helper_coroutine)
+
+ self.assertEqual(self.executor._ssh_connection, current_ssh_connection)
+
+ def test_lock(self):
+ self.assertIsInstance(self.executor.lock, asyncio.Lock)
+
+ def test_run_command(self):
+ self.assertIsNone(run_async(self.executor.run_command, command="Test").stdout)
self.mock_asyncssh.connect.assert_called_with(
- host="test_host", username="test", client_keys=("TestKey",)
+ host="test_host", username="test", client_keys=["TestKey"]
)
+ self.mock_asyncssh.reset_mock()
- def test_run_raises_process_error(self):
- test_exception = ProcessError(
- env="Test",
- command="Test",
- subsystem="Test",
- exit_status=1,
- exit_signal=None,
- returncode=1,
- stdout="TestError",
- stderr="TestError",
+ response = run_async(
+ self.executor.run_command, command="Test", stdin_input="Test"
)
- self.mock_asyncssh.connect.side_effect = generate_connect(
- self.response, exception=test_exception
- )
+ self.assertEqual(response.stdout, "Test")
- executor = SSHExecutor(
- host="test_host", username="test", client_keys=("TestKey",)
- )
+ self.assertEqual(response.stderr, "TestError")
- with self.assertRaises(CommandExecutionFailure):
- run_async(executor.run_command, command="Test", stdin_input="Test")
+ self.assertEqual(response.exit_code, 0)
- def test_run_raises_ssh_errors(self):
- test_exceptions = [
- ConnectionResetError,
- DisconnectError(reason="test_reason", code=255),
- ConnectionLost(reason="test_reason"),
- BrokenPipeError,
- ]
+ raising_executor = SSHExecutor(**self.test_asyncssh_params)
- for test_exception in test_exceptions:
- self.mock_asyncssh.connect.side_effect = generate_connect(
- self.response, exception=test_exception
+ self.mock_asyncssh.connect.return_value = async_return(
+ return_value=MockConnection(
+ exception=ProcessError,
+ env="Test",
+ command="Test",
+ subsystem="Test",
+ exit_status=1,
+ exit_signal=None,
+ returncode=1,
+ stdout="TestError",
+ stderr="TestError",
)
+ )
- executor = SSHExecutor(
- host="test_host", username="test", client_keys=("TestKey",)
+ with self.assertRaises(CommandExecutionFailure):
+ run_async(raising_executor.run_command, command="Test", stdin_input="Test")
+
+ raising_executor = SSHExecutor(**self.test_asyncssh_params)
+
+ self.mock_asyncssh.connect.return_value = async_return(
+ return_value=MockConnection(
+ exception=ChannelOpenError, reason="test_reason", code=255
)
+ )
- with self.assertRaises(CommandExecutionFailure):
- run_async(executor.run_command, command="Test", stdin_input="Test")
+ with self.assertRaises(CommandExecutionFailure):
+ run_async(raising_executor.run_command, command="Test", stdin_input="Test")
def test_construction_by_yaml(self):
executor = yaml.safe_load(
@@ -144,9 +154,7 @@ class TestSSHExecutor(TestCase):
- TestKey
"""
)
- response = AttributeDict(stderr="", exit_status=0)
- self.mock_asyncssh.connect.side_effect = generate_connect(response)
self.assertEqual(
run_async(executor.run_command, command="Test", stdin_input="Test").stdout,
"Test",
@@ -154,5 +162,3 @@ class TestSSHExecutor(TestCase):
self.mock_asyncssh.connect.assert_called_with(
host="test_host", username="test", client_keys=["TestKey"]
)
- self.mock_asyncssh.connect.side_effect = None
- self.mock_asyncssh.reset_mock()
| Improve exception handling and recycling of connections in SSH executors of TARDIS
This seems to be very much like the issue reported as #133 — we encountered a traceback after an SSH timeout when using {{tardis.utilities.executors.sshexecutor.SSHExecutor}} (sadly, did not catch the trace, but I guess the issue is generic).
It would be nice if Tardis could handle that and recover :wink:.
It might be interesting also for other users that we now use SSH multiplexing as workaround as shown here:
https://github.com/unibonn/puppet-cobald/commit/0348e098de832f6af96460b39588412dbc6d2701
This significantly reduces the latency of the SSH executor, reduces the number of connections, and seems to increase stability (you should be aware that anybody who can access the multiplex socket can hijack the connection, which is why it's usually stored in {{~/.ssh}}).
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities_t/executors_t/test_sshexecutor.py::TestSSHExecutor::test_connection_property",
"tests/utilities_t/executors_t/test_sshexecutor.py::TestSSHExecutor::test_construction_by_yaml",
"tests/utilities_t/executors_t/test_sshexecutor.py::TestSSHExecutor::test_establish_connection",
"tests/utilities_t/executors_t/test_sshexecutor.py::TestSSHExecutor::test_lock",
"tests/utilities_t/executors_t/test_sshexecutor.py::TestSSHExecutor::test_run_command"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-04-27T20:13:20Z" | mit |
|
MatterMiners__tardis-150 | diff --git a/docs/source/adapters/site.rst b/docs/source/adapters/site.rst
index f13b12b..ef9a436 100644
--- a/docs/source/adapters/site.rst
+++ b/docs/source/adapters/site.rst
@@ -16,9 +16,9 @@ Site Adapter
:py:class:`~tardis.resources.dronestates.AvailableState` before draining it. If no value is given, infinite lifetime
is assumed. Multiple sites are supported by using SequenceNodes.
-.. note::
- Even a minimum lifetime is set, it is not guaranteed that the :py:class:`~tardis.resources.drone.Drone` is not
- drained due to a dropping demand for it before its minimum lifetime is exceeded.
+ .. note::
+ Even if a minimum lifetime is set, it is not guaranteed that the :py:class:`~tardis.resources.drone.Drone` is not
+ drained due to its dropping demand before its minimum lifetime is exceeded.
Generic Site Adapter Configuration
@@ -414,6 +414,23 @@ Available adapter configuration options
| | Default: ShellExecutor is used! | |
+----------------+---------------------------------------------------------------------------------------------+-----------------+
+Available machine type configuration options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. content-tabs:: left-col
+
+ +----------------+--------------------------------------------------------------------------------------------------+-----------------+
+ | Option | Short Description | Requirement |
+ +================+==================================================================================================+=================+
+ | Walltime | Expected walltime of drone | **Required** |
+ +----------------+--------------------------------------------------------------------------------------------------+-----------------+
+ | Partition | Name of the Slurm partition to run in | **Required** |
+ +----------------+--------------------------------------------------------------------------------------------------+-----------------+
+ | StartupCommand | The command to execute at job start | **Required** |
+ +----------------+--------------------------------------------------------------------------------------------------+-----------------+
+ | SubmitOptions | Options to add to the `sbatch` command. `long` and `short` arguments are supported (see example) | **Optional** |
+ +----------------+--------------------------------------------------------------------------------------------------+-----------------+
+
.. content-tabs:: right-col
.. rubric:: Example configuration
@@ -440,6 +457,18 @@ Available adapter configuration options
Walltime: '1440'
Partition: normal
StartupCommand: 'pilot_clean.sh'
+ SubmitOptions:
+ short:
+ C: "intel"
+ long:
+ gres: "gpu:2,mic:1"
+ six_hours:
+ Walltime: '360'
+ Partition: normal
+ StartupCommand: 'pilot_clean.sh'
+ SubmitOptions:
+ long:
+ gres: "gpu:2,mic:1"
twelve_hours:
Walltime: '720'
Partition: normal
@@ -453,6 +482,11 @@ Available adapter configuration options
Cores: 20
Memory: 62
Disk: 480
+ six_hours:
+ Cores: 20
+ Memory: 62
+ Disk: 480
+
.. content-tabs:: left-col
diff --git a/docs/source/api/tardis.plugins.prometheusmonitoring.rst b/docs/source/api/tardis.plugins.prometheusmonitoring.rst
new file mode 100644
index 0000000..0029134
--- /dev/null
+++ b/docs/source/api/tardis.plugins.prometheusmonitoring.rst
@@ -0,0 +1,7 @@
+tardis.plugins.prometheusmonitoring module
+==========================================
+
+.. automodule:: tardis.plugins.prometheusmonitoring
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/api/tardis.plugins.rst b/docs/source/api/tardis.plugins.rst
index 56d6aae..9c8ae6c 100644
--- a/docs/source/api/tardis.plugins.rst
+++ b/docs/source/api/tardis.plugins.rst
@@ -11,5 +11,6 @@ Submodules
.. toctree::
+ tardis.plugins.prometheusmonitoring
tardis.plugins.sqliteregistry
tardis.plugins.telegrafmonitoring
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index d7667ed..b9a16fd 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2020-05-25, command
+.. Created by changelog.py at 2020-06-03, command
'/Users/giffler/.cache/pre-commit/repont7o94ca/py_env-default/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -32,13 +32,14 @@ Fixed
* Fix state transitions for jobs retried by HTCondor
* Fix state transitions and refactoring of the SLURM site adapter
-[Unreleased] - 2020-05-25
+[Unreleased] - 2020-06-03
=========================
Added
-----
* Added an example HTCondor jdl for the HTCondor site adapter
+* Enable support for `sbatch` command line options in the Slurm site adapter
* Add ssh connection sharing to `SSHExecutor` in order to re-use existing connection
Changed
diff --git a/docs/source/changes/150.enable_sbatch_cmdline_options.yaml b/docs/source/changes/150.enable_sbatch_cmdline_options.yaml
new file mode 100644
index 0000000..0d5f281
--- /dev/null
+++ b/docs/source/changes/150.enable_sbatch_cmdline_options.yaml
@@ -0,0 +1,9 @@
+category: added
+summary: "Enable support for `sbatch` command line options in the Slurm site adapter"
+pull requests:
+ - 150
+issues:
+ - 147
+description: |
+ `sbatch` command line option can now be added to the `MachineTypeConfiguration` of the
+ Slurm site adapter. `short` and `long` option are supported via yaml MappingNodes.
diff --git a/tardis/adapters/sites/slurm.py b/tardis/adapters/sites/slurm.py
index a3a8fb5..9431ca6 100644
--- a/tardis/adapters/sites/slurm.py
+++ b/tardis/adapters/sites/slurm.py
@@ -11,6 +11,7 @@ from ...utilities.attributedict import convert_to_attribute_dict
from ...utilities.executors.shellexecutor import ShellExecutor
from ...utilities.asynccachemap import AsyncCacheMap
from ...utilities.utils import htcondor_csv_parser
+from ...utilities.utils import slurm_cmd_option_formatter
from asyncio import TimeoutError
from contextlib import contextmanager
@@ -63,6 +64,10 @@ class SlurmAdapter(SiteAdapter):
)
self._startup_command = self._configuration.StartupCommand
+ self._sbatch_cmdline_option_string = slurm_cmd_option_formatter(
+ self.sbatch_cmdline_options
+ )
+
self._executor = getattr(self._configuration, "executor", ShellExecutor())
self._slurm_status = AsyncCacheMap(
@@ -107,15 +112,13 @@ class SlurmAdapter(SiteAdapter):
async def deploy_resource(
self, resource_attributes: AttributeDict
) -> AttributeDict:
+
request_command = (
- f"sbatch -p {self.machine_type_configuration.Partition} "
- f"-N 1 -n {self.machine_meta_data.Cores} "
- f"--mem={self.machine_meta_data.Memory}gb "
- f"-t {self.machine_type_configuration.Walltime} "
- f"--export=SLURM_Walltime="
- f"{self.machine_type_configuration.Walltime} "
+ "sbatch "
+ f"{self._sbatch_cmdline_option_string} "
f"{self._startup_command}"
)
+
result = await self._executor.run_command(request_command)
logger.debug(f"{self.site_name} sbatch returned {result}")
pattern = re.compile(r"^Submitted batch job (\d*)", flags=re.MULTILINE)
@@ -165,6 +168,27 @@ class SlurmAdapter(SiteAdapter):
{"JobId": resource_attributes.remote_resource_uuid}, **resource_attributes
)
+ @property
+ def sbatch_cmdline_options(self):
+ sbatch_options = self.machine_type_configuration.get(
+ "SubmitOptions", AttributeDict()
+ )
+
+ return AttributeDict(
+ short=AttributeDict(
+ **sbatch_options.get("short", AttributeDict()),
+ p=self.machine_type_configuration.Partition,
+ N=1,
+ n=self.machine_meta_data.Cores,
+ t=self.machine_type_configuration.Walltime,
+ ),
+ long=AttributeDict(
+ **sbatch_options.get("long", AttributeDict()),
+ mem=f"{self.machine_meta_data.Memory}gb",
+ export=f"SLURM_Walltime={self.machine_type_configuration.Walltime}",
+ ),
+ )
+
async def stop_resource(self, resource_attributes: AttributeDict):
logger.debug("Slurm jobs cannot be stopped gracefully. Terminating instead.")
return await self.terminate_resource(resource_attributes)
diff --git a/tardis/utilities/utils.py b/tardis/utilities/utils.py
index 0525ff2..f17512e 100644
--- a/tardis/utilities/utils.py
+++ b/tardis/utilities/utils.py
@@ -1,12 +1,17 @@
+from .attributedict import AttributeDict
from .executors.shellexecutor import ShellExecutor
from ..exceptions.executorexceptions import CommandExecutionFailure
+from ..interfaces.executor import Executor
from io import StringIO
+from typing import List, Tuple
import csv
-async def async_run_command(cmd, shell_executor=ShellExecutor()):
+async def async_run_command(
+ cmd: str, shell_executor: Executor = ShellExecutor()
+) -> str:
try:
response = await shell_executor.run_command(cmd)
except CommandExecutionFailure as ef:
@@ -22,16 +27,25 @@ async def async_run_command(cmd, shell_executor=ShellExecutor()):
return response.stdout
-def htcondor_cmd_option_formatter(options):
+def cmd_option_formatter(options: AttributeDict, prefix: str, separator: str) -> str:
options = (
- f"-{name} {value}" if value is not None else f"-{name}"
+ f"{prefix}{name}{separator}{value}" if value is not None else f"{prefix}{name}"
for name, value in options.items()
)
return " ".join(options)
-def htcondor_csv_parser(htcondor_input, fieldnames, delimiter="\t", replacements=None):
+def htcondor_cmd_option_formatter(options: AttributeDict) -> str:
+ return cmd_option_formatter(options, prefix="-", separator=" ")
+
+
+def htcondor_csv_parser(
+ htcondor_input: str,
+ fieldnames: [List, Tuple],
+ delimiter: str = "\t",
+ replacements: dict = None,
+):
replacements = replacements or {}
with StringIO(htcondor_input) as csv_input:
cvs_reader = csv.DictReader(
@@ -42,3 +56,26 @@ def htcondor_csv_parser(htcondor_input, fieldnames, delimiter="\t", replacements
key: value if value not in replacements.keys() else replacements[value]
for key, value in row.items()
}
+
+
+def slurm_cmd_option_formatter(options: AttributeDict) -> str:
+ option_prefix = dict(short="-", long="--")
+ option_separator = dict(short=" ", long="=")
+
+ option_string = ""
+
+ for option_type in ("short", "long"):
+ try:
+ tmp_option_string = cmd_option_formatter(
+ getattr(options, option_type),
+ prefix=option_prefix[option_type],
+ separator=option_separator[option_type],
+ )
+ except AttributeError:
+ pass
+ else:
+ if option_string: # add additional space between short and long options
+ option_string += " "
+ option_string += tmp_option_string
+
+ return option_string
| MatterMiners/tardis | 068ad68d85901633cb78ccbb6f004666b254cc8e | diff --git a/tests/adapters_t/sites_t/test_slurm.py b/tests/adapters_t/sites_t/test_slurm.py
index a8f0355..64c4dbf 100644
--- a/tests/adapters_t/sites_t/test_slurm.py
+++ b/tests/adapters_t/sites_t/test_slurm.py
@@ -176,7 +176,26 @@ class TestSlurmAdapter(TestCase):
)
self.mock_executor.return_value.run_command.assert_called_with(
- "sbatch -p normal -N 1 -n 20 --mem=62gb -t 60 --export=SLURM_Walltime=60 pilot.sh" # noqa: B950
+ "sbatch -p normal -N 1 -n 20 -t 60 --mem=62gb --export=SLURM_Walltime=60 pilot.sh" # noqa: B950
+ )
+
+ @mock_executor_run_command(TEST_DEPLOY_RESOURCE_RESPONSE)
+ def test_deploy_resource_w_submit_options(self):
+ self.test_site_config.MachineTypeConfiguration.test2large.SubmitOptions = AttributeDict( # noqa: B950
+ long=AttributeDict(gres="tmp:1G")
+ )
+
+ slurm_adapter = SlurmAdapter(machine_type="test2large", site_name="TestSite")
+
+ run_async(
+ slurm_adapter.deploy_resource,
+ resource_attributes=AttributeDict(
+ machine_type="test2large", site_name="TestSite"
+ ),
+ )
+
+ self.mock_executor.return_value.run_command.assert_called_with(
+ "sbatch -p normal -N 1 -n 20 -t 60 --gres=tmp:1G --mem=62gb --export=SLURM_Walltime=60 pilot.sh" # noqa: B950
)
def test_machine_meta_data(self):
diff --git a/tests/utilities_t/test_utils.py b/tests/utilities_t/test_utils.py
index dcab165..76638d6 100644
--- a/tests/utilities_t/test_utils.py
+++ b/tests/utilities_t/test_utils.py
@@ -1,6 +1,9 @@
+from tardis.utilities.attributedict import AttributeDict
from tardis.utilities.utils import async_run_command
from tardis.utilities.utils import htcondor_cmd_option_formatter
from tardis.utilities.utils import htcondor_csv_parser
+from tardis.utilities.utils import slurm_cmd_option_formatter
+
from tardis.exceptions.executorexceptions import CommandExecutionFailure
from ..utilities.utilities import run_async
@@ -21,10 +24,15 @@ class TestAsyncRunCommand(TestCase):
class TestHTCondorCMDOptionFormatter(TestCase):
def test_htcondor_cmd_option_formatter(self):
- options = {"pool": "my-htcondor.local", "test": None}
- options_string = htcondor_cmd_option_formatter(options)
+ options = AttributeDict(pool="my-htcondor.local", test=None)
+ option_string = htcondor_cmd_option_formatter(options)
+
+ self.assertEqual(option_string, "-pool my-htcondor.local -test")
- self.assertEqual(options_string, "-pool my-htcondor.local -test")
+ options = AttributeDict()
+ option_string = htcondor_cmd_option_formatter(options)
+
+ self.assertEqual(option_string, "")
class TestHTCondorCSVParser(TestCase):
@@ -63,3 +71,31 @@ class TestHTCondorCSVParser(TestCase):
Test2=None,
),
)
+
+
+class TestSlurmCMDOptionFormatter(TestCase):
+ def test_slurm_cmd_option_formatter(self):
+ options = AttributeDict()
+ option_string = slurm_cmd_option_formatter(options)
+
+ self.assertEqual(option_string, "")
+
+ options = AttributeDict(short=AttributeDict(foo="bar", test=None))
+ option_string = slurm_cmd_option_formatter(options)
+
+ self.assertEqual(option_string, "-foo bar -test")
+
+ options = AttributeDict(long=AttributeDict(foo="bar", test=None))
+ option_string = slurm_cmd_option_formatter(options)
+
+ self.assertEqual(option_string, "--foo=bar --test")
+
+ options = AttributeDict(
+ short=AttributeDict(foo="bar", test=None),
+ long=AttributeDict(foo_long="bar_long", test_long=None),
+ )
+ option_string = slurm_cmd_option_formatter(options)
+
+ self.assertEqual(
+ option_string, "-foo bar -test --foo_long=bar_long --test_long"
+ )
| [Slurm adapter] Need to specify custom (GRES) flags for sbatch
Some use cases in Slurm can only be covered with Generic Resource ([GRES](https://slurm.schedmd.com/gres.html)) scheduling.
One such example is the reservation of scratch disk space:
https://bugs.schedmd.com/show_bug.cgi?id=2549#c9
Since the name of the GRES is completely arbitrary and up to the HPC-site, it is probably best to have a way to pass arbitrary parameters to `sbatch`. The best place (configuration-wise) is likely the `MachineTypeConfiguration`, so it is similar to the `jdl` funcitonality in HTCondor.
I ma not yet sure if it is better to allow fully arbitrary parameters or have a dedicated `gres` configuration stanza which directly takes the `gres` to reserve.
Pinging also @wiene | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource_w_submit_options",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_state_translation",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_of_completed_jobs",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_raise_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_site_name",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_start_up_command_deprecation_warning",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_update_resource_status",
"tests/utilities_t/test_utils.py::TestAsyncRunCommand::test_async_run_command",
"tests/utilities_t/test_utils.py::TestHTCondorCMDOptionFormatter::test_htcondor_cmd_option_formatter",
"tests/utilities_t/test_utils.py::TestHTCondorCSVParser::test_htcondor_csv_parser",
"tests/utilities_t/test_utils.py::TestSlurmCMDOptionFormatter::test_slurm_cmd_option_formatter"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-05-14T12:21:29Z" | mit |
|
MatterMiners__tardis-173 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index c2c4f06..1f95158 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2021-03-22, command
+.. Created by changelog.py at 2021-03-23, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-python3.9/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2021-03-22
+[Unreleased] - 2021-03-23
=========================
Added
@@ -18,6 +18,7 @@ Fixed
-----
* Fixes a bug that the drone_minimum_lifetime parameter is not working as described in the documentation
+* Fixes a bug in the HTCondor Site Adapter which leads to wrong requirements when using non HTCondor OBS
[0.5.0] - 2020-12-09
====================
diff --git a/docs/source/changes/173.fix_meta_data_translation_htcondor.yaml b/docs/source/changes/173.fix_meta_data_translation_htcondor.yaml
new file mode 100644
index 0000000..bf434eb
--- /dev/null
+++ b/docs/source/changes/173.fix_meta_data_translation_htcondor.yaml
@@ -0,0 +1,12 @@
+category: fixed
+summary: "Fixes a bug in the HTCondor Site Adapter which leads to wrong requirements when using non HTCondor OBS"
+description: |
+ The HTCondor Site Adapter takes a wrong `machine_meta_data_translation_mapping` into account in some circumstances.
+ Due to a bug introduced in #157, the HTCondor Site Adapter uses the `machine_meta_data_translation_mapping` of the
+ Batchsystem Adapter (OBS). In case the OBS is also HTCondor or the OBS has the same translations it does not have any
+ affect. However, in case the OBS is using different units for memory and disk space --hence different translation
+ mappings-- the requested Drones have wrong requirements.
+pull requests:
+ - 173
+issues:
+ - 170
diff --git a/tardis/adapters/sites/htcondor.py b/tardis/adapters/sites/htcondor.py
index fa4a7bb..604ed16 100644
--- a/tardis/adapters/sites/htcondor.py
+++ b/tardis/adapters/sites/htcondor.py
@@ -7,7 +7,7 @@ from ...utilities.asynccachemap import AsyncCacheMap
from ...utilities.attributedict import AttributeDict
from ...utilities.staticmapping import StaticMapping
from ...utilities.executors.shellexecutor import ShellExecutor
-from ...utilities.utils import csv_parser
+from ...utilities.utils import csv_parser, machine_meta_data_translation
from contextlib import contextmanager
from datetime import datetime
@@ -58,6 +58,10 @@ htcondor_status_codes = {
class HTCondorAdapter(SiteAdapter):
+ htcondor_machine_meta_data_translation_mapping = AttributeDict(
+ Cores=1, Memory=1024, Disk=1024 * 1024
+ )
+
def __init__(self, machine_type: str, site_name: str):
self._machine_type = machine_type
self._site_name = site_name
@@ -99,11 +103,14 @@ class HTCondorAdapter(SiteAdapter):
drone_environment = self.drone_environment(
resource_attributes.drone_uuid,
- resource_attributes.machine_meta_data_translation_mapping,
+ resource_attributes.obs_machine_meta_data_translation_mapping,
)
submit_jdl = jdl_template.substitute(
- drone_environment,
+ machine_meta_data_translation(
+ self.machine_meta_data,
+ self.htcondor_machine_meta_data_translation_mapping,
+ ),
Environment=";".join(
f"TardisDrone{key}={value}" for key, value in drone_environment.items()
),
diff --git a/tardis/adapters/sites/slurm.py b/tardis/adapters/sites/slurm.py
index 7f79d10..72b4f69 100644
--- a/tardis/adapters/sites/slurm.py
+++ b/tardis/adapters/sites/slurm.py
@@ -110,7 +110,7 @@ class SlurmAdapter(SiteAdapter):
sbatch_cmdline_option_string = submit_cmd_option_formatter(
self.sbatch_cmdline_options(
resource_attributes.drone_uuid,
- resource_attributes.machine_meta_data_translation_mapping,
+ resource_attributes.obs_machine_meta_data_translation_mapping,
)
)
diff --git a/tardis/interfaces/siteadapter.py b/tardis/interfaces/siteadapter.py
index 42e91c8..e3b98e7 100644
--- a/tardis/interfaces/siteadapter.py
+++ b/tardis/interfaces/siteadapter.py
@@ -1,5 +1,6 @@
from ..configuration.configuration import Configuration
from ..utilities.attributedict import AttributeDict
+from ..utilities.utils import machine_meta_data_translation
from abc import ABCMeta, abstractmethod
from cobald.utility.primitives import infinity as inf
@@ -92,16 +93,10 @@ class SiteAdapter(metaclass=ABCMeta):
:return: Translated
:rtype: dict
"""
- try:
- drone_environment = {
- key: meta_data_translation_mapping[key] * value
- for key, value in self.machine_meta_data.items()
- }
- except KeyError as ke:
- logger.critical(f"drone_environment failed: no translation known for {ke}")
- raise
- else:
- drone_environment["Uuid"] = drone_uuid
+ drone_environment = machine_meta_data_translation(
+ self.machine_meta_data, meta_data_translation_mapping
+ )
+ drone_environment["Uuid"] = drone_uuid
return drone_environment
diff --git a/tardis/resources/drone.py b/tardis/resources/drone.py
index 2cb7bbf..95cd783 100644
--- a/tardis/resources/drone.py
+++ b/tardis/resources/drone.py
@@ -40,7 +40,7 @@ class Drone(Pool):
self.resource_attributes = AttributeDict(
site_name=self._site_agent.site_name,
machine_type=self.site_agent.machine_type,
- machine_meta_data_translation_mapping=self.batch_system_agent.machine_meta_data_translation_mapping, # noqa B950
+ obs_machine_meta_data_translation_mapping=self.batch_system_agent.machine_meta_data_translation_mapping, # noqa B950
remote_resource_uuid=remote_resource_uuid,
created=created or datetime.now(),
updated=updated or datetime.now(),
diff --git a/tardis/utilities/utils.py b/tardis/utilities/utils.py
index b4ba112..20d21ea 100644
--- a/tardis/utilities/utils.py
+++ b/tardis/utilities/utils.py
@@ -7,6 +7,9 @@ from io import StringIO
from typing import List, Tuple
import csv
+import logging
+
+logger = logging.getLogger("cobald.runtime.tardis.utilities.utils")
async def async_run_command(
@@ -82,6 +85,30 @@ def csv_parser(
}
+def machine_meta_data_translation(
+ machine_meta_data: AttributeDict, meta_data_translation_mapping: AttributeDict
+):
+ """
+ Helper function to translate units of the machine_meta_data to match the
+ units required by the overlay batch system
+ :param machine_meta_data: Machine Meta Data (Cores, Memory, Disk)
+ :param meta_data_translation_mapping: Map used for the translation of meta
+ data, contains conversion factors
+ :return:
+ :rtype: dict
+ """
+ try:
+ return {
+ key: meta_data_translation_mapping[key] * value
+ for key, value in machine_meta_data.items()
+ }
+ except KeyError as ke:
+ logger.critical(
+ f"machine_meta_data_translation failed: no translation known for {ke}"
+ )
+ raise
+
+
def submit_cmd_option_formatter(options: AttributeDict) -> str:
option_prefix = dict(short="-", long="--")
option_separator = dict(short=" ", long="=")
| MatterMiners/tardis | 346fa3a4e3708b5ae493ed9ba017deb04fcc7fb3 | diff --git a/tests/adapters_t/sites_t/test_htcondorsiteadapter.py b/tests/adapters_t/sites_t/test_htcondorsiteadapter.py
index a83d394..722bee7 100644
--- a/tests/adapters_t/sites_t/test_htcondorsiteadapter.py
+++ b/tests/adapters_t/sites_t/test_htcondorsiteadapter.py
@@ -34,7 +34,7 @@ CONDOR_SUSPEND_FAILED_OUTPUT = """Couldn't find/suspend all jobs in cluster 1351
CONDOR_SUSPEND_FAILED_MESSAGE = """Run command condor_suspend 1351043 via
ShellExecutor failed"""
-CONDOR_SUBMIT_JDL = """executable = start_pilot.sh
+CONDOR_SUBMIT_JDL_CONDOR_OBS = """executable = start_pilot.sh
transfer_input_files = setup_pilot.sh
output = logs/$(cluster).$(process).out
error = logs/$(cluster).$(process).err
@@ -50,6 +50,22 @@ request_disk=167772160
queue 1""" # noqa: B950
+CONDOR_SUBMIT_JDL_SPARK_OBS = """executable = start_pilot.sh
+transfer_input_files = setup_pilot.sh
+output = logs/$(cluster).$(process).out
+error = logs/$(cluster).$(process).err
+log = logs/cluster.log
+
+accounting_group=tardis
+
+environment=TardisDroneCores=8;TardisDroneMemory=32;TardisDroneDisk=160;TardisDroneUuid=test-123
+
+request_cpus=8
+request_memory=32768
+request_disk=167772160
+
+queue 1""" # noqa: B950
+
class TestHTCondorSiteAdapter(TestCase):
mock_config_patcher = None
@@ -94,12 +110,12 @@ class TestHTCondorSiteAdapter(TestCase):
)
@mock_executor_run_command(stdout=CONDOR_SUBMIT_OUTPUT)
- def test_deploy_resource(self):
+ def test_deploy_resource_htcondor_obs(self):
response = run_async(
self.adapter.deploy_resource,
AttributeDict(
drone_uuid="test-123",
- machine_meta_data_translation_mapping=AttributeDict(
+ obs_machine_meta_data_translation_mapping=AttributeDict(
Cores=1,
Memory=1024,
Disk=1024 * 1024,
@@ -111,7 +127,24 @@ class TestHTCondorSiteAdapter(TestCase):
self.assertFalse(response.updated - datetime.now() > timedelta(seconds=1))
self.mock_executor.return_value.run_command.assert_called_with(
- "condor_submit", stdin_input=CONDOR_SUBMIT_JDL
+ "condor_submit", stdin_input=CONDOR_SUBMIT_JDL_CONDOR_OBS
+ )
+ self.mock_executor.reset()
+
+ run_async(
+ self.adapter.deploy_resource,
+ AttributeDict(
+ drone_uuid="test-123",
+ obs_machine_meta_data_translation_mapping=AttributeDict(
+ Cores=1,
+ Memory=1,
+ Disk=1,
+ ),
+ ),
+ )
+
+ self.mock_executor.return_value.run_command.assert_called_with(
+ "condor_submit", stdin_input=CONDOR_SUBMIT_JDL_SPARK_OBS
)
self.mock_executor.reset()
@@ -125,7 +158,7 @@ class TestHTCondorSiteAdapter(TestCase):
self.adapter.deploy_resource,
AttributeDict(
drone_uuid="test-123",
- machine_meta_data_translation_mapping=AttributeDict(
+ obs_machine_meta_data_translation_mapping=AttributeDict(
Cores=1,
Memory=1024,
Disk=1024 * 1024,
diff --git a/tests/adapters_t/sites_t/test_slurm.py b/tests/adapters_t/sites_t/test_slurm.py
index 19897eb..63bdc8b 100644
--- a/tests/adapters_t/sites_t/test_slurm.py
+++ b/tests/adapters_t/sites_t/test_slurm.py
@@ -162,7 +162,7 @@ class TestSlurmAdapter(TestCase):
resource_attributes=AttributeDict(
machine_type="test2large",
site_name="TestSite",
- machine_meta_data_translation_mapping=AttributeDict(
+ obs_machine_meta_data_translation_mapping=AttributeDict(
Cores=1,
Memory=1000,
Disk=1000,
@@ -199,7 +199,7 @@ class TestSlurmAdapter(TestCase):
resource_attributes=AttributeDict(
machine_type="test2large",
site_name="TestSite",
- machine_meta_data_translation_mapping=AttributeDict(
+ obs_machine_meta_data_translation_mapping=AttributeDict(
Cores=1,
Memory=1000,
Disk=1000,
diff --git a/tests/interfaces_t/test_siteadapter.py b/tests/interfaces_t/test_siteadapter.py
index 71eedd6..614b3ac 100644
--- a/tests/interfaces_t/test_siteadapter.py
+++ b/tests/interfaces_t/test_siteadapter.py
@@ -65,7 +65,7 @@ class TestSiteAdapter(TestCase):
)
with self.assertLogs(
- logger="cobald.runtime.tardis.interfaces.site", level=logging.CRITICAL
+ logger="cobald.runtime.tardis.utilities.utils", level=logging.CRITICAL
), self.assertRaises(KeyError):
self.site_adapter.drone_environment(
drone_uuid="test-123",
| HTCondor Site Adapter takes a wrong `machine_meta_data_translation_mapping` into account in some circumstances.
The HTCondor Site Adapter takes a wrong `machine_meta_data_translation_mapping` into account in some circumstances. Due to a bug introduced in #157, the HTCondor Site Adapter uses the `machine_meta_data_translation_mapping` of the Batchsystem Adapter (OBS).
In case the OBS is also HTCondor or the OBS uses the same translations, this has no effect. However, in case the OBS uses different units for memory and disk space (hence different translation mappings), the requested Drones have wrong requirements.
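To make the unit mismatch concrete, a small illustration (the numbers follow the test above; the mappings are the conversion factors into the MB/KiB units used in the JDL versus an OBS that keeps GB):
```python
machine_meta_data = {"Cores": 8, "Memory": 32, "Disk": 160}  # Memory/Disk in GB

# conversion factors into the units HTCondor expects in the JDL (MB, KiB)
htcondor_mapping = {"Cores": 1, "Memory": 1024, "Disk": 1024 * 1024}
# an OBS that already works in GB translates with factors of 1
other_obs_mapping = {"Cores": 1, "Memory": 1, "Disk": 1}


def translate(meta_data, mapping):
    return {key: mapping[key] * value for key, value in meta_data.items()}


print(translate(machine_meta_data, htcondor_mapping))
# {'Cores': 8, 'Memory': 32768, 'Disk': 167772160} -> correct request_memory/request_disk
print(translate(machine_meta_data, other_obs_mapping))
# {'Cores': 8, 'Memory': 32, 'Disk': 160} -> wrong if used for the JDL requirements
```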
The code containing the bug is
https://github.com/MatterMiners/tardis/blob/7043b312ea95db508b9984c6e7b81a36647e27b8/tardis/adapters/sites/htcondor.py#L102-L112
Proposed solution:
- [ ] Use the `machine_meta_data_translation_mapping` only for the Drone environment (necessary to limit HTCondor's STARTD to use only the requested resources of a machine via HTCondor ClassAds)
- [ ] Use a HTCondor Site Adapter specific mapping for the actual resource request via the JDL | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_deploy_resource_htcondor_obs",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_translate_resources_raises_logs",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource_w_submit_options",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_drone_environment"
] | [
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_completed",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_held",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_idle",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_raise_future",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_raise_past",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_removing",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_run",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_transfering_output",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_resource_status_unexpanded",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_site_name",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_stop_resource_failed_raise",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_stop_resource_failed_redo",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_terminate_resource_failed_raise",
"tests/adapters_t/sites_t/test_htcondorsiteadapter.py::TestHTCondorSiteAdapter::test_terminate_resource_failed_redo",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_state_translation",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_of_completed_jobs",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_raise_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_site_name",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_start_up_command_deprecation_warning",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_update_resource_status",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_configuration",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_deploy_resource",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_drone_heartbeat_interval",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_drone_minimum_lifetime",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_drone_uuid",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_handle_exception",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_handle_response_matching",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_handle_response_non_matching",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_machine_meta_data",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_machine_type",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_machine_type_configuration",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_resource_status",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_site_configuration",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_site_name",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_stop_resource",
"tests/interfaces_t/test_siteadapter.py::TestSiteAdapter::test_terminate_resource"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-23T11:31:08Z" | mit |
|
MatterMiners__tardis-175 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 1f95158..c8aad5a 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2021-03-23, command
+.. Created by changelog.py at 2021-03-24, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-python3.9/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2021-03-23
+[Unreleased] - 2021-03-24
=========================
Added
@@ -17,6 +17,7 @@ Added
Fixed
-----
+* Fixes a bug that get_resource_ratios raised a ValueError
* Fixes a bug that the drone_minimum_lifetime parameter is not working as described in the documentation
* Fixes a bug in the HTCondor Site Adapter which leads to wrong requirements when using non HTCondor OBS
diff --git a/docs/source/changes/175.fix_resource_ratios.yaml b/docs/source/changes/175.fix_resource_ratios.yaml
new file mode 100644
index 0000000..1ba3409
--- /dev/null
+++ b/docs/source/changes/175.fix_resource_ratios.yaml
@@ -0,0 +1,10 @@
+category: fixed
+summary: "Fixes a bug that get_resource_ratios raised a ValueError"
+description: |
+ In case one of the resource ratios is `undefined` or even has the value `error`
+ a `ValueError` or `TypeError` could occur. In case one of those errors occurs,
+ an empty list is returned.
+pull requests:
+ - 175
+issues:
+ - 168
diff --git a/tardis/adapters/batchsystems/htcondor.py b/tardis/adapters/batchsystems/htcondor.py
index 0bb702b..67ec25b 100644
--- a/tardis/adapters/batchsystems/htcondor.py
+++ b/tardis/adapters/batchsystems/htcondor.py
@@ -172,14 +172,13 @@ class HTCondorAdapter(BatchSystemAdapter):
await self._htcondor_status.update_status()
try:
htcondor_status = self._htcondor_status[drone_uuid]
- except KeyError:
- return {}
- else:
- return (
+ return [
float(value)
for key, value in htcondor_status.items()
if key in self.ratios.keys()
- )
+ ]
+ except (KeyError, ValueError, TypeError):
+ return []
async def get_allocation(self, drone_uuid: str) -> float:
"""
| MatterMiners/tardis | d9896533503e0b215aa941db1c91e696ade1ef31 | diff --git a/tests/adapters_t/batchsystems_t/test_htcondor.py b/tests/adapters_t/batchsystems_t/test_htcondor.py
index cc4c41f..0a0e2a8 100644
--- a/tests/adapters_t/batchsystems_t/test_htcondor.py
+++ b/tests/adapters_t/batchsystems_t/test_htcondor.py
@@ -57,6 +57,8 @@ class TestHTCondorAdapter(TestCase):
f"test_drained\tslot1@test\tDrained\tIdle\tundefined\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
f"test_owner\tslot1@test\tOwner\tIdle\tundefined\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
f"test_uuid_plus\tslot1@test_uuid@test\tUnclaimed\tIdle\ttest_uuid\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
+ f"test_undefined\tslot1@test\tUnclaimed\tIdle\tundefined\tundefined\t{self.memory_ratio}", # noqa: B950
+ f"test_error\tslot1@test\tUnclaimed\tIdle\tundefined\terror\t{self.memory_ratio}", # noqa: B950
"exoscale-26d361290f\tslot1@exoscale-26d361290f\tUnclaimed\tIdle\tundefined\t0.125\t0.125", # noqa: B950
]
)
@@ -151,12 +153,31 @@ class TestHTCondorAdapter(TestCase):
[self.cpu_ratio, self.memory_ratio],
)
self.mock_async_run_command.assert_called_with(self.command)
+ self.mock_async_run_command.reset_mock()
self.assertEqual(
run_async(
self.htcondor_adapter.get_resource_ratios, drone_uuid="not_exists"
),
- {},
+ [],
+ )
+ self.mock_async_run_command.assert_not_called()
+ self.mock_async_run_command.reset_mock()
+
+ self.assertEqual(
+ run_async(
+ self.htcondor_adapter.get_resource_ratios, drone_uuid="test_undefined"
+ ),
+ [],
+ )
+ self.mock_async_run_command.assert_not_called()
+ self.mock_async_run_command.reset_mock()
+
+ self.assertEqual(
+ run_async(
+ self.htcondor_adapter.get_resource_ratios, drone_uuid="test_error"
+ ),
+ [],
)
def test_get_resource_ratios_without_options(self):
| Rare crash in `get_resource_ratios` when Slot variables are not defined yet
@wiene and I observe rare crashes with this trace (line numbers refer to the 0.5.0 version):
```
2021-02-20 01:41:11 [ERROR][cobald.runtime.runner.asyncio]: runner aborted: <cobald.daemon.runners.asyncio_runner.AsyncioRunner object at 0x7fbb5d697978>
Traceback (most recent call last):
File "/opt/cobald/lib64/python3.6/site-packages/cobald/daemon/runners/base_runner.py", line 62, in run
self._run()
File "/opt/cobald/lib64/python3.6/site-packages/cobald/daemon/runners/asyncio_runner.py", line 28, in _run
self.event_loop.run_until_complete(self._run_payloads())
File "/usr/lib64/python3.6/asyncio/base_events.py", line 484, in run_until_complete
return future.result()
File "/opt/cobald/lib64/python3.6/site-packages/cobald/daemon/runners/asyncio_runner.py", line 36, in _run_payloads
await self._reap_payloads()
File "/opt/cobald/lib64/python3.6/site-packages/cobald/daemon/runners/asyncio_runner.py", line 58, in _reap_payloads
raise task.exception()
File "/opt/cobald/lib64/python3.6/site-packages/cobald/daemon/runners/async_tools.py", line 7, in raise_return
value = await payload()
File "/opt/cobald/lib64/python3.6/site-packages/tardis/resources/drone.py", line 94, in run
await current_state.run(self)
File "/opt/cobald/lib64/python3.6/site-packages/tardis/resources/dronestates.py", line 171, in run
drone_uuid=drone.resource_attributes["drone_uuid"]
File "/opt/cobald/lib64/python3.6/site-packages/tardis/agents/batchsystemagent.py", line 20, in get_allocation
return await self._batch_system_adapter.get_allocation(drone_uuid)
File "/opt/cobald/lib64/python3.6/site-packages/tardis/adapters/batchsystems/htcondor.py", line 195, in get_allocation
return max(await self.get_resource_ratios(drone_uuid), default=0.0)
File "/opt/cobald/lib64/python3.6/site-packages/tardis/adapters/batchsystems/htcondor.py", line 181, in <genexpr>
if key in self.ratios.keys()
ValueError: could not convert string to float: 'error'
```
So that happens in here:
https://github.com/MatterMiners/tardis/blob/1139be80d885305ef8a62fca7ca49d8e15ca334e/tardis/adapters/batchsystems/htcondor.py#L172-L182
We set the following `BatchSystem.ratios` for our HTCondor LBS:
```YAML
cpu_ratio: Real(TotalSlotCpus-Cpus)/TotalSlotCpus
memory_ratio: Real(TotalSlotMemory-Memory)/TotalSlotMemory
cpu_usage: IfThenElse(AverageCPUsUsage=?=undefined, 0, Real(AverageCPUsUsage))
```
This always seems to happen close to (synchronously with) the `schedd` negotiating with the LBS, i.e. when new drones are starting. Presumably, `Cpus` or `Memory` may not be well-defined for short periods in HTCondor (maybe while the `pslot` is in the process of being formed?).
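For reference, a minimal standalone sketch of the defensive handling that the merged patch at the top of this record applies (simplified; the real method is a coroutine on the adapter and reads the status map from `self`):
```python
# Simplified sketch of the fixed ratio lookup: values such as "undefined" or "error"
# cannot be cast to float, so the method falls back to an empty list instead of crashing.
def resource_ratios(status_per_drone: dict, drone_uuid: str, ratio_keys: set) -> list:
    try:
        htcondor_status = status_per_drone[drone_uuid]  # KeyError for unknown drones
        return [
            float(value)  # ValueError/TypeError for "undefined", "error" or None
            for key, value in htcondor_status.items()
            if key in ratio_keys
        ]
    except (KeyError, ValueError, TypeError):
        return []

ratios = {"cpu_ratio", "memory_ratio"}
print(resource_ratios({"d1": {"cpu_ratio": "0.125", "memory_ratio": "0.25"}}, "d1", ratios))  # [0.125, 0.25]
print(resource_ratios({"d1": {"cpu_ratio": "error", "memory_ratio": "0.25"}}, "d1", ratios))  # []
```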
Of course, this could be worked around with an `ifThenElse`, but maybe catching an `error` string in TARDIS and retrying later in that case might be more reliable :smile: . | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_resource_ratios"
] | [
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_disintegrate_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_drain_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_drain_machine_without_options",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_allocation",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_machine_status",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_resource_ratios_without_options",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_utilisation",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_integrate_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_machine_meta_data_translation_mapping"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-23T16:09:16Z" | mit |
|
MatterMiners__tardis-178 | diff --git a/.github/workflows/verification.yml b/.github/workflows/verification.yml
new file mode 100644
index 0000000..de6e1a3
--- /dev/null
+++ b/.github/workflows/verification.yml
@@ -0,0 +1,23 @@
+name: Static Checks
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.9'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install .[contrib]
+ - name: Lint with flake8
+ run: |
+ flake8 tardis tests setup.py
+ - name: Format with black
+ run: |
+ black tardis tests setup.py --diff --check --target-version py36
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index d7679cc..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-language: python
-
-python:
- - "3.6"
- - "3.7"
- - "3.8"
- - "3.9"
- - "nightly" # currently points to 3.8-dev
- - "pypy3"
-
-os:
- - linux
-
-matrix:
- allow_failures:
- - python: "nightly"
- - python: "pypy3"
- include:
- - python: 3.7
- name: Style
- dist: xenial
- script:
- - python -m flake8 tardis tests setup.py
- - python -m black --target-version py36 --check tardis/ tests/ setup.py
- fast_finish: true
-
-install:
- - pip install codecov
- - pip install coverage
- - pip install .[contrib]
- - pip install git+https://github.com/MatterMiners/cobald.git
-
-script: coverage run setup.py test
-
-after_success:
- - coverage report
- - codecov
diff --git a/README.md b/README.md
index 880d4f7..256ddd3 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
-[![Build Status](https://travis-ci.org/MatterMiners/tardis.svg?branch=master)](https://travis-ci.org/MatterMiners/tardis)
+[![Build Status](https://github.com/MatterMiners/tardis/actions/workflows/unittests.yml/badge.svg)](https://github.com/MatterMiners/tardis/actions/workflows/unittests.yml)
+[![Verification](https://github.com/MatterMiners/tardis/actions/workflows/verification.yml/badge.svg)](https://github.com/MatterMiners/tardis/actions/workflows/verification.yml)
[![codecov](https://codecov.io/gh/MatterMiners/tardis/branch/master/graph/badge.svg)](https://codecov.io/gh/MatterMiners/tardis)
[![Documentation Status](https://readthedocs.org/projects/cobald-tardis/badge/?version=latest)](https://cobald-tardis.readthedocs.io/en/latest/?badge=latest)
[![Development and Help Chat](https://badges.gitter.im/MatterMiners.png)](https://gitter.im/MatterMiners/community)
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 1f95158..c8aad5a 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2021-03-23, command
+.. Created by changelog.py at 2021-03-24, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-python3.9/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2021-03-23
+[Unreleased] - 2021-03-24
=========================
Added
@@ -17,6 +17,7 @@ Added
Fixed
-----
+* Fixes a bug that get_resource_ratios raised a ValueError
* Fixes a bug that the drone_minimum_lifetime parameter is not working as described in the documentation
* Fixes a bug in the HTCondor Site Adapter which leads to wrong requirements when using non HTCondor OBS
diff --git a/docs/source/changes/175.fix_resource_ratios.yaml b/docs/source/changes/175.fix_resource_ratios.yaml
new file mode 100644
index 0000000..1ba3409
--- /dev/null
+++ b/docs/source/changes/175.fix_resource_ratios.yaml
@@ -0,0 +1,10 @@
+category: fixed
+summary: "Fixes a bug that get_resource_ratios raised a ValueError"
+description: |
+ In case one of the resource ratios is `undefined` or even has the value `error`
+ a `ValueError` or `TypeError` could occur. In case one of those errors occurs,
+ an empty list is returned.
+pull requests:
+ - 175
+issues:
+ - 168
diff --git a/setup.py b/setup.py
index 615818b..3a46a9c 100644
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,8 @@ setup(
extras_require={
"docs": ["sphinx", "sphinx_rtd_theme", "sphinxcontrib-contentui"],
"test": TESTS_REQUIRE,
- "contrib": ["flake8", "flake8-bugbear", "black"] + TESTS_REQUIRE,
+ "contrib": ["flake8", "flake8-bugbear", "black; implementation_name=='cpython'"]
+ + TESTS_REQUIRE,
},
tests_require=TESTS_REQUIRE,
zip_safe=False,
diff --git a/tardis/adapters/batchsystems/htcondor.py b/tardis/adapters/batchsystems/htcondor.py
index 0bb702b..67ec25b 100644
--- a/tardis/adapters/batchsystems/htcondor.py
+++ b/tardis/adapters/batchsystems/htcondor.py
@@ -172,14 +172,13 @@ class HTCondorAdapter(BatchSystemAdapter):
await self._htcondor_status.update_status()
try:
htcondor_status = self._htcondor_status[drone_uuid]
- except KeyError:
- return {}
- else:
- return (
+ return [
float(value)
for key, value in htcondor_status.items()
if key in self.ratios.keys()
- )
+ ]
+ except (KeyError, ValueError, TypeError):
+ return []
async def get_allocation(self, drone_uuid: str) -> float:
"""
| MatterMiners/tardis | d9896533503e0b215aa941db1c91e696ade1ef31 | diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml
new file mode 100644
index 0000000..c38701a
--- /dev/null
+++ b/.github/workflows/unittests.yml
@@ -0,0 +1,27 @@
+name: Unit Tests
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ['3.6', '3.7', '3.8', '3.9']
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install .[contrib]
+ pip install coverage codecov
+ - name: Test with unittest
+ run: |
+ coverage run -m unittest -v
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v1
diff --git a/tests/adapters_t/batchsystems_t/test_htcondor.py b/tests/adapters_t/batchsystems_t/test_htcondor.py
index cc4c41f..0a0e2a8 100644
--- a/tests/adapters_t/batchsystems_t/test_htcondor.py
+++ b/tests/adapters_t/batchsystems_t/test_htcondor.py
@@ -57,6 +57,8 @@ class TestHTCondorAdapter(TestCase):
f"test_drained\tslot1@test\tDrained\tIdle\tundefined\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
f"test_owner\tslot1@test\tOwner\tIdle\tundefined\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
f"test_uuid_plus\tslot1@test_uuid@test\tUnclaimed\tIdle\ttest_uuid\t{self.cpu_ratio}\t{self.memory_ratio}", # noqa: B950
+ f"test_undefined\tslot1@test\tUnclaimed\tIdle\tundefined\tundefined\t{self.memory_ratio}", # noqa: B950
+ f"test_error\tslot1@test\tUnclaimed\tIdle\tundefined\terror\t{self.memory_ratio}", # noqa: B950
"exoscale-26d361290f\tslot1@exoscale-26d361290f\tUnclaimed\tIdle\tundefined\t0.125\t0.125", # noqa: B950
]
)
@@ -151,12 +153,31 @@ class TestHTCondorAdapter(TestCase):
[self.cpu_ratio, self.memory_ratio],
)
self.mock_async_run_command.assert_called_with(self.command)
+ self.mock_async_run_command.reset_mock()
self.assertEqual(
run_async(
self.htcondor_adapter.get_resource_ratios, drone_uuid="not_exists"
),
- {},
+ [],
+ )
+ self.mock_async_run_command.assert_not_called()
+ self.mock_async_run_command.reset_mock()
+
+ self.assertEqual(
+ run_async(
+ self.htcondor_adapter.get_resource_ratios, drone_uuid="test_undefined"
+ ),
+ [],
+ )
+ self.mock_async_run_command.assert_not_called()
+ self.mock_async_run_command.reset_mock()
+
+ self.assertEqual(
+ run_async(
+ self.htcondor_adapter.get_resource_ratios, drone_uuid="test_error"
+ ),
+ [],
)
def test_get_resource_ratios_without_options(self):
| Move CI to GitHub actions
Move CI to GitHub Actions instead of using `travis.org`, which has recently become quite slow. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_resource_ratios"
] | [
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_disintegrate_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_drain_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_drain_machine_without_options",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_allocation",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_machine_status",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_resource_ratios_without_options",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_get_utilisation",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_integrate_machine",
"tests/adapters_t/batchsystems_t/test_htcondor.py::TestHTCondorAdapter::test_machine_meta_data_translation_mapping"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-03-23T18:03:34Z" | mit |
|
MatterMiners__tardis-197 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 0fa911c..2d008be 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2021-07-12, command
+.. Created by changelog.py at 2021-07-15, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-python3.9/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2021-07-12
+[Unreleased] - 2021-07-15
=========================
Added
diff --git a/tardis/adapters/sites/slurm.py b/tardis/adapters/sites/slurm.py
index 72b4f69..96154c2 100644
--- a/tardis/adapters/sites/slurm.py
+++ b/tardis/adapters/sites/slurm.py
@@ -9,8 +9,7 @@ from ...utilities.attributedict import AttributeDict
from ...utilities.attributedict import convert_to_attribute_dict
from ...utilities.executors.shellexecutor import ShellExecutor
from ...utilities.asynccachemap import AsyncCacheMap
-from ...utilities.utils import csv_parser
-from ...utilities.utils import submit_cmd_option_formatter
+from ...utilities.utils import convert_to, csv_parser, submit_cmd_option_formatter
from asyncio import TimeoutError
from contextlib import contextmanager
@@ -174,7 +173,7 @@ class SlurmAdapter(SiteAdapter):
walltime = self.machine_type_configuration.Walltime
drone_environment = ",".join(
- f"TardisDrone{key}={value}"
+ f"TardisDrone{key}={convert_to(value, int, value)}"
for key, value in self.drone_environment(
drone_uuid, machine_meta_data_translation_mapping
).items()
@@ -190,7 +189,12 @@ class SlurmAdapter(SiteAdapter):
),
long=AttributeDict(
**sbatch_options.get("long", AttributeDict()),
- mem=f"{self.machine_meta_data.Memory}gb",
+ # slurm does not accept floating point variables for memory,
+ # therefore use internally megabytes and convert it to an integer
+ # to allow for request i.e. 2.5 GB in the machine meta data. According
+ # to http://cern.ch/go/x7p8 SLURM is using factors of 1024 to convert
+ # between memory units
+ mem=f"{int(self.machine_meta_data.Memory * 1024)}mb",
export=f"SLURM_Walltime={walltime},{drone_environment}",
),
)
diff --git a/tardis/utilities/utils.py b/tardis/utilities/utils.py
index 20d21ea..62f5216 100644
--- a/tardis/utilities/utils.py
+++ b/tardis/utilities/utils.py
@@ -4,7 +4,8 @@ from ..exceptions.executorexceptions import CommandExecutionFailure
from ..interfaces.executor import Executor
from io import StringIO
-from typing import List, Tuple
+from typing import Any, Callable, List, TypeVar, Tuple
+
import csv
import logging
@@ -131,3 +132,16 @@ def submit_cmd_option_formatter(options: AttributeDict) -> str:
option_string += tmp_option_string
return option_string.strip()
+
+
+T = TypeVar("T")
+sentinel = object()
+
+
+def convert_to(
+ value: Any, convert_to_type: Callable[[Any], T], default: Any = sentinel
+) -> T:
+ try:
+ return convert_to_type(value)
+ except ValueError:
+ return default
| MatterMiners/tardis | b3ea6fa926048c7812632a43663f0a5b005ee443 | diff --git a/.github/workflows/deployment-tests.yml b/.github/workflows/deployment-tests.yml
index 8d7e215..c8617d0 100644
--- a/.github/workflows/deployment-tests.yml
+++ b/.github/workflows/deployment-tests.yml
@@ -43,8 +43,8 @@ jobs:
- name: Install dependencies on ${{ matrix.os_container }}
run: |
python3 -m pip install --upgrade pip
- pip install .[contrib]
- pip install coverage codecov
+ python3 -m pip install .[contrib]
+ python3 -m pip install coverage codecov
- name: Test with unittest on ${{ matrix.os_container }}
run: |
coverage run -m unittest -v
@@ -58,8 +58,8 @@ jobs:
- name: Install dependencies on ${{ matrix.platform }}
run: |
python3 -m pip install --upgrade pip
- pip install .[contrib]
- pip install coverage codecov
+ python3 -m pip install .[contrib]
+ python3 -m pip install coverage codecov
- name: Test with unittest on ${{ matrix.platform }}
run: |
coverage run -m unittest -v
diff --git a/tests/adapters_t/sites_t/test_slurm.py b/tests/adapters_t/sites_t/test_slurm.py
index 63bdc8b..7a979cf 100644
--- a/tests/adapters_t/sites_t/test_slurm.py
+++ b/tests/adapters_t/sites_t/test_slurm.py
@@ -157,18 +157,20 @@ class TestSlurmAdapter(TestCase):
expected_resource_attributes.update(
created=datetime.now(), updated=datetime.now()
)
- returned_resource_attributes = run_async(
- self.slurm_adapter.deploy_resource,
- resource_attributes=AttributeDict(
- machine_type="test2large",
- site_name="TestSite",
- obs_machine_meta_data_translation_mapping=AttributeDict(
- Cores=1,
- Memory=1000,
- Disk=1000,
- ),
- drone_uuid="testsite-1390065",
+
+ resource_attributes = AttributeDict(
+ machine_type="test2large",
+ site_name="TestSite",
+ obs_machine_meta_data_translation_mapping=AttributeDict(
+ Cores=1,
+ Memory=1024,
+ Disk=1024,
),
+ drone_uuid="testsite-1390065",
+ )
+
+ returned_resource_attributes = run_async(
+ self.slurm_adapter.deploy_resource, resource_attributes
)
self.assertLess(
@@ -183,7 +185,27 @@ class TestSlurmAdapter(TestCase):
)
self.mock_executor.return_value.run_command.assert_called_with(
- "sbatch -p normal -N 1 -n 20 -t 60 --mem=62gb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=62000,TardisDroneDisk=100000,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
+ "sbatch -p normal -N 1 -n 20 -t 60 --mem=63488mb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=63488,TardisDroneDisk=102400,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
+ )
+
+ self.mock_executor.reset_mock()
+
+ self.test_site_config.MachineMetaData.test2large.Memory = 2.5
+
+ run_async(self.slurm_adapter.deploy_resource, resource_attributes)
+
+ self.mock_executor.return_value.run_command.assert_called_with(
+ "sbatch -p normal -N 1 -n 20 -t 60 --mem=2560mb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=2560,TardisDroneDisk=102400,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
+ )
+
+ self.mock_executor.reset_mock()
+
+ self.test_site_config.MachineMetaData.test2large.Memory = 2.546372129
+
+ run_async(self.slurm_adapter.deploy_resource, resource_attributes)
+
+ self.mock_executor.return_value.run_command.assert_called_with(
+ "sbatch -p normal -N 1 -n 20 -t 60 --mem=2607mb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=2607,TardisDroneDisk=102400,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
)
@mock_executor_run_command(TEST_DEPLOY_RESOURCE_RESPONSE)
@@ -209,7 +231,7 @@ class TestSlurmAdapter(TestCase):
)
self.mock_executor.return_value.run_command.assert_called_with(
- "sbatch -p normal -N 1 -n 20 -t 60 --gres=tmp:1G --mem=62gb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=62000,TardisDroneDisk=100000,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
+ "sbatch -p normal -N 1 -n 20 -t 60 --gres=tmp:1G --mem=63488mb --export=SLURM_Walltime=60,TardisDroneCores=20,TardisDroneMemory=62000,TardisDroneDisk=100000,TardisDroneUuid=testsite-1390065 pilot.sh" # noqa: B950
)
def test_machine_meta_data(self):
| Slurm siteadapter fails with fractional GB of memory
@wiene and I found that trying to define drones with non-integer `Memory` in their `MachineMetaData` works fine with most siteadapters (e.g. HTCondor), but breaks with Slurm:
```
2021-07-11 03:32:12 [WARNING][cobald.runtime.tardis.adapters.sites.slurm]: Execute command failed:
(message=Run command sbatch -p medium -N 1 -n 1 -t 2880 --gres=scratch:4G --qos=low --mem=2.5gb --export=SLURM_Walltime=2880,TardisDroneCores=1,TardisDroneMemory=2560.0,TardisDroneDisk=4194304,TardisDroneUuid=uni-bonn.bonna-b76b556e52 /home/physik-services/drones/launch-atlas-drone via SSHExecutor
failed, exit_code=255, stdout=, stderr=sbatch: error: Invalid --mem specification
, stdin=None)
```
=> It seems `sbatch` (and `srun`) do not accept floating-point values :cry:.
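As a rough illustration (hypothetical value; the merged patch above uses the same factor of 1024 that SLURM assumes for memory unit conversion), requesting integer megabytes sidesteps the problem:
```python
# Request integer megabytes instead of a possibly fractional gigabyte value.
memory_in_gb = 2.5                                  # e.g. taken from MachineMetaData
mem_option = f"--mem={int(memory_in_gb * 1024)}mb"  # '--mem=2560mb' instead of '--mem=2.5gb'
print(mem_option)
```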
One solution would be to change:
https://github.com/MatterMiners/tardis/blob/ae5320255759c4ff626175985ee2d21f8f3729c9/tardis/adapters/sites/slurm.py#L193
to use `mb` as the unit, multiply by `1024`, and explicitly convert to an integer to prevent the submit failure. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource_w_submit_options"
] | [
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_state_translation",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_of_completed_jobs",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_raise_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_site_name",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_start_up_command_deprecation_warning",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_update_resource_status"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-07-12T14:56:47Z" | mit |
|
MatterMiners__tardis-238 | diff --git a/.flake8 b/.flake8
index d461ede..d098624 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,6 @@
[flake8]
statistics = True
max-line-length = 80
-ignore = E501, B008, B011, W503
+ignore = E501, B008, B011, B019, W503
select = C,E,F,W,B,B9
exclude = docs,.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg
diff --git a/tardis/utilities/asyncbulkcall.py b/tardis/utilities/asyncbulkcall.py
index 1e0f993..4fb5192 100644
--- a/tardis/utilities/asyncbulkcall.py
+++ b/tardis/utilities/asyncbulkcall.py
@@ -1,4 +1,4 @@
-from typing import TypeVar, Generic, Iterable, List, Tuple, Optional
+from typing import TypeVar, Generic, Iterable, List, Tuple, Optional, Set
from typing_extensions import Protocol
import asyncio
import time
@@ -73,7 +73,9 @@ class AsyncBulkCall(Generic[T, R]):
self._delay = delay
self._concurrency = sys.maxsize if concurrent is None else concurrent
# task handling dispatch from queue to command execution
- self._master_worker: Optional[asyncio.Task] = None
+ self._dispatch_task: Optional[asyncio.Task] = None
+ # tasks handling individual command executions
+ self._bulk_tasks: Set[asyncio.Task] = set()
self._verify_settings()
@cached_property
@@ -100,18 +102,16 @@ class AsyncBulkCall(Generic[T, R]):
async def __call__(self, __task: T) -> R:
"""Queue a ``task`` for bulk execution and return the result when available"""
result: "asyncio.Future[R]" = asyncio.get_event_loop().create_future()
- await self._queue.put((__task, result))
- self._ensure_worker()
+ # queue item first so that the dispatch task does not finish before
+ self._queue.put_nowait((__task, result))
+ # ensure there is a worker to dispatch items for command execution
+ if self._dispatch_task is None:
+ self._dispatch_task = asyncio.ensure_future(self._bulk_dispatch())
return await result
- def _ensure_worker(self):
- """Ensure there is a worker to dispatch tasks for command execution"""
- if self._master_worker is None:
- self._master_worker = asyncio.ensure_future(self._bulk_dispatch())
-
async def _bulk_dispatch(self):
"""Collect tasks into bulks and dispatch them for command execution"""
- while True:
+ while not self._queue.empty():
bulk = list(zip(*(await self._get_bulk())))
if not bulk:
continue
@@ -122,9 +122,14 @@ class AsyncBulkCall(Generic[T, R]):
await self._concurrent.acquire()
task = asyncio.ensure_future(self._bulk_execute(tuple(tasks), futures))
task.add_done_callback(lambda _: self._concurrent.release)
+ # track tasks via strong references to avoid them being garbage collected.
+ # see bpo#44665
+ self._bulk_tasks.add(task)
+ task.add_done_callback(lambda _: self._bulk_tasks.discard(task))
# yield to the event loop so that the `while True` loop does not arbitrarily
# delay other tasks on the fast paths for `_get_bulk` and `acquire`.
await asyncio.sleep(0)
+ self._dispatch_task = None
async def _get_bulk(self) -> "List[Tuple[T, asyncio.Future[R]]]":
"""Fetch the next bulk from the internal queue"""
@@ -132,6 +137,7 @@ class AsyncBulkCall(Generic[T, R]):
# always pull in at least one item asynchronously
# this avoids stalling for very low delays and efficiently waits for items
results = [await queue.get()]
+ queue.task_done()
deadline = time.monotonic() + self._delay
while len(results) < max_items and time.monotonic() < deadline:
try:
| MatterMiners/tardis | f89d96a5bb6cf519cf8c93218b8477d04a353d8a | diff --git a/tests/utilities_t/test_asyncbulkcall.py b/tests/utilities_t/test_asyncbulkcall.py
index 7fbcf4f..6ffd6a0 100644
--- a/tests/utilities_t/test_asyncbulkcall.py
+++ b/tests/utilities_t/test_asyncbulkcall.py
@@ -61,6 +61,22 @@ class TestAsyncBulkCall(TestCase):
result = run_async(self.execute, execution, count=2048)
self.assertEqual(result, [(i, i) for i in range(2048)])
+ def test_restart(self):
+ """Test that calls work after pausing"""
+ run_async(self.check_restart)
+
+ async def check_restart(self):
+ bunch_size = 4
+ # use large delay to only trigger on size
+ execution = AsyncBulkCall(CallCounter(), size=bunch_size // 2, delay=256)
+ for repeat in range(6):
+ result = await self.execute(execution, bunch_size)
+ self.assertEqual(
+ result, [(i, i // 2 + repeat * 2) for i in range(bunch_size)]
+ )
+ await asyncio.sleep(0.01) # pause to allow for cleanup
+ assert execution._dispatch_task is None
+
def test_sanity_checks(self):
"""Test against illegal settings"""
for wrong_size in (0, -1, 0.5, 2j, "15"):
| Missing task done call in AsyncBulkCall
It seems that `AsyncBulkCall` is missing one `task_done` call. To simplify the logic, we can assume that `size` is set to 1.
In that case the first task is fetched in
https://github.com/MatterMiners/tardis/blob/75b60e3a6c91caf4774d6638b2faf26cc8b6b3a7/tardis/utilities/asyncbulkcall.py#L134
the `while` loop in https://github.com/MatterMiners/tardis/blob/75b60e3a6c91caf4774d6638b2faf26cc8b6b3a7/tardis/utilities/asyncbulkcall.py#L136 is skipped,
and therefore `task_done` in https://github.com/MatterMiners/tardis/blob/75b60e3a6c91caf4774d6638b2faf26cc8b6b3a7/tardis/utilities/asyncbulkcall.py#L148 is never called.
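A minimal, simplified sketch of the fetch logic with the missing acknowledgement added (not the project's exact code, which additionally honours a time-based deadline):
```python
import asyncio

async def get_bulk(queue: asyncio.Queue, max_items: int) -> list:
    # The first item is always awaited unconditionally and has to be acknowledged
    # as well; this is the `task_done()` call that was missing.
    results = [await queue.get()]
    queue.task_done()
    while len(results) < max_items:
        try:
            results.append(queue.get_nowait())
        except asyncio.QueueEmpty:
            break
        queue.task_done()
    return results
```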
More generally speaking, there is always one `task_done` call missing, independent of `size`. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities_t/test_asyncbulkcall.py::TestAsyncBulkCall::test_restart"
] | [
"tests/utilities_t/test_asyncbulkcall.py::TestAsyncBulkCall::test_bulk_delay",
"tests/utilities_t/test_asyncbulkcall.py::TestAsyncBulkCall::test_bulk_size",
"tests/utilities_t/test_asyncbulkcall.py::TestAsyncBulkCall::test_delay_tiny",
"tests/utilities_t/test_asyncbulkcall.py::TestAsyncBulkCall::test_sanity_checks"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-07T10:16:23Z" | mit |
|
MatterMiners__tardis-241 | diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index a09a9a3..70c6322 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2022-03-09, command
+.. Created by changelog.py at 2022-04-13, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-default/bin/changelog docs/source/changes compile --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,7 +6,7 @@
CHANGELOG
#########
-[Unreleased] - 2022-03-09
+[Unreleased] - 2022-04-13
=========================
Added
diff --git a/setup.py b/setup.py
index e26e654..9f84530 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@ setup(
"cobald>=0.12.3",
"asyncssh",
"aiotelegraf",
- "elasticsearch",
+ "elasticsearch>=7.17,<8.0.0",
"aioprometheus>=21.9.0",
"kubernetes_asyncio",
"pydantic",
diff --git a/tardis/adapters/sites/slurm.py b/tardis/adapters/sites/slurm.py
index dc8c6b0..f5fd0b8 100644
--- a/tardis/adapters/sites/slurm.py
+++ b/tardis/adapters/sites/slurm.py
@@ -92,6 +92,7 @@ class SlurmAdapter(SiteAdapter):
STAGE_OUT=ResourceStatus.Running,
STOPPED=ResourceStatus.Stopped,
SUSPENDED=ResourceStatus.Stopped,
+ TIMEOUT=ResourceStatus.Deleted,
): translator.get(x, default=ResourceStatus.Error),
JobId=lambda x: int(x),
)
diff --git a/tardis/plugins/elasticsearchmonitoring.py b/tardis/plugins/elasticsearchmonitoring.py
index cbc60fe..46c11be 100644
--- a/tardis/plugins/elasticsearchmonitoring.py
+++ b/tardis/plugins/elasticsearchmonitoring.py
@@ -25,7 +25,9 @@ class ElasticsearchMonitoring(Plugin):
self._index = config.index
self._meta = getattr(config, "meta", "")
- self._es = Elasticsearch([{"host": config.host, "port": config.port}])
+ self._es = Elasticsearch(
+ [{"scheme": "http", "host": config.host, "port": config.port}]
+ )
async def notify(self, state: State, resource_attributes: AttributeDict) -> None:
"""
@@ -47,7 +49,7 @@ class ElasticsearchMonitoring(Plugin):
"state": str(state),
"meta": self._meta,
"timestamp": int(time() * 1000),
- "resource_status": str(resource_attributes["resource_status"]),
+ "resource_status": str(resource_attributes.get("resource_status", "")),
}
await self.async_execute(document)
| MatterMiners/tardis | 8f1cd47fc7610ec7cd10de3379084cae9d10c480 | diff --git a/tests/adapters_t/sites_t/test_slurm.py b/tests/adapters_t/sites_t/test_slurm.py
index 7a979cf..a372abb 100644
--- a/tests/adapters_t/sites_t/test_slurm.py
+++ b/tests/adapters_t/sites_t/test_slurm.py
@@ -323,7 +323,7 @@ class TestSlurmAdapter(TestCase):
"STAGE_OUT": ResourceStatus.Running,
"STOPPED": ResourceStatus.Stopped,
"SUSPENDED": ResourceStatus.Stopped,
- "TIMEOUT": ResourceStatus.Error,
+ "TIMEOUT": ResourceStatus.Deleted,
}
for id, value in enumerate(state_translations.values()):
diff --git a/tests/plugins_t/test_elasticsearchmonitoring.py b/tests/plugins_t/test_elasticsearchmonitoring.py
index 6877020..9867cb1 100644
--- a/tests/plugins_t/test_elasticsearchmonitoring.py
+++ b/tests/plugins_t/test_elasticsearchmonitoring.py
@@ -80,3 +80,39 @@ class TestElasticsearchMonitoring(TestCase):
id=f"{test_param.drone_uuid}-2",
index=f"{self.plugin._index}-{self.mock_datetime.now.return_value.strftime.return_value}", # noqa: B950
)
+
+ def test_notify_resource_status_missing(self):
+ test_param = AttributeDict(
+ site_name="test-site",
+ machine_type="test_machine_type",
+ created=datetime.now(),
+ updated=datetime.now(),
+ drone_uuid="test-drone",
+ )
+
+ test_param_ext = {
+ **test_param,
+ "state": str(CleanupState()),
+ "meta": self.plugin._meta,
+ "timestamp": int(self.mock_time.return_value * 1000),
+ "resource_status": "",
+ "revision": 2,
+ }
+
+ self.mock_elasticsearch.return_value.search.return_value = {
+ "hits": {"total": {"value": 2}}
+ }
+
+ run_async(
+ self.plugin.notify, state=CleanupState(), resource_attributes=test_param
+ )
+
+ self.mock_elasticsearch.return_value.search.assert_called_with(
+ index=f"{self.plugin._index}*",
+ body={"query": {"term": {"drone_uuid.keyword": test_param.drone_uuid}}},
+ )
+ self.mock_elasticsearch.return_value.create.assert_called_with(
+ body=test_param_ext,
+ id=f"{test_param.drone_uuid}-2",
+ index=f"{self.plugin._index}-{self.mock_datetime.now.return_value.strftime.return_value}", # noqa: B950
+ )
| Drones go into `Error` state when a slurm batch job times out
If a slurm batch job runs out of walltime, it will report status `TIMEOUT` in `squeue`. This is not properly recognized by tardis, which puts the corresponding VM into `Error` state and repeatedly tries to `scancel` the job (which reports success, since from a slurm point of view things are already as they should be). Tardis keeps the VM in `Error` state until `squeue -o -h -t all` stops reporting the job ID (this takes approx. 15 minutes in the case I observed). Once that happens, proper cleanup seems to take place.
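For context, the Slurm site adapter translates `squeue` states into TARDIS resource states and falls back to `Error` for any state it does not know, which is why `TIMEOUT` ended up as `Error` before the patch above added an explicit mapping. A minimal sketch of that mechanism (illustrative subset of states, not the project's exact code):
```python
from enum import Enum

class ResourceStatus(Enum):
    Booting = 1
    Running = 2
    Stopped = 3
    Deleted = 4
    Error = 5

state_translation = {
    "RUNNING": ResourceStatus.Running,
    "COMPLETED": ResourceStatus.Deleted,
    "TIMEOUT": ResourceStatus.Deleted,  # the mapping added by the fix
}

def translate(squeue_state: str) -> ResourceStatus:
    # unknown states fall back to Error
    return state_translation.get(squeue_state, ResourceStatus.Error)

assert translate("TIMEOUT") is ResourceStatus.Deleted
assert translate("SOME_UNKNOWN_STATE") is ResourceStatus.Error
```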
A few snippets from a log file to illustrate things:
- Job creation:
```console
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 10:45:43 jureca sbatch returned {'stdout': 'Submitted batch job 10367673\n', 'stderr': '', 'exit_code': 0}
cobald.runtime.tardis.plugins.sqliteregistry: 2022-04-13 10:45:43 Drone: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 10, 45, 43, 732464), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Booting: 1>} has changed state to BootingState
```
The queue / partition this is submitted to has a walltime limit of 2 hours (the corresponding cobald/tardis configuration is set to `115` minutes). After approx. 2 hours the job is still reported as running:
```console
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:40:39 jureca has status {'JobId': '10367673', 'Host': 'jrc0728', 'State': 'RUNNING'}.
cobald.runtime.tardis.resources.dronestates: 2022-04-13 12:40:39 Resource attributes: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 40, 39, 229941), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Running: 2>}
```
Shortly after that, the job times out on the slurm batch system, sending the resource into `Error` status:
```console
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:41:39 Slurm status update finished.
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:41:39 jureca has status {'JobId': '10367673', 'Host': 'jrc0728', 'State': 'TIMEOUT'}.
cobald.runtime.tardis.resources.dronestates: 2022-04-13 12:41:39 Resource attributes: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 41, 39, 963501), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Error: 5>}
```
After this tardis repeatedly tries to cancel the resource:
```console
asyncssh: 2022-04-13 12:42:41 [conn=0, chan=323] Command: scancel 10367673
asyncssh: 2022-04-13 12:42:41 [conn=0, chan=323] Received exit status 0
```
This seems to send the resource into `CleanupState` temporarily:
```console
cobald.runtime.tardis.resources.dronestates: 2022-04-13 12:43:41 Drone {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 42, 41, 513820), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Stopped: 3>} in CleanupState
```
But the next `squeue` query resets that to `Error`:
```console
asyncssh: 2022-04-13 12:43:41 [conn=0, chan=324] Command: squeue -o "%A|%N|%T" -h -t all
asyncssh: 2022-04-13 12:43:42 [conn=0, chan=324] Received exit status 0
asyncssh: 2022-04-13 12:43:42 [conn=0, chan=324] Received channel close
asyncssh: 2022-04-13 12:43:42 [conn=0, chan=324] Channel closed
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:43:42 Slurm status update finished.
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:43:42 jureca has status {'JobId': '10367673', 'Host': 'jrc0728', 'State': 'TIMEOUT'}.
cobald.runtime.tardis.resources.dronestates: 2022-04-13 12:43:42 Resource attributes: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 43, 42, 508553), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Error: 5>}
```
The last two points then repeat until `squeue` no longer reports the `JobId`; when that happens, tardis seems to treat this as `COMPLETED` (or slurm starts to report it as `COMPLETED`?) and deletes the resource:
```console
cobald.runtime.tardis.adapters.sites.slurm: 2022-04-13 12:58:57 jureca has status {'JobID': 10367673, 'State': 'COMPLETED'}.
cobald.runtime.tardis.resources.dronestates: 2022-04-13 12:58:57 Resource attributes: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 58, 57, 307470), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Deleted: 4>}
cobald.runtime.tardis.plugins.sqliteregistry: 2022-04-13 12:58:57 Drone: {'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 58, 57, 307754), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Deleted: 4>} has changed state to DownState
cobald.runtime.tardis.plugins.sqliteregistry: 2022-04-13 12:58:57 DELETE FROM Resources
WHERE drone_uuid = :drone_uuid
AND site_id = (SELECT site_id from Sites WHERE site_name = :site_name),{'state': 'DownState', 'site_name': 'jureca', 'machine_type': 'default', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1024, 'Disk': 1048576}, 'remote_resource_uuid': 10367673, 'created': datetime.datetime(2022, 4, 13, 10, 45, 43, 732421), 'updated': datetime.datetime(2022, 4, 13, 12, 58, 57, 307754), 'drone_uuid': 'jureca-0af69344e7', 'resource_status': <ResourceStatus.Deleted: 4>} executed
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_state_translation",
"tests/plugins_t/test_elasticsearchmonitoring.py::TestElasticsearchMonitoring::test_notify_resource_status_missing"
] | [
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_deploy_resource_w_submit_options",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_of_completed_jobs",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_raise_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_resource_status_update_failed",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_site_name",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_start_up_command_deprecation_warning",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_slurm.py::TestSlurmAdapter::test_update_resource_status",
"tests/plugins_t/test_elasticsearchmonitoring.py::TestElasticsearchMonitoring::test_notify"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-13T15:22:24Z" | mit |
|
MatterMiners__tardis-301 | diff --git a/tardis/resources/drone.py b/tardis/resources/drone.py
index 4d0322b..a5b3c13 100644
--- a/tardis/resources/drone.py
+++ b/tardis/resources/drone.py
@@ -3,9 +3,9 @@ from typing import List, Union, Optional, Type
from tardis.agents.batchsystemagent import BatchSystemAgent
from tardis.agents.siteagent import SiteAgent
from tardis.interfaces.plugin import Plugin
+from tardis.interfaces.siteadapter import ResourceStatus
from tardis.interfaces.state import State
-from .dronestates import RequestState
-from .dronestates import DownState
+from .dronestates import DownState, RequestState
from ..plugins.sqliteregistry import SqliteRegistry
from ..utilities.attributedict import AttributeDict
from ..utilities.utils import load_states
@@ -32,8 +32,8 @@ class Drone(Pool):
remote_resource_uuid=None,
drone_uuid=None,
state: Optional[State] = None,
- created: float = None,
- updated: float = None,
+ created: Optional[float] = None,
+ updated: Optional[float] = None,
):
self._site_agent = site_agent
self._batch_system_agent = batch_system_agent
@@ -117,6 +117,12 @@ class Drone(Pool):
# to be notified on the first state change. As calling the
# ``set_state`` coroutine is not possible in the constructor, we
# initiate the first state change here
+ #
+ # In addition, all necessary attributes in `resource_attributes`
+ # `AttributeDict` need to be present and have meaningful defaults.
+ # `resource_status` should be set to `ResourceStatus.Booting` on
+ # newly created drones by default.
+ self.resource_attributes.resource_status = ResourceStatus.Booting
await self.set_state(RequestState())
while True:
current_state = self.state
| MatterMiners/tardis | 35bfe1762fc99e53737ad55b41e9a8fe2ad56212 | diff --git a/tests/resources_t/test_drone.py b/tests/resources_t/test_drone.py
index f071036..1d38efe 100644
--- a/tests/resources_t/test_drone.py
+++ b/tests/resources_t/test_drone.py
@@ -1,9 +1,10 @@
from ..utilities.utilities import async_return, run_async, set_awaitable_return_value
from tardis.interfaces.plugin import Plugin
+from tardis.interfaces.siteadapter import ResourceStatus
from tardis.interfaces.state import State
from tardis.resources.drone import Drone
-from tardis.resources.dronestates import DrainState, DownState
+from tardis.resources.dronestates import DrainState, DownState, RequestState
from tardis.plugins.sqliteregistry import SqliteRegistry
from tardis.utilities.attributedict import AttributeDict
@@ -112,6 +113,24 @@ class TestDrone(TestCase):
def test_site_agent(self):
self.assertEqual(self.drone.site_agent, self.mock_site_agent)
+ @patch("tardis.resources.drone.RequestState", spec=RequestState)
+ def test_first_run_of_initialized_drone(self, mocked_request_state):
+ # self.drone.run() is executed in an endless loop, therefore an
+ # EndOfLoop exception is thrown, which then can be caught. Throwing
+ # StopIteration does not work in a while loop
+ class EndOfLoop(Exception):
+ pass
+
+ mocked_request_state.return_value.run.side_effect = EndOfLoop
+ with self.assertRaises(EndOfLoop):
+ run_async(self.drone.run)
+
+ # Actual test code
+ self.assertIsInstance(self.drone.state, RequestState)
+ self.assertEqual(
+ self.drone.resource_attributes.resource_status, ResourceStatus.Booting
+ )
+
@patch("tardis.resources.drone.asyncio.sleep")
def test_run(self, mocked_asyncio_sleep):
mocked_asyncio_sleep.side_effect = async_return
 | Missing resource_status crashes the Prometheus plugin
Dear all,
we are currently migrating our C/T to the newest image (the old image is from September 2022 with a custom auditor plugin) and are seeing some crashes related to the Prometheus monitoring plugin. We are running the following image:
```
docker images --digests
REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE
matterminers/cobald-tardis latest sha256:7b6fc72444eb7f25d8b17d6e957311fb4d7d5e3abed70aed4875e373aafcbafc d2ca28594b2b 6 weeks ago 1.03GB
```
The crash happens when a drone changes to RequestState:
```
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: cobald.runtime.tardis.plugins.prometheusmonitoring: 2023-07-10 08:03:24 Drone: {'site_name': 'NEMO', 'machine_type': 'tardis_c40m100', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1000, 'Disk': 1000}, 'remote_resource_uuid':
None, 'created': datetime.datetime(2023, 7, 10, 8, 3, 23, 922620), 'updated': datetime.datetime(2023, 7, 10, 8, 3, 24, 469225), 'drone_uuid': 'nemo-34fd16c8b9'} has changed state to RequestState
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: cobald.runtime.runner.asyncio: 2023-07-10 08:03:24 runner aborted: <cobald.daemon.runners.asyncio_runner.AsyncioRunner object at 0x7f51391150d0>
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: Traceback (most recent call last):
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/cobald/daemon/runners/base_runner.py", line 68, in run
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: await self.manage_payloads()
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/cobald/daemon/runners/asyncio_runner.py", line 54, in manage_payloads
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: await self._payload_failure
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/cobald/daemon/runners/asyncio_runner.py", line 40, in _monitor_payload
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: result = await payload()
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/tardis/resources/drone.py", line 120, in run
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: await self.set_state(RequestState())
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/tardis/resources/drone.py", line 143, in set_state
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: await self.notify_plugins()
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/tardis/resources/drone.py", line 153, in notify_plugins
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: await plugin.notify(self.state, self.resource_attributes)
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/tardis/plugins/prometheusmonitoring.py", line 67, in notify
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: new_status = resource_attributes.resource_status
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: File "/usr/local/lib/python3.8/site-packages/tardis/utilities/attributedict.py", line 17, in __getattr__
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: raise AttributeError(
Jul 10 10:03:24 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2129502]: AttributeError: resource_status is not a valid attribute. Dict contains {'site_name': 'NEMO', 'machine_type': 'tardis_c40m100', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1000, 'Disk': 1000}, 'remote_resource_uuid': None,
'created': datetime.datetime(2023, 7, 10, 8, 3, 23, 922620), 'updated': datetime.datetime(2023, 7, 10, 8, 3, 24, 469225), 'drone_uuid': 'nemo-34fd16c8b9'}.
```
As the error message indicates, the attribute `resource_status` is missing in the `resource_attributes` dict. This dict is accessed in the `notify` method of the Prometheus plugin (line 67):
https://github.com/MatterMiners/tardis/blob/0f76db0b3ccb3645fcf971fa82e15c33d4f51258/tardis/plugins/prometheusmonitoring.py#L47-L73
After the service restarted C/T and some time had passed, we however see that the `resource_status` is now present in the `resource_attributes` dict:
```
Jul 10 10:03:38 monopol.bfg.privat docker-COBalD-Tardis-atlhei[2130347]: cobald.runtime.tardis.plugins.sqliteregistry: 2023-07-10 08:03:38 Drone: {'site_name': 'NEMO', 'machine_type': 'tardis_c40m100', 'obs_machine_meta_data_translation_mapping': {'Cores': 1, 'Memory': 1000, 'Disk': 1000}, 'remote_resource_uuid': 16960754, 'created': datetime.datetime(2023, 7, 10, 8, 3, 38, 592070), 'updated': datetime.datetime(2023, 7, 10, 8, 3, 38, 592190), 'drone_uuid': 'nemo-34fd16c8b9', 'resource_status': <ResourceStatus.Booting: 1>} has changed state to BootingState
```
I can work on a fix, but I would need to know how we want the Prometheus plugin to behave. I currently have two ideas in mind:
- just skip the Prometheus update if the `resource_status` attribute is missing
- set the `new_status` variable to a default value (e.g. `BootingState`) when the `resource_status` attribute is missing
But maybe you also have a different fix in mind.
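For illustration, a minimal sketch of the two ideas (`resource_attributes` behaves like a dict here; note that the patch at the top of this record went a third way and initialises `resource_status` to `ResourceStatus.Booting` in `Drone.run()`):
```python
def notify_skipping(resource_attributes: dict) -> None:
    # Idea 1: skip the monitoring update entirely while the attribute is not set yet
    if "resource_status" not in resource_attributes:
        return
    new_status = resource_attributes["resource_status"]
    print(f"would update Prometheus gauges with {new_status}")

def new_status_with_default(resource_attributes: dict, default="Booting"):
    # Idea 2: fall back to a default value instead of raising AttributeError
    return resource_attributes.get("resource_status", default)
```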
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/resources_t/test_drone.py::TestDrone::test_first_run_of_initialized_drone"
] | [
"tests/resources_t/test_drone.py::TestDrone::test_allocation",
"tests/resources_t/test_drone.py::TestDrone::test_batch_system_agent",
"tests/resources_t/test_drone.py::TestDrone::test_database",
"tests/resources_t/test_drone.py::TestDrone::test_database_state",
"tests/resources_t/test_drone.py::TestDrone::test_demand",
"tests/resources_t/test_drone.py::TestDrone::test_heartbeat_interval",
"tests/resources_t/test_drone.py::TestDrone::test_life_time",
"tests/resources_t/test_drone.py::TestDrone::test_maximum_demand",
"tests/resources_t/test_drone.py::TestDrone::test_notify_plugins",
"tests/resources_t/test_drone.py::TestDrone::test_register_plugins",
"tests/resources_t/test_drone.py::TestDrone::test_removal_plugins",
"tests/resources_t/test_drone.py::TestDrone::test_run",
"tests/resources_t/test_drone.py::TestDrone::test_set_state",
"tests/resources_t/test_drone.py::TestDrone::test_site_agent",
"tests/resources_t/test_drone.py::TestDrone::test_state",
"tests/resources_t/test_drone.py::TestDrone::test_supply",
"tests/resources_t/test_drone.py::TestDrone::test_utilisation"
] | {
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-07-17T12:09:40Z" | mit |
|
MatterMiners__tardis-317 | diff --git a/setup.py b/setup.py
index 1528388..01a5e31 100644
--- a/setup.py
+++ b/setup.py
@@ -92,7 +92,7 @@ setup(
"pydantic<2.0.0",
"asyncstdlib",
"typing_extensions",
- "python-auditor==0.2.0",
+ "python-auditor==0.3.0",
"tzlocal",
*REST_REQUIRES,
],
diff --git a/tardis/plugins/sqliteregistry.py b/tardis/plugins/sqliteregistry.py
index 99b2ad6..5e931b0 100644
--- a/tardis/plugins/sqliteregistry.py
+++ b/tardis/plugins/sqliteregistry.py
@@ -96,7 +96,7 @@ class SqliteRegistry(Plugin):
"CONSTRAINT unique_machine_type_per_site UNIQUE (machine_type, site_id)", # noqa B950
],
"Resources": [
- "id INTEGER PRIMARY KEY AUTOINCREMENT,"
+ "id INTEGER PRIMARY KEY AUTOINCREMENT",
"remote_resource_uuid VARCHAR(255)",
"drone_uuid VARCHAR(255) UNIQUE",
"state_id INTEGER",
diff --git a/tardis/utilities/asynccachemap.py b/tardis/utilities/asynccachemap.py
index 2b1733b..d05427c 100644
--- a/tardis/utilities/asynccachemap.py
+++ b/tardis/utilities/asynccachemap.py
@@ -55,3 +55,15 @@ class AsyncCacheMap(Mapping):
def __len__(self):
return len(self._data)
+
+ def __eq__(self, other):
+ if not isinstance(other, AsyncCacheMap):
+ return False
+
+ return (
+ self._update_coroutine == other._update_coroutine
+ and self._max_age == other._max_age
+ and self._last_update == other._last_update
+ and self._data == other._data
+ and self._lock == other._lock
+ )
diff --git a/tardis/utilities/staticmapping.py b/tardis/utilities/staticmapping.py
index 6e5278d..f8cd428 100644
--- a/tardis/utilities/staticmapping.py
+++ b/tardis/utilities/staticmapping.py
@@ -13,3 +13,8 @@ class StaticMapping(Mapping):
def __len__(self):
return len(self._data)
+
+ def __eq__(self, other):
+ if not isinstance(other, StaticMapping):
+ return False
+ return self._data == other._data
| MatterMiners/tardis | 0348c6740664f4809b1b05f32df04d74ae56985f | diff --git a/tests/plugins_t/test_prometheusmonitoring.py b/tests/plugins_t/test_prometheusmonitoring.py
index 28aef63..7e04087 100644
--- a/tests/plugins_t/test_prometheusmonitoring.py
+++ b/tests/plugins_t/test_prometheusmonitoring.py
@@ -25,9 +25,10 @@ class TestPrometheusMonitoring(TestCase):
@patch("tardis.plugins.prometheusmonitoring.logging", Mock())
def setUp(self):
+ ip = "127.0.0.1"
self.config = self.mock_config.return_value
- self.config.Plugins.PrometheusMonitoring.addr = "127.0.0.1"
- self.config.Plugins.PrometheusMonitoring.port = get_free_port()
+ self.config.Plugins.PrometheusMonitoring.addr = ip
+ self.config.Plugins.PrometheusMonitoring.port = get_free_port(ip)
self.plugin = PrometheusMonitoring()
diff --git a/tests/utilities/utilities.py b/tests/utilities/utilities.py
index 66691ab..c5373bd 100644
--- a/tests/utilities/utilities.py
+++ b/tests/utilities/utilities.py
@@ -11,9 +11,9 @@ def async_return(*args, return_value=None, **kwargs):
return f
-def get_free_port(): # from https://gist.github.com/dbrgn/3979133
+def get_free_port(ip: str): # from https://gist.github.com/dbrgn/3979133
s = socket.socket()
- s.bind(("", 0))
+ s.bind((ip, 0))
port = s.getsockname()[1]
s.close()
return port
diff --git a/tests/utilities_t/test_asynccachemap.py b/tests/utilities_t/test_asynccachemap.py
index e67825b..249cf91 100644
--- a/tests/utilities_t/test_asynccachemap.py
+++ b/tests/utilities_t/test_asynccachemap.py
@@ -70,3 +70,33 @@ class TestAsyncCacheMap(TestCase):
self.assertTrue(
datetime.now() - self.async_cache_map.last_update < timedelta(seconds=1)
)
+
+ def test_eq_async_cache_map(self):
+ test_cache_map = AsyncCacheMap(
+ update_coroutine=self.async_cache_map._update_coroutine
+ )
+ # Since both objects have been recently initialized, all values (self._max_age,
+ # self._last_update, self._data and self._lock) are still the defaults
+ self.assertTrue(self.async_cache_map == test_cache_map)
+
+ # Test the opposite
+ self.assertFalse(self.async_cache_map != test_cache_map)
+
+ # change default values
+ run_async(self.async_cache_map.update_status)
+ self.assertFalse(self.async_cache_map == test_cache_map)
+
+ # update default values, self._last_update, self._lock still differ
+ run_async(test_cache_map.update_status)
+ self.assertFalse(self.async_cache_map == test_cache_map)
+
+ # Assimilate lock, self._last_update still differs
+ test_cache_map._lock = self.async_cache_map._lock
+ self.assertFalse(self.async_cache_map == test_cache_map)
+
+ # Make them equal again
+ test_cache_map._last_update = self.async_cache_map._last_update
+ self.assertTrue(self.async_cache_map == test_cache_map)
+
+ # Test different class
+ self.assertFalse(self.async_cache_map == self.test_data)
diff --git a/tests/utilities_t/test_staticmapping.py b/tests/utilities_t/test_staticmapping.py
index b906108..5c17468 100644
--- a/tests/utilities_t/test_staticmapping.py
+++ b/tests/utilities_t/test_staticmapping.py
@@ -26,3 +26,17 @@ class TestStaticMapping(TestCase):
self.static_map["testB"] = 456
with self.assertRaises(TypeError):
self.static_map["testC"] = 456
+
+ def test_eq_async_cache_map(self):
+ test_static_map = StaticMapping(**self.test_data)
+ self.assertTrue(self.static_map == test_static_map)
+
+ # Test the opposite
+ self.assertFalse(self.static_map != test_static_map)
+
+ # Change the data dictionary
+ test_static_map = StaticMapping(test=123)
+ self.assertFalse(self.static_map == test_static_map)
+
+ # Test different class
+ self.assertFalse(self.static_map == self.test_data)
| Fix code scanning alert - Implicit string concatenation in a list
This seems to be a bug in the SqliteRegistry.
Tracking issue for:
- [ ] https://github.com/MatterMiners/tardis/security/code-scanning/5
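For context, a minimal generic illustration of the pitfall the alert points at (plain Python behavior, not the actual registry code):
```python
# Implicit string concatenation: because the comma after the first literal is
# missing, Python joins it with the next literal, so the list has 2 elements
# instead of the intended 3.
columns = [
    "id INTEGER PRIMARY KEY AUTOINCREMENT"   # <-- missing comma
    "remote_resource_uuid VARCHAR(255)",
    "drone_uuid VARCHAR(255) UNIQUE",
]
assert len(columns) == 2
```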
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_eq_async_cache_map",
"tests/utilities_t/test_staticmapping.py::TestStaticMapping::test_eq_async_cache_map"
] | [
"tests/plugins_t/test_prometheusmonitoring.py::TestPrometheusMonitoring::test_notify",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_command_failing_update",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_get_async_cache_map",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_iter_async_cache_map",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_json_failing_update",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_last_update",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_len_async_cache_map",
"tests/utilities_t/test_asynccachemap.py::TestAsyncCacheMap::test_update_async_cache_map",
"tests/utilities_t/test_staticmapping.py::TestStaticMapping::test_get_item_static_mapping",
"tests/utilities_t/test_staticmapping.py::TestStaticMapping::test_iter_static_mapping",
"tests/utilities_t/test_staticmapping.py::TestStaticMapping::test_len_static_mapping",
"tests/utilities_t/test_staticmapping.py::TestStaticMapping::test_modify_static_mapping"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-11-10T12:42:56Z" | mit |
|
MatterMiners__tardis-321 | diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 370201a..c68f844 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -11,8 +11,8 @@ Rene Caspart <[email protected]>
Leon Schuhmacher <[email protected]>
R. Florian von Cube <[email protected]>
mschnepf <[email protected]>
-Alexander Haas <[email protected]>
Benjamin Rottler <[email protected]>
+Alexander Haas <[email protected]>
mschnepf <[email protected]>
Dirk Sammel <[email protected]>
Matthias J. Schnepf <[email protected]>
@@ -21,4 +21,5 @@ LGTM Migrator <[email protected]>
Matthias Schnepf <[email protected]>
PSchuhmacher <[email protected]>
Peter Wienemann <[email protected]>
+Raphael Kleinemühl <[email protected]>
rfvc <[email protected]>
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index f9e7ec6..f11fe78 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,4 +1,4 @@
-.. Created by changelog.py at 2023-11-10, command
+.. Created by changelog.py at 2023-11-25, command
'/Users/giffler/.cache/pre-commit/repor6pnmwlm/py_env-python3.10/bin/changelog docs/source/changes compile --categories Added Changed Fixed Security Deprecated --output=docs/source/changelog.rst'
based on the format of 'https://keepachangelog.com/'
@@ -6,9 +6,14 @@
CHANGELOG
#########
-[Unreleased] - 2023-11-10
+[Unreleased] - 2023-11-25
=========================
+Changed
+-------
+
+* Enable support for SSH command restrictions in Moab adapter
+
Fixed
-----
diff --git a/docs/source/changes/322.add_support_for_ssh_command_restrictions_moab.yaml b/docs/source/changes/322.add_support_for_ssh_command_restrictions_moab.yaml
new file mode 100644
index 0000000..2156f60
--- /dev/null
+++ b/docs/source/changes/322.add_support_for_ssh_command_restrictions_moab.yaml
@@ -0,0 +1,7 @@
+category: changed
+summary: "Enable support for SSH command restrictions in Moab adapter"
+description: |
+ The NEMO HPC is going to enable 2FA on the login nodes and SSH can be restricted to certain commands only. This
+ requires to avoid `&&` and `$(whoami)` in commands.
+pull requests:
+- 322
diff --git a/setup.py b/setup.py
index 01a5e31..3643bcc 100644
--- a/setup.py
+++ b/setup.py
@@ -92,7 +92,7 @@ setup(
"pydantic<2.0.0",
"asyncstdlib",
"typing_extensions",
- "python-auditor==0.3.0",
+ "python-auditor==0.3.1",
"tzlocal",
*REST_REQUIRES,
],
diff --git a/tardis/adapters/sites/moab.py b/tardis/adapters/sites/moab.py
index b918423..fd71dbb 100644
--- a/tardis/adapters/sites/moab.py
+++ b/tardis/adapters/sites/moab.py
@@ -31,12 +31,16 @@ logger = logging.getLogger("cobald.runtime.tardis.adapters.sites.moab")
async def showq(
*resource_attributes: Tuple[AttributeDict, ...], executor: Executor
) -> Iterable[Mapping]:
- cmd = "showq --xml -w user=$(whoami) && showq -c --xml -w user=$(whoami)"
+ showq_active_cmd = "showq --xml -w user=$(USER)"
+ showq_completed_cmd = "showq -c --xml -w user=$(USER)"
logger.debug("Moab status update is running.")
- response = await executor.run_command(cmd)
+ combined_response_stdout = ""
+ for cmd in (showq_active_cmd, showq_completed_cmd):
+ response = await executor.run_command(cmd)
+ combined_response_stdout += response.stdout
# combine two XML outputs to one
xml_output = minidom.parseString(
- response["stdout"].replace("\n", "").replace("</Data><Data>", "")
+ combined_response_stdout.replace("\n", "").replace("</Data><Data>", "")
)
xml_jobs_list = xml_output.getElementsByTagName("queue")
# parse XML output
| MatterMiners/tardis | 242e4c0dedf113c877e899a3b90e8ff4732a42f8 | diff --git a/tests/adapters_t/sites_t/test_moab.py b/tests/adapters_t/sites_t/test_moab.py
index c948cc4..5a0c104 100644
--- a/tests/adapters_t/sites_t/test_moab.py
+++ b/tests/adapters_t/sites_t/test_moab.py
@@ -9,7 +9,7 @@ from tests.utilities.utilities import mock_executor_run_command
from tests.utilities.utilities import run_async
from unittest import TestCase
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock, call, patch
from datetime import datetime
from warnings import filterwarnings
@@ -283,6 +283,7 @@ class TestMoabAdapter(TestCase):
@mock_executor_run_command(TEST_RESOURCE_STATE_TRANSLATION_RESPONSE)
def test_resource_state_translation(self):
+ self.mock_executor.reset_mock()
for num, (_, state) in enumerate(STATE_TRANSLATIONS):
job_id = f"76242{num:02}"
return_resource_attributes = run_async(
@@ -291,9 +292,13 @@ class TestMoabAdapter(TestCase):
)
self.assertEqual(return_resource_attributes.resource_status, state)
- self.mock_executor.return_value.run_command.assert_called_with(
- "showq --xml -w user=$(whoami) && showq -c --xml -w user=$(whoami)"
- )
+ self.mock_executor.return_value.run_command.assert_has_calls(
+ [
+ call("showq --xml -w user=$(USER)"),
+ call("showq -c --xml -w user=$(USER)"),
+ ]
+ )
+ self.mock_executor.reset_mock()
@mock_executor_run_command(TEST_RESOURCE_STATUS_RESPONSE_RUNNING)
def test_resource_status_update(self):
| pyauditor install broken in latest docker image
After #319 was merged into master, I wanted to try out the latest version of C/T.
I noticed that loading the auditor plugin fails. I could track this back to a broken install of pyauditor in the docker container:
- With the latest docker image the pyauditor module is not populated with the classes we export
```
$ docker run -it --rm --entrypoint /bin/sh matterminers/cobald-tardis:latest
/srv # python
Python 3.10.13 (main, Oct 19 2023, 06:08:04) [GCC 12.2.1 20220924] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pyauditor
>>> dir(pyauditor)
['__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__']
```
- With the latest tagged version it works (i.e. `AuditorClientBuilder` and other classes are visible)
```
$ docker run -it --rm --entrypoint /bin/sh matterminers/cobald-tardis:0.8.0
/srv # python
Python 3.10.13 (main, Oct 19 2023, 06:08:04) [GCC 12.2.1 20220924] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pyauditor
>>> dir(pyauditor)
['AuditorClient', 'AuditorClientBlocking', 'AuditorClientBuilder', 'Component', 'Meta', 'Record', 'Score', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', 'pyauditor']
```
- However, installing the latest pyauditor version in a venv also works
```
$ python -m venv .venv
$ source .venv/bin/activate
$ pip install python-auditor
Successfully installed python-auditor-0.3.0
$ python
Python 3.11.5 (main, Sep 2 2023, 14:16:33) [GCC 13.2.1 20230801] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pyauditor
>>> dir(pyauditor)
['AuditorClient', 'AuditorClientBlocking', 'AuditorClientBuilder', 'Component', 'Meta', 'Record', 'Score', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', 'pyauditor']
```
- I also checked with Python3.10 using the `python:3.10` container (as this is the version used by the C/T container), and there it also works
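One way to narrow this down (a suggested diagnostic, not something taken from the report) would be to check inside the broken image which installation of `pyauditor` actually gets imported:
```python
# Hypothetical check to run inside the container: print where pyauditor is
# loaded from and what it exposes.
import pyauditor

print(pyauditor.__file__)
print(pyauditor.__path__)
print(dir(pyauditor))
```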
Any idea why this is happening? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_resource_state_translation"
] | [
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_deploy_resource",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_deploy_resource_w_submit_options",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_exception_handling",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_machine_meta_data",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_machine_type",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_resource_status",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_resource_status_of_completed_jobs",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_resource_status_update",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_resource_status_update_failed",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_site_name",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_start_up_command_deprecation_warning",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_stop_resource",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_terminate_dead_resource",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_terminate_resource",
"tests/adapters_t/sites_t/test_moab.py::TestMoabAdapter::test_terminate_resource_error"
] | {
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-11-24T11:36:16Z" | mit |
|
Melevir__cognitive_complexity-15 | diff --git a/cognitive_complexity/utils/ast.py b/cognitive_complexity/utils/ast.py
index 7e3938c..d47917a 100644
--- a/cognitive_complexity/utils/ast.py
+++ b/cognitive_complexity/utils/ast.py
@@ -85,7 +85,7 @@ def process_node_itself(
return increment_by, 0, True
elif isinstance(node, ast.BoolOp):
inner_boolops_amount = len([n for n in ast.walk(node) if isinstance(n, ast.BoolOp)])
- base_complexity = inner_boolops_amount * max(increment_by, 1)
+ base_complexity = inner_boolops_amount
return increment_by, base_complexity, False
elif isinstance(node, (ast.Break, ast.Continue)):
return increment_by, max(1, increment_by), True
| Melevir/cognitive_complexity | aa428ca731bc0461cc9f95956ff4816154495729 | diff --git a/tests/test_cognitive_complexity.py b/tests/test_cognitive_complexity.py
index 0aac348..31053b5 100644
--- a/tests/test_cognitive_complexity.py
+++ b/tests/test_cognitive_complexity.py
@@ -113,16 +113,16 @@ def test_real_function():
raw_camelcase_words = []
for raw_word in re.findall(r'[a-z]+', constant): # +1
word = raw_word.strip()
- if ( # +2
- len(word) >= min_word_length # +4 (2 bool operator sequences * 2 for nesting)
+ if ( # +2 (nesting = 1)
+ len(word) >= min_word_length # +2 (2 bool operator sequences)
and not (word.startswith('-') or word.endswith('-'))
):
- if is_camel_case_word(word): # +2
+ if is_camel_case_word(word): # +3 (nesting=2)
raw_camelcase_words.append(word)
else: # +1
processed_words.append(word.lower())
return processed_words, raw_camelcase_words
- """) == 11
+ """) == 9
def test_break_and_continue():
@@ -142,9 +142,9 @@ def test_nested_functions():
def foo(a):
if a: # +2
return 1
- bar = lambda a: lambda b: b or 2 # +2 (+2 for or because lambda increases nesting)
+ bar = lambda a: lambda b: b or 2 # +1
return bar(foo(a))(a)
- """) == 4
+ """) == 3
def test_ternary_operator():
| Incorrect counting for sequences of binary logical operators
According to the Cognitive Complexity specification, sequences of binary logical operators receive a fundamental increment (B1) but not a nesting increment (B3). This is further supported by the `overriddenSymbolFrom()` example in appendix C.
The existing `test_real_function()` should be calculated as follows; note the +4 for the multiline `if` condition should be +2.
assert get_code_snippet_compexity("""
def process_raw_constant(constant, min_word_length):
processed_words = []
raw_camelcase_words = []
for raw_word in re.findall(r'[a-z]+', constant): # +1
word = raw_word.strip()
if ( # +2
len(word) >= min_word_length
and not (word.startswith('-') or word.endswith('-')) # +2 (2 bool operator sequences)
):
if is_camel_case_word(word): # +2
raw_camelcase_words.append(word)
else: # +1
processed_words.append(word.lower())
return processed_words, raw_camelcase_words
""") == 9 # not 11
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_cognitive_complexity.py::test_real_function",
"tests/test_cognitive_complexity.py::test_nested_functions"
] | [
"tests/test_cognitive_complexity.py::test_simple_if_simple_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_serial_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_serial_heterogenious_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_complex_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_very_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_try_condition_complexity",
"tests/test_cognitive_complexity.py::test_recursion_complexity",
"tests/test_cognitive_complexity.py::test_break_and_continue",
"tests/test_cognitive_complexity.py::test_ternary_operator",
"tests/test_cognitive_complexity.py::test_nested_if_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_for_else_complexity",
"tests/test_cognitive_complexity.py::test_while_else_complexity"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2020-07-05T20:58:20Z" | mit |
|
Melevir__cognitive_complexity-17 | diff --git a/cognitive_complexity/api.py b/cognitive_complexity/api.py
index 9a36850..392ca66 100644
--- a/cognitive_complexity/api.py
+++ b/cognitive_complexity/api.py
@@ -2,11 +2,14 @@ import ast
from cognitive_complexity.common_types import AnyFuncdef
from cognitive_complexity.utils.ast import (
- has_recursive_calls, process_child_nodes, process_node_itself,
+ has_recursive_calls, is_decorator, process_child_nodes, process_node_itself,
)
def get_cognitive_complexity(funcdef: AnyFuncdef) -> int:
+ if is_decorator(funcdef):
+ return get_cognitive_complexity(funcdef.body[0]) # type: ignore
+
complexity = 0
for node in funcdef.body:
complexity += get_cognitive_complexity_for_node(node)
@@ -18,7 +21,7 @@ def get_cognitive_complexity(funcdef: AnyFuncdef) -> int:
def get_cognitive_complexity_for_node(
node: ast.AST,
increment_by: int = 0,
- verbose: bool = True,
+ verbose: bool = False,
) -> int:
increment_by, base_complexity, should_iter_children = process_node_itself(node, increment_by)
diff --git a/cognitive_complexity/utils/ast.py b/cognitive_complexity/utils/ast.py
index 815bc05..19db71d 100644
--- a/cognitive_complexity/utils/ast.py
+++ b/cognitive_complexity/utils/ast.py
@@ -16,6 +16,15 @@ def has_recursive_calls(funcdef: AnyFuncdef) -> bool:
])
+def is_decorator(funcdef: AnyFuncdef) -> bool:
+ return (
+ isinstance(funcdef, ast.FunctionDef)
+ and len(funcdef.body) == 2
+ and isinstance(funcdef.body[0], ast.FunctionDef)
+ and isinstance(funcdef.body[1], ast.Return)
+ )
+
+
def process_child_nodes(
node: ast.AST,
increment_by: int,
| Melevir/cognitive_complexity | 06e1556f3e50f4e8ea0200ca03ef0835a3c27bfd | diff --git a/tests/test_cognitive_complexity.py b/tests/test_cognitive_complexity.py
index 6bf7d66..40cdf71 100644
--- a/tests/test_cognitive_complexity.py
+++ b/tests/test_cognitive_complexity.py
@@ -218,3 +218,39 @@ def test_while_else_complexity():
else: # +1
return 5
""") == 6
+
+
+def test_a_decorator_complexity():
+ assert get_code_snippet_compexity("""
+ def a_decorator(a, b):
+ def inner(func): # nesting = 0
+ if condition: # +1
+ print(b)
+ func()
+ return inner
+ """) == 1
+
+
+def test_not_a_decorator_complexity():
+ assert get_code_snippet_compexity("""
+ def not_a_decorator(a, b):
+ my_var = a*b
+ def inner(func): # nesting = 1
+ if condition: # +1 structure, +1 nesting
+ print(b)
+ func()
+ return inner
+ """) == 2
+
+
+def test_decorator_generator_complexity():
+ assert get_code_snippet_compexity("""
+ def decorator_generator(a):
+ def generator(func):
+ def decorator(func): # nesting = 0
+ if condition: # +1
+ print(b)
+ return func()
+ return decorator
+ return generator
+ """) == 1
| Not increment nesting for decorators
> Python’s decorator idiom allows additional behavior to be added to an existing function
without modifying the function itself. This addition is accomplished with the use of nested
functions in the decorator providing the additional behavior. In order not to penalize Python
coders for the use of a common feature of their language, an exception has been added.
However, an attempt has been made to define the exception narrowly. Specifically, to be
eligible for the exception, a function may contain only a nested function and a return
statement.
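To make the quoted rule concrete, a small hypothetical illustration (the function names are examples only):
```python
# Eligible for the exception: the outer body is just a nested function plus a
# return statement, so no nesting increment is added for the inner function.
def a_decorator(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper


# Not eligible: the extra assignment before the nested function means the
# normal nesting increment applies inside wrapper.
def not_a_decorator(func):
    counter = 0
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
```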
This is valid after #3 is done. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_cognitive_complexity.py::test_a_decorator_complexity",
"tests/test_cognitive_complexity.py::test_decorator_generator_complexity"
] | [
"tests/test_cognitive_complexity.py::test_simple_if_simple_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_serial_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_serial_heterogenious_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_complex_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_very_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_try_condition_complexity",
"tests/test_cognitive_complexity.py::test_recursion_complexity",
"tests/test_cognitive_complexity.py::test_real_function",
"tests/test_cognitive_complexity.py::test_break_and_continue",
"tests/test_cognitive_complexity.py::test_nested_functions",
"tests/test_cognitive_complexity.py::test_ternary_operator",
"tests/test_cognitive_complexity.py::test_nested_if_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_for_else_complexity",
"tests/test_cognitive_complexity.py::test_while_else_complexity",
"tests/test_cognitive_complexity.py::test_not_a_decorator_complexity"
] | {
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-07-11T12:21:57Z" | mit |
|
Melevir__cognitive_complexity-22 | diff --git a/cognitive_complexity/utils/ast.py b/cognitive_complexity/utils/ast.py
index 19db71d..43625fd 100644
--- a/cognitive_complexity/utils/ast.py
+++ b/cognitive_complexity/utils/ast.py
@@ -33,14 +33,7 @@ def process_child_nodes(
) -> int:
child_complexity = 0
child_nodes = ast.iter_child_nodes(node)
-
- for node_num, child_node in enumerate(child_nodes):
- if isinstance(node, ast.Try):
- if node_num == 1:
- # add +1 for all try nodes except body
- increment_by += 1
- if node_num:
- child_complexity += max(1, increment_by)
+ for child_node in child_nodes:
child_complexity += complexity_calculator(
child_node,
increment_by=increment_by,
@@ -50,7 +43,7 @@ def process_child_nodes(
def process_control_flow_breaker(
- node: Union[ast.If, ast.For, ast.While, ast.IfExp],
+ node: Union[ast.If, ast.For, ast.While, ast.IfExp, ast.ExceptHandler],
increment_by: int,
) -> Tuple[int, int, bool]:
if isinstance(node, ast.IfExp):
@@ -60,6 +53,10 @@ def process_control_flow_breaker(
elif isinstance(node, ast.If) and len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If):
# node is an elif; the increment will be counted on the ast.If
increment = 0
+ elif isinstance(node, ast.ExceptHandler):
+ # +1 for the catch/except-handler
+ increment = 0
+ increment_by += 1
elif node.orelse:
# +1 for the else and add a nesting level
increment = 1
@@ -80,6 +77,7 @@ def process_node_itself(
ast.For,
ast.While,
ast.IfExp,
+ ast.ExceptHandler,
)
incrementers_nodes = (
ast.FunctionDef,
| Melevir/cognitive_complexity | 1df07df7adf88bc2c002e4bf85da28c18bfbd086 | diff --git a/tests/test_cognitive_complexity.py b/tests/test_cognitive_complexity.py
index 40cdf71..f4a01cf 100644
--- a/tests/test_cognitive_complexity.py
+++ b/tests/test_cognitive_complexity.py
@@ -9,6 +9,14 @@ def test_simple_if_simple_condition_complexity():
""") == 1
+def test_simple_if_simple_condition_complexity_with_print():
+ assert get_code_snippet_compexity("""
+ def f(a, b):
+ if a: # +1
+ print('1')
+ """) == 1
+
+
def test_simple_if_serial_condition_complexity():
assert get_code_snippet_compexity("""
def f(a, b):
@@ -87,16 +95,41 @@ def test_very_nested_structure_condition_complexity():
""") == 6
-def test_try_condition_complexity():
+def test_try_condition_complexity_simple():
+ assert get_code_snippet_compexity("""
+ def f():
+ try:
+ print('hello1')
+ except Exception: # +1
+ print('goodbye')
+ """) == 1
+
+
+def test_try_condition_complexity_with_multiple_lines():
+ assert get_code_snippet_compexity("""
+ def f(a, b):
+ try:
+ print('hello1')
+ print('hello2')
+ print('hello3')
+ print('hello4')
+ print('hello5')
+ except Exception: # +1
+ print('goodbye')
+ """) == 1
+
+
+def test_try_condition_complexity_with_nesting():
assert get_code_snippet_compexity("""
def f(a, b):
try:
for foo in bar: # +1
- return a
+ if a > 0: # +2
+ return a
except Exception: # +1
if a < 0: # +2
return a
- """) == 4
+ """) == 6
def test_recursion_complexity():
@@ -125,6 +158,28 @@ def test_real_function():
""") == 9
+def test_real_function_with_try():
+ assert get_code_snippet_compexity("""
+ def process_raw_constant(constant, min_word_length):
+ try:
+ processed_words = []
+ raw_camelcase_words = []
+ for raw_word in re.findall(r'[a-z]+', constant): # +1
+ word = raw_word.strip()
+ if ( # +2 (nesting = 1)
+ len(word) >= min_word_length # +2 (2 bool operator sequences)
+ and not (word.startswith('-') or word.endswith('-'))
+ ):
+ if is_camel_case_word(word): # +3 (nesting=2)
+ raw_camelcase_words.append(word)
+ else: # +1
+ processed_words.append(word.lower())
+ return processed_words, raw_camelcase_words
+ except Exception as exp: # +1
+ return 1
+ """) == 9 + 1
+
+
def test_break_and_continue():
assert get_code_snippet_compexity("""
def f(a):
| Document try complexity computation
Hi, thanks for this great tool :smile: I was scratching my head over why adding or removing lines in a `try` block was influencing cognitive complexity, searched your documentation and then the code for the reason, and found this line:
https://github.com/Melevir/cognitive_complexity/blob/1df07df7adf88bc2c002e4bf85da28c18bfbd086/cognitive_complexity/utils/ast.py#L40
It means that any line added inside a `try` block will increment the complexity (the code below gives complexity 4):
```python
def a(b, c):
try:
print(1)
print(2)
print(3)
print(4)
except Exception:
raise
```
This behavior is unexpected as it was not mentioned in the SonarSource links provided; can we document it here or in the flake8 extension? Thanks | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_cognitive_complexity.py::test_try_condition_complexity_with_multiple_lines",
"tests/test_cognitive_complexity.py::test_real_function_with_try"
] | [
"tests/test_cognitive_complexity.py::test_simple_if_simple_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_simple_condition_complexity_with_print",
"tests/test_cognitive_complexity.py::test_simple_if_serial_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_serial_heterogenious_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_if_complex_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_simple_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_very_nested_structure_condition_complexity",
"tests/test_cognitive_complexity.py::test_try_condition_complexity_simple",
"tests/test_cognitive_complexity.py::test_try_condition_complexity_with_nesting",
"tests/test_cognitive_complexity.py::test_recursion_complexity",
"tests/test_cognitive_complexity.py::test_real_function",
"tests/test_cognitive_complexity.py::test_break_and_continue",
"tests/test_cognitive_complexity.py::test_nested_functions",
"tests/test_cognitive_complexity.py::test_ternary_operator",
"tests/test_cognitive_complexity.py::test_nested_if_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_else_condition_complexity",
"tests/test_cognitive_complexity.py::test_nested_elif_condition_complexity",
"tests/test_cognitive_complexity.py::test_for_else_complexity",
"tests/test_cognitive_complexity.py::test_while_else_complexity",
"tests/test_cognitive_complexity.py::test_a_decorator_complexity",
"tests/test_cognitive_complexity.py::test_not_a_decorator_complexity",
"tests/test_cognitive_complexity.py::test_decorator_generator_complexity"
] | {
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-05-20T20:59:26Z" | mit |
|
MicroPyramid__forex-python-27 | diff --git a/forex_python/converter.py b/forex_python/converter.py
index bcc8348..316f0cf 100644
--- a/forex_python/converter.py
+++ b/forex_python/converter.py
@@ -56,6 +56,8 @@ class CurrencyRates(Common):
raise RatesNotAvailableError("Currency Rates Source Not Ready")
def get_rate(self, base_cur, dest_cur, date_obj=None):
+ if base_cur == dest_cur:
+ return 1.
date_str = self._get_date_string(date_obj)
payload = {'base': base_cur, 'symbols': dest_cur}
source_url = self._source_url() + date_str
| MicroPyramid/forex-python | 1ebd8dda32503ca63d52b8a55f5647804fd1d1dd | diff --git a/tests/test.py b/tests/test.py
index 040ed75..2501aa9 100644
--- a/tests/test.py
+++ b/tests/test.py
@@ -50,6 +50,11 @@ class TestGetRate(TestCase):
# check if return value is float
self.assertTrue(isinstance(rate, float))
+
+ def test_get_rate_with_valid_codes_same_currency(self):
+ rate = get_rate('USD', 'USD')
+ # rate should be 1.
+ self.assertEqual(1., rate)
def test_get_rate_with_date(self):
date_obj = datetime.datetime.strptime('2010-05-10', "%Y-%m-%d").date()
| identity get_rate
It would be nice if the conversion from one currency to itself also worked:
```
>>> c.get_rate("CHF", "CHF")
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/home/user/.virtualenvs/tool/local/lib/python2.7/site-packages/forex_python/converter.py", line 67, in get_rate
base_cur, dest_cur, date_str))
RatesNotAvailableError: Currency Rate CHF => CHF not available for Date latest
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test.py::TestGetRate::test_get_rate_with_valid_codes_same_currency"
] | [
"tests/test.py::TestCurrencyName::test_with_valid_currency_code",
"tests/test.py::TestCurrencyName::test_with_invalid_currency_code",
"tests/test.py::TestAmountConvert::test_amount_convert_valid_currency_same_currency",
"tests/test.py::TestAmountConvert::test_amount_convert_invalid_currency",
"tests/test.py::TestForceDecimalAmountConvert::test_decimal_get_rates_invalid_code",
"tests/test.py::TestForceDecimalAmountConvert::test_decimal_get_rate_with_invalid_codes",
"tests/test.py::TestGetRates::test_get_rates_invalid_code",
"tests/test.py::TestCurrencySymbol::test_with_valid_currency_code",
"tests/test.py::TestCurrencySymbol::test_with_invalid_currency_code",
"tests/test.py::TestGetRate::test_get_rate_with_invalid_codes"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2017-02-27T13:16:26Z" | mit |
|
MindscapeHQ__raygun4py-82 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9a8e114..d80a3f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+## 4.2.2 (23/01/2019):
+Bugfixes
+ - Fix `set_request_details` builder method not returning self causing it to be unchainable
+
## 4.2.1 (18/12/2018):
Bugfixes
- Set version correctly in crash report sent to Raygun API ([#78](https://github.com/MindscapeHQ/raygun4py/pull/79))
diff --git a/python2/raygun4py/version.py b/python2/raygun4py/version.py
index 53fb41b..3cd825b 100644
--- a/python2/raygun4py/version.py
+++ b/python2/raygun4py/version.py
@@ -2,4 +2,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
-__version__ = '4.2.1'
+__version__ = '4.2.2'
diff --git a/python3/raygun4py/raygunmsgs.py b/python3/raygun4py/raygunmsgs.py
index c419558..6d3c318 100644
--- a/python3/raygun4py/raygunmsgs.py
+++ b/python3/raygun4py/raygunmsgs.py
@@ -97,6 +97,8 @@ class RaygunMessageBuilder(object):
rg_request_details = http_utilities.build_wsgi_compliant_request(request)
self.raygunMessage.details['request'] = rg_request_details
+ return self
+
def set_version(self, version):
self.raygunMessage.details['version'] = version
diff --git a/python3/raygun4py/version.py b/python3/raygun4py/version.py
index 53fb41b..3cd825b 100644
--- a/python3/raygun4py/version.py
+++ b/python3/raygun4py/version.py
@@ -2,4 +2,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
-__version__ = '4.2.1'
+__version__ = '4.2.2'
| MindscapeHQ/raygun4py | 16d2e7a3ebcca16548e5ec806c85b3edfda95930 | diff --git a/python3/tests/test_raygunmsgs.py b/python3/tests/test_raygunmsgs.py
index 2f0b3e0..be16405 100644
--- a/python3/tests/test_raygunmsgs.py
+++ b/python3/tests/test_raygunmsgs.py
@@ -109,6 +109,12 @@ class TestRaygunMessageBuilder(unittest.TestCase):
self.assertEqual(self.builder.raygunMessage.details['request']['headers']['Referer'],
"https://www.google.com/")
+ def test_set_request_details_allows_chaining(self):
+ self.builder \
+ .set_request_details(self.raw_wsgi_request) \
+ .set_tags(['foo', 'bar'])
+
+
class TestRaygunErrorMessage(unittest.TestCase):
class GrandchildError(Exception):
pass
| AttributeError: 'NoneType' object has no attribute 'set_user'
Recently upgraded from 3.1.6 to 4.1.0 and it looks like `RaygunMessageBuilder.set_request_details` is returning `None` in some situations, causing exception sending to fail with an `AttributeError: 'NoneType' object has no attribute 'set_user'`
https://github.com/MindscapeHQ/raygun4py/blob/v4.1.0/python3/raygun4py/raygunprovider.py#L124-L125
https://github.com/MindscapeHQ/raygun4py/blob/v4.1.0/python3/raygun4py/raygunmsgs.py#L92-L97
Stacktrace:
```py
/usr/local/lib/python3.6/site-packages/raygun4py/raygunprovider.py:96: in send_exception
message = self._create_message(errorMessage, tags, custom_data, http_request, extra_environment_data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <raygun4py.raygunprovider.RaygunSender object at 0x7f6dbbbf0320>, raygunExceptionMessage = <raygun4py.raygunmsgs.RaygunErrorMessage object at 0x7f6dbbbccf60>
tags = ['django-novars'], user_custom_data = None
http_request = {'form': {}, 'headers': {'CONTENT_TYPE': 'application/octet-stream', 'HTTP_COOKIE': '', 'PATH_INFO': '/', 'QUERY_STRING': '', ...}, 'hostName': 'testserver', 'httpMethod': 'GET', ...}
extra_environment_data = {'frameworkVersion': '2.1.2'}
def _create_message(self, raygunExceptionMessage, tags, user_custom_data, http_request, extra_environment_data):
return raygunmsgs.RaygunMessageBuilder().new() \
.set_machine_name(socket.gethostname()) \
.set_version(self.userversion) \
.set_client_details() \
.set_exception_details(raygunExceptionMessage) \
.set_environment_details(extra_environment_data) \
.set_tags(tags) \
.set_customdata(user_custom_data) \
> .set_request_details(http_request) \
.set_user(self.user) \
.build()
E AttributeError: 'NoneType' object has no attribute 'set_user'
/usr/local/lib/python3.6/site-packages/raygun4py/raygunprovider.py:124: AttributeError
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_set_request_details_allows_chaining"
] | [
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_request_ip",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_user_fname",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_wsgi_standard_header_names",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_user_anon",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_customdata",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_user_identifier",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_machinename",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_wsgi_fallbacks",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_request_ip_from_remote_addr",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_user_fullname",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_user_email",
"python3/tests/test_raygunmsgs.py::TestRaygunMessageBuilder::test_tags",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_exc_traceback_none_generates_empty_array",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_methodname_none",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_classname",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_last_exception_caught_is_parent",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_childs_cause_is_grandchild",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_nested_grandchild_message",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_message_child_has_nested_grandchild",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_message_parent_has_nested_child",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_cause_is_child",
"python3/tests/test_raygunmsgs.py::TestRaygunErrorMessage::test_chained_exception_nested_child_message"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-01-22T19:47:02Z" | mit |
|
MoshiBin__ssdpy-54 | diff --git a/CHANGES.md b/CHANGES.md
index 6617b09..3583296 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,6 +4,7 @@
- Fixed an issue where `ssdpy.client.discover()` was using wrong syntax.
- Changed the exception raised by `ssdpy.compat.if_nametoindex()` to be the same as in Python 3 (OSError).
- Added tests for `ssdpy.client`, `ssdpy.compat` and created more tests for `ssdpy.server` to increase coverage.
+- Added support for custom fields in NOTIFY. Pass `extra_fields={"field": "value"}` to `ssdpy.SSDPServer` or pass `-e|--extra-field NAME VALUE` to `ssdpy-server`.
### 0.3.0
(2020-08-10)
diff --git a/ssdpy/cli/server.py b/ssdpy/cli/server.py
index bbbb22d..b7f280f 100644
--- a/ssdpy/cli/server.py
+++ b/ssdpy/cli/server.py
@@ -11,17 +11,11 @@ logging.basicConfig()
def parse_args(argv):
parser = argparse.ArgumentParser(description="Start an SSDP server")
- parser.add_argument(
- "-V", "--version", action="version", version="%(prog)s {}".format(VERSION)
- )
+ parser.add_argument("-V", "--version", action="version", version="%(prog)s {}".format(VERSION))
parser.add_argument("-v", "--verbose", help="Be more verbose", action="store_true")
proto_group = parser.add_mutually_exclusive_group()
- proto_group.add_argument(
- "-4", "--ipv4", help="Listen on IPv4 (default: True)", action="store_true"
- )
- proto_group.add_argument(
- "-6", "--ipv6", help="Listen on IPv6 instead of IPv4", action="store_true"
- )
+ proto_group.add_argument("-4", "--ipv4", help="Listen on IPv4 (default: True)", action="store_true")
+ proto_group.add_argument("-6", "--ipv6", help="Listen on IPv6 instead of IPv4", action="store_true")
parser.add_argument("usn", help="Unique server name", nargs=1)
parser.add_argument(
"-t",
@@ -52,11 +46,22 @@ def parse_args(argv):
"--address",
help="Address of the interface to listen on. Only valid for IPv4.",
)
+ parser.add_argument(
+ "-e",
+ "--extra-field",
+ action="append",
+ nargs=2,
+ metavar=("NAME", "VALUE"),
+ help="Extra fields to pass in NOTIFY packets. Pass multiple times for multiple extra headers",
+ )
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
+ extra_fields = None
+ if args.extra_field is not None:
+ extra_fields = dict(args.extra_field)
if args.ipv6:
proto = "ipv6"
@@ -76,6 +81,7 @@ def main(argv=None):
max_age=args.max_age,
al=args.location,
location=args.location,
+ extra_fields=extra_fields,
)
logger = logging.getLogger("ssdpy.server")
diff --git a/ssdpy/protocol.py b/ssdpy/protocol.py
index a3b24e3..4961366 100644
--- a/ssdpy/protocol.py
+++ b/ssdpy/protocol.py
@@ -29,7 +29,7 @@ def create_msearch_payload(host, st, mx=1):
return data.encode("utf-8")
-def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None):
+def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None, extra_fields=None):
"""
Create a NOTIFY packet using the given parameters.
Returns a bytes object containing a valid NOTIFY request.
@@ -63,6 +63,8 @@ def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None):
:param max_age: Amount of time in seconds that the NOTIFY packet should be cached by clients receiving it. In UPnP, this header is required.
:type max_age: int
+ :param extra_fields: Extra header fields to send. UPnP SSDP section 1.1.3 allows for extra vendor-specific fields to be sent in the NOTIFY packet. According to the spec, the field names MUST be in the format of `token`.`domain-name`, for example `myheader.philips.com`. SSDPy, however, does not check this. Normally, headers should be in ASCII - but this function does not enforce that.
+
:return: A bytes object containing the generated NOTIFY payload.
"""
if max_age is not None and not isinstance(max_age, int):
@@ -80,5 +82,8 @@ def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None):
data += "AL:{}\r\n".format(al)
if max_age is not None:
data += "Cache-Control:max-age={}\r\n".format(max_age)
+ if extra_fields is not None:
+ for field, value in extra_fields.items():
+ data += "{}:{}\r\n".format(field, value)
data += "\r\n"
return data.encode("utf-8")
diff --git a/ssdpy/server.py b/ssdpy/server.py
index 1a2f026..28d789d 100644
--- a/ssdpy/server.py
+++ b/ssdpy/server.py
@@ -42,6 +42,8 @@ class SSDPServer(object):
:type location: str
:param al: Canonical URL of the service, but only supported in the IETF version of SSDP. Should be the same as ``location``.
:type al: str
+ :param extra_fields: Extra header fields to send. UPnP SSDP section 1.1.3 allows for extra vendor-specific fields to be sent in the NOTIFY packet. According to the spec, the field names MUST be in the format of `token`.`domain-name`, for example `myheader.philips.com`. SSDPy, however, does not check this and allows any field name - as long as it's ASCII.
+ :type extra_fields: dict
"""
def __init__(
@@ -55,6 +57,7 @@ class SSDPServer(object):
max_age=None,
location=None,
al=None,
+ extra_fields=None,
):
allowed_protos = ("ipv4", "ipv6")
if proto not in allowed_protos:
@@ -67,6 +70,14 @@ class SSDPServer(object):
self.max_age = max_age
self._iface = iface
+ self._extra_fields = {}
+ if extra_fields is not None:
+ for field, value in extra_fields.items():
+ try:
+ self._extra_fields[field.encode("ascii")] = value.encode("ascii")
+ except (UnicodeDecodeError, UnicodeEncodeError):
+ raise ValueError("Invalid value for extra_field: %s=%s is not ASCII", field, value)
+
if proto == "ipv4":
self._af_type = socket.AF_INET
self._broadcast_ip = ipv4_multicast_ip
@@ -125,7 +136,13 @@ class SSDPServer(object):
logger.info("Received qualifying M-SEARCH from {}".format(address))
logger.debug("M-SEARCH data: {}".format(headers))
notify = create_notify_payload(
- self._broadcast_ip, self.device_type, self.usn, self.location, self.al, self.max_age,
+ host=self._broadcast_ip,
+ nt=self.device_type,
+ usn=self.usn,
+ location=self.location,
+ al=self.al,
+ max_age=self.max_age,
+ extra_fields=self._extra_fields,
)
logger.debug("Created NOTIFY: {}".format(notify))
try:
| MoshiBin/ssdpy | f26ca358867467a58e79c65c4813f47431a5a627 | diff --git a/tests/test_cli_server.py b/tests/test_cli_server.py
index 77b4b5f..bd221d7 100644
--- a/tests/test_cli_server.py
+++ b/tests/test_cli_server.py
@@ -35,6 +35,7 @@ def test_ssdpserver_init(mocker):
max_age=None,
port=1900,
proto="ipv4",
+ extra_fields=None,
)
@@ -51,6 +52,7 @@ def test_ssdpserver_init_with_ipv6(mocker):
max_age=None,
port=1900,
proto="ipv6",
+ extra_fields=None,
)
mocker.patch.object(server_cli, "SSDPServer")
@@ -65,6 +67,7 @@ def test_ssdpserver_init_with_ipv6(mocker):
max_age=None,
port=1900,
proto="ipv6",
+ extra_fields=None,
)
@@ -86,6 +89,9 @@ def test_ssdpserver_init_with_args(mocker):
"test-device",
"--max-age",
"0",
+ "-e",
+ "test-field",
+ "foo"
)
)
server_cli.SSDPServer.assert_called_once_with(
@@ -98,4 +104,5 @@ def test_ssdpserver_init_with_args(mocker):
max_age=0,
port=0,
proto="ipv6",
+ extra_fields={"test-field": "foo"},
)
diff --git a/tests/test_protocol.py b/tests/test_protocol.py
index f578525..67cc065 100644
--- a/tests/test_protocol.py
+++ b/tests/test_protocol.py
@@ -39,9 +39,7 @@ def test_notify_location():
def test_notify_al():
- data = create_notify_payload(
- "239.255.255.250:1900", "testdevice", "ssdpy-test", al="http://localhost"
- )
+ data = create_notify_payload("239.255.255.250:1900", "testdevice", "ssdpy-test", al="http://localhost")
data_headers = parse_headers(data)
assert data_headers.get("host") == "239.255.255.250:1900"
assert data_headers.get("nt") == "testdevice"
@@ -51,9 +49,7 @@ def test_notify_al():
def test_notify_age():
- data = create_notify_payload(
- "239.255.255.250:1900", "testdevice", "ssdpy-test", max_age=999
- )
+ data = create_notify_payload("239.255.255.250:1900", "testdevice", "ssdpy-test", max_age=999)
data_headers = parse_headers(data)
assert data_headers.get("host") == "239.255.255.250:1900"
assert data_headers.get("nt") == "testdevice"
@@ -65,3 +61,16 @@ def test_notify_age():
def test_notify_edge_cases():
with pytest.raises(ValueError):
create_notify_payload("x", "y", "z", max_age="not-a-number")
+
+
+def test_notify_extra_fields():
+ data = create_notify_payload(
+ "239.255.255.250:1900",
+ "testdevice",
+ "ssdpy-test",
+ extra_fields={"test-header": "test-value", "test-header.domain.com": "test-value2"},
+ )
+ data_headers = parse_headers(data)
+ assert data_headers.get("test-header") == "test-value"
+ assert data_headers.get("test-header.domain.com") == "test-value2"
+ assert data_headers.get("non-existant-header") is None
diff --git a/tests/test_server.py b/tests/test_server.py
index edbeb5d..c16b507 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -67,3 +67,12 @@ def test_server_bind_address_and_iface_ipv6():
except OSError as e:
if e.errno != errno.ENOPROTOOPT: # Protocol not supported
raise
+
+
+def test_server_extra_fields():
+ SSDPServer("test-server", extra_fields={"test-field": "foo", "test-field2": "bar"})
+
+
+def test_server_extra_fields_non_ascii():
+ with pytest.raises(ValueError):
+ SSDPServer("test-server", extra_fields={"invalid-field™": "foo"})
| No support for custom fields
SSDP allows custom fields to be sent by the server. However, ssdpy does not support them.
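For illustration, a usage sketch of the interface introduced by the patch above; the server name and field values are examples only, and `serve_forever()` is assumed from ssdpy's usual API:
```python
# Hypothetical usage of the extra_fields support: vendor-specific header
# fields are passed as a dict and emitted in NOTIFY packets.
from ssdpy import SSDPServer

server = SSDPServer(
    "my-unique-server-name",
    extra_fields={"myheader.philips.com": "some-value"},
)
server.serve_forever()
```
The CLI equivalent, per the changelog entry in the patch, would be something like `ssdpy-server my-unique-server-name -e myheader.philips.com some-value`.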
See https://web.archive.org/web/20150905102426/http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf chapter 1.1.3, SSDP header field extensions. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_cli_server.py::test_ssdpserver_init",
"tests/test_cli_server.py::test_ssdpserver_init_with_ipv6",
"tests/test_cli_server.py::test_ssdpserver_init_with_args",
"tests/test_protocol.py::test_notify_extra_fields",
"tests/test_server.py::test_server_extra_fields",
"tests/test_server.py::test_server_extra_fields_non_ascii"
] | [
"tests/test_cli_server.py::test_invalid_arguments",
"tests/test_cli_server.py::test_version",
"tests/test_cli_server.py::test_verbose",
"tests/test_protocol.py::test_msearch_payload",
"tests/test_protocol.py::test_notify_payload",
"tests/test_protocol.py::test_notify_location",
"tests/test_protocol.py::test_notify_al",
"tests/test_protocol.py::test_notify_age",
"tests/test_protocol.py::test_notify_edge_cases",
"tests/test_server.py::test_server_ipv4",
"tests/test_server.py::test_server_ipv6",
"tests/test_server.py::test_server_invalid_proto",
"tests/test_server.py::test_server_binds_iface",
"tests/test_server.py::test_server_bind_address_ipv4",
"tests/test_server.py::test_server_bind_address_ipv6",
"tests/test_server.py::test_server_bind_address_and_iface_ipv6"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-12-06T17:22:41Z" | mit |
|
MycroftAI__lingua-franca-118 | diff --git a/lingua_franca/lang/parse_da.py b/lingua_franca/lang/parse_da.py
index 76bca9c..e513b47 100644
--- a/lingua_franca/lang/parse_da.py
+++ b/lingua_franca/lang/parse_da.py
@@ -733,7 +733,7 @@ def extract_datetime_da(string, currentDate, default_time):
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
diff --git a/lingua_franca/lang/parse_de.py b/lingua_franca/lang/parse_de.py
index 4f21c54..9f6d447 100644
--- a/lingua_franca/lang/parse_de.py
+++ b/lingua_franca/lang/parse_de.py
@@ -745,7 +745,7 @@ def extract_datetime_de(string, currentDate, default_time):
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
diff --git a/lingua_franca/lang/parse_es.py b/lingua_franca/lang/parse_es.py
index 66dfbd6..536f980 100644
--- a/lingua_franca/lang/parse_es.py
+++ b/lingua_franca/lang/parse_es.py
@@ -993,7 +993,7 @@ def extract_datetime_es(input_str, currentDate=None, default_time=None):
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
diff --git a/lingua_franca/lang/parse_nl.py b/lingua_franca/lang/parse_nl.py
index eb909e0..a656779 100644
--- a/lingua_franca/lang/parse_nl.py
+++ b/lingua_franca/lang/parse_nl.py
@@ -1305,7 +1305,7 @@ def extract_datetime_nl(string, dateNow, default_time):
idx += used - 1
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
diff --git a/lingua_franca/lang/parse_sv.py b/lingua_franca/lang/parse_sv.py
index 1a6e0ec..af34e9a 100644
--- a/lingua_franca/lang/parse_sv.py
+++ b/lingua_franca/lang/parse_sv.py
@@ -653,7 +653,7 @@ def extract_datetime_sv(string, currentDate, default_time):
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
| MycroftAI/lingua-franca | d726b0562400769577e724c6fcb12307934435b6 | diff --git a/test/test_parse_da.py b/test/test_parse_da.py
index 8342a99..8dfddb5 100644
--- a/test/test_parse_da.py
+++ b/test/test_parse_da.py
@@ -71,7 +71,7 @@ class TestNormalize(unittest.TestCase):
# self.assertEqual(extract_number("tre fjerdedel kop", lang="da-dk"),
# 3.0 / 4.0)
- def test_extractdatetime_de(self):
+ def test_extractdatetime_da(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 0, 0)
[extractedDate, leftover] = extract_datetime(text, date,
@@ -158,6 +158,10 @@ class TestNormalize(unittest.TestCase):
# testExtract("lad os mødes klokken 8:00 om aftenen",
# "2017-06-27 20:00:00", "lad os mødes")
+ def test_extractdatetime_no_time(self):
+ """Check that None is returned if no time is found in sentence."""
+ self.assertEqual(extract_datetime('ingen tid', lang='da-da'), None)
+
def test_extractdatetime_default_da(self):
default = time(9, 0, 0)
anchor = datetime(2017, 6, 27, 0, 0)
diff --git a/test/test_parse_de.py b/test/test_parse_de.py
index 1a10f0a..e0d097c 100644
--- a/test/test_parse_de.py
+++ b/test/test_parse_de.py
@@ -159,6 +159,10 @@ class TestNormalize(unittest.TestCase):
testExtract("lass uns treffen um 8:00 abends",
"2017-06-27 20:00:00", "lass uns treffen")
+ def test_extractdatetime_no_time(self):
+ """Check that None is returned if no time is found in sentence."""
+ self.assertEqual(extract_datetime('kein zeit', lang='de-de'), None)
+
def test_extractdatetime_default_de(self):
default = time(9, 0, 0)
anchor = datetime(2017, 6, 27, 0, 0)
diff --git a/test/test_parse_es.py b/test/test_parse_es.py
index 355554e..a597cdb 100644
--- a/test/test_parse_es.py
+++ b/test/test_parse_es.py
@@ -202,10 +202,6 @@ class TestDatetime_es(unittest.TestCase):
"ayer por la tarde", anchorDate=datetime(1998, 1, 1),
lang='es')[0], datetime(1997, 12, 31, 15))
- self.assertEqual(extract_datetime(
- "qué año es", anchorDate=datetime(1998, 1, 1),
- lang='es')[0], datetime(1998, 1, 1))
-
self.assertEqual(extract_datetime("hoy 2 de la mañana", lang='es',
anchorDate=datetime(1998, 1, 1))[0],
datetime(1998, 1, 1, 2))
@@ -213,6 +209,10 @@ class TestDatetime_es(unittest.TestCase):
anchorDate=datetime(1998, 1, 1))[0],
datetime(1998, 1, 1, 14))
+ def test_extractdatetime_no_time(self):
+ """Check that None is returned if no time is found in sentence."""
+ self.assertEqual(extract_datetime('no hay tiempo', lang='es-es'), None)
+
@unittest.skip("These phrases are not parsing correctly.")
def test_extract_datetime_relative_failing(self):
# parses as "morning" and returns 8:00 on anchorDate
diff --git a/test/test_parse_nl.py b/test/test_parse_nl.py
index fe2148f..edd3ec7 100644
--- a/test/test_parse_nl.py
+++ b/test/test_parse_nl.py
@@ -146,6 +146,10 @@ class TestParsing(unittest.TestCase):
anchor, lang=LANG, default_time=default)
self.assertEqual(default, res[0].time())
+ def test_extractdatetime_no_time(self):
+ """Check that None is returned if no time is found in sentence."""
+ self.assertEqual(extract_datetime('geen tijd', lang=LANG), None)
+
def test_spaces(self):
self.assertEqual(normalize(" dit is een test", LANG),
"dit is 1 test")
diff --git a/test/test_parse_sv.py b/test/test_parse_sv.py
index d577bed..c5e02cd 100644
--- a/test/test_parse_sv.py
+++ b/test/test_parse_sv.py
@@ -97,6 +97,10 @@ class TestNormalize(unittest.TestCase):
anchor, lang='sv-se', default_time=default)
self.assertEqual(default, res[0].time())
+ def test_extractdatetime_no_time(self):
+ """Check that None is returned if no time is found in sentence."""
+ self.assertEqual(extract_datetime('Ingen tid', lang='sv-se'), None)
+
def test_numbers(self):
self.assertEqual(normalize("det här är ett ett två tre test",
lang='sv-se'),
 | extract_datetime() returns 8 AM results for utterances where no time specification was made
forslund has tracked this back to Lingua Franca in [mycroft-core issue #2647](https://github.com/MycroftAI/mycroft-core/issues/2647)
> Found this while reworking the skill-reminder. While processing a non-timed reminder there is a safeguard mechanism that checks [extract_datetime](https://github.com/MycroftAI/mycroft-core/blob/a6cb0c5258dbc34e7f80daf50ed751c87a40c61e/mycroft/util/parse.py#L145) in ["the case where padatious misses the time/date"](https://github.com/MycroftAI/skill-reminder/blob/b8dc13ec00bb0feb361deb58fd71080e98ed1325/__init__.py#L251), with the result of returning 8 AM and blocking the non-timed reminder altogether. (Tested in the German language with the phrase "Erinnere mich an Meeting"; the vocab was also changed locally and tested with "Erinnere mich Meeting", with no difference.)
-------------------
> Seems to be a German (or non-English) issue in Lingua Franca:
>
> Mycroft calls:
>
> from mycroft.util.parse import extract_datetime
> print(extract_datetime('Remind me of the meeting', lang='en-us'))
> None
> print(extract_datetime('Remind me of the meeting', lang='de-de'))
> [datetime.datetime(2020, 8, 5, 0, 0, tzinfo=tzfile('/usr/share/zoneinfo/Europe/Stockholm')), 'remind me of the meeting']
>
> Lingua franca calls:
>
> from lingua_franca.parse import extract_datetime
> print(extract_datetime('Remind me of the meeting', lang='en-us'))
> None
> print(extract_datetime('Remind me of the meeting', lang='de-de'))
> [datetime.datetime(2020, 8, 5, 0, 0, tzinfo=tzlocal()), 'remind me of the meeting'] | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse_sv.py::TestNormalize::test_extractdatetime_no_time",
"test/test_parse_nl.py::TestParsing::test_extractdatetime_no_time",
"test/test_parse_es.py::TestDatetime_es::test_extractdatetime_no_time",
"test/test_parse_da.py::TestNormalize::test_extractdatetime_no_time",
"test/test_parse_de.py::TestNormalize::test_extractdatetime_no_time"
] | [
"test/test_parse_sv.py::TestNormalize::test_extractnumber_sv",
"test/test_parse_sv.py::TestNormalize::test_extractdatetime_sv",
"test/test_parse_sv.py::TestNormalize::test_extractdatetime_default_sv",
"test/test_parse_sv.py::TestNormalize::test_numbers",
"test/test_parse_nl.py::TestParsing::test_extractdatetime_nl",
"test/test_parse_nl.py::TestParsing::test_articles",
"test/test_parse_nl.py::TestParsing::test_extractdatetime_default_nl",
"test/test_parse_nl.py::TestParsing::test_spaces",
"test/test_parse_nl.py::TestParsing::test_numbers",
"test/test_parse_nl.py::TestParsing::test_extract_number",
"test/test_parse_es.py::TestNormalize::test_isFraction_es",
"test/test_parse_es.py::TestNormalize::test_articles_es",
"test/test_parse_es.py::TestNormalize::test_extract_number_es",
"test/test_parse_es.py::TestNormalize::test_numbers_es",
"test/test_parse_es.py::TestDatetime_es::test_extract_datetime_relative",
"test/test_parse_es.py::TestDatetime_es::test_datetime_by_date_es",
"test/test_parse_da.py::TestNormalize::test_spaces",
"test/test_parse_da.py::TestNormalize::test_extract_number",
"test/test_parse_da.py::TestNormalize::test_articles",
"test/test_parse_da.py::TestNormalize::test_extractdatetime_default_da",
"test/test_parse_da.py::TestNormalize::test_extractdatetime_da",
"test/test_parse_da.py::TestNormalize::test_numbers",
"test/test_parse_de.py::TestNormalize::test_spaces",
"test/test_parse_de.py::TestNormalize::test_extract_number",
"test/test_parse_de.py::TestNormalize::test_numbers",
"test/test_parse_de.py::TestNormalize::test_articles",
"test/test_parse_de.py::TestNormalize::test_extractdatetime_default_de",
"test/test_parse_de.py::TestNormalize::test_extractdatetime_de"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-08-05T13:52:36Z" | apache-2.0 |
|
MycroftAI__lingua-franca-226 | diff --git a/lingua_franca/lang/parse_en.py b/lingua_franca/lang/parse_en.py
index 0a8b8e1..3075715 100644
--- a/lingua_franca/lang/parse_en.py
+++ b/lingua_franca/lang/parse_en.py
@@ -694,7 +694,8 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
timeQualifiersAM = ['morning']
timeQualifiersPM = ['afternoon', 'evening', 'night', 'tonight']
timeQualifiersList = set(timeQualifiersAM + timeQualifiersPM)
- markers = ['at', 'in', 'on', 'by', 'this', 'around', 'for', 'of', "within"]
+ year_markers = ['in', 'on', 'of']
+ markers = year_markers + ['at', 'by', 'this', 'around', 'for', "within"]
days = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
months = ['january', 'february', 'march', 'april', 'may', 'june',
@@ -743,6 +744,10 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
yearOffset = multiplier * 100
elif wordNext == "millennium":
yearOffset = multiplier * 1000
+ elif word in year_markers and is_numeric(wordNext) and len(wordNext) == 4:
+ yearOffset = int(wordNext) - int(currentYear)
+ used += 2
+ hasYear = True
# couple of
elif word == "2" and wordNext == "of" and \
wordNextNext in year_multiples:
@@ -792,7 +797,7 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
if wordPrev == "the":
start -= 1
used += 1
- # parse 5 days, 10 weeks, last week, next week
+ # parse 5 days, 10 weeks, last week, next week
elif word == "day":
if wordPrev and wordPrev[0].isdigit():
dayOffset += int(wordPrev)
@@ -811,7 +816,7 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
dayOffset = -7
start -= 1
used = 2
- # parse 10 months, next month, last month
+ # parse 10 months, next month, last month
elif word == "month" and not fromFlag and wordPrev:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
@@ -856,7 +861,7 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
dayOffset -= 7
used += 1
start -= 1
- # parse 15 of July, June 20th, Feb 18, 19 of February
+ # parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort and not fromFlag:
try:
m = months.index(word)
diff --git a/requirements.txt b/requirements.txt
index d431540..be56c7a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,1 +1,1 @@
-python-dateutil==2.6.0
\ No newline at end of file
+python-dateutil~=2.6.0
\ No newline at end of file
| MycroftAI/lingua-franca | eb6d677d4f8d201e1bcf645382eb262c7d0688f1 | diff --git a/test/test_parse.py b/test/test_parse.py
index a494cc2..7ee6b78 100644
--- a/test/test_parse.py
+++ b/test/test_parse.py
@@ -710,6 +710,11 @@ class TestNormalize(unittest.TestCase):
testExtract("what's the weather like next tuesday night",
"2017-07-04 22:00:00", "what is weather like night")
+ def test_extract_date_years(self):
+ date = datetime(2017, 6, 27, tzinfo=default_timezone()) # Tue June 27, 2017
+ self.assertEqual(extract_datetime('in 2007', date)[0],
+ datetime(2007, 6, 27, tzinfo=date.tzinfo))
+
def test_extract_ambiguous_time_en(self):
morning = datetime(2017, 6, 27, 8, 1, 2, tzinfo=default_timezone())
evening = datetime(2017, 6, 27, 20, 1, 2, tzinfo=default_timezone())
| "In 2007" should be parsed as year
**Describe the bug**
I know that for Mycroft it is reasonable to parse 2007 as 20:07. But this is a separate library, and here 2007 should be parsed as a year.
**To Reproduce**
```python
import lingua_franca
from lingua_franca.parse import extract_datetime

lingua_franca.load_language('en')
print(extract_datetime('in 2007'))
# observed output: [datetime.datetime(2022, 2, 21, 20, 7), '']
```
**Expected behavior**
[datetime.datetime(2007, 2, 21, 0, 0), '']
**env**
lingua_franca-0.4.2-py3-none-any.whl
python3.9 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse.py::TestNormalize::test_extract_date_years"
] | [
"test/test_parse.py::TestFuzzyMatch::test_matches",
"test/test_parse.py::TestFuzzyMatch::test_match_one",
"test/test_parse.py::TestNormalize::test_extract_ambiguous_time_en",
"test/test_parse.py::TestNormalize::test_articles",
"test/test_parse.py::TestNormalize::test_extract_number_ambiguous",
"test/test_parse.py::TestNormalize::test_extract_with_other_tzinfo",
"test/test_parse.py::TestNormalize::test_extract_number_priority",
"test/test_parse.py::TestNormalize::test_spaces",
"test/test_parse.py::TestNormalize::test_extractdatetime_fractions_en",
"test/test_parse.py::TestNormalize::test_multiple_numbers",
"test/test_parse.py::TestNormalize::test_normalize_numbers",
"test/test_parse.py::TestNormalize::test_extract_duration_en",
"test/test_parse.py::TestNormalize::test_extract_duration_case_en",
"test/test_parse.py::TestNormalize::test_numbers",
"test/test_parse.py::TestNormalize::test_extract_date_with_may_I_en",
"test/test_parse.py::TestNormalize::test_extract_relativedatetime_en",
"test/test_parse.py::TestNormalize::test_combinations",
"test/test_parse.py::TestNormalize::test_contractions",
"test/test_parse.py::TestNormalize::test_extractdatetime_en",
"test/test_parse.py::TestNormalize::test_gender",
"test/test_parse.py::TestNormalize::test_extract_date_with_number_words",
"test/test_parse.py::TestNormalize::test_extract_number"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-02-28T06:56:12Z" | apache-2.0 |
|
MycroftAI__lingua-franca-23 | diff --git a/lingua_franca/lang/parse_en.py b/lingua_franca/lang/parse_en.py
index 872c478..2136423 100644
--- a/lingua_franca/lang/parse_en.py
+++ b/lingua_franca/lang/parse_en.py
@@ -1363,7 +1363,7 @@ def extract_datetime_en(string, dateNow, default_time):
idx += used - 1
found = True
# check that we found a date
- if not date_found:
+ if not date_found():
return None
if dayOffset is False:
| MycroftAI/lingua-franca | 4c66a5ec87113842519b5f6166695b75ed01a46f | diff --git a/test/test_parse.py b/test/test_parse.py
index a6533cf..c1510d9 100644
--- a/test/test_parse.py
+++ b/test/test_parse.py
@@ -481,6 +481,10 @@ class TestNormalize(unittest.TestCase):
morning = datetime(2017, 6, 27, 8, 1, 2)
evening = datetime(2017, 6, 27, 20, 1, 2)
noonish = datetime(2017, 6, 27, 12, 1, 2)
+ self.assertEqual(
+ extract_datetime('feed the fish'), None)
+ self.assertEqual(
+ extract_datetime(' '), None)
self.assertEqual(
extract_datetime('feed fish at 10 o\'clock', morning)[0],
datetime(2017, 6, 27, 10, 0, 0))
| extract_datetime(" ") returns today not None
Clearly this is my favourite method looking at the issues list :yum:
An empty string passed to `extract_datetime("")` correctly returns `None`.
However, a non-empty string that does not contain a date, e.g. `extract_datetime(" ")`, returns a datetime of the current day at midnight.
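A minimal reproduction sketch, assuming the `lingua_franca.parse` API used in the related reports (the pre-fix return value is the behaviour described above):

```python
from lingua_franca.parse import extract_datetime

print(extract_datetime(""))    # None, as documented
print(extract_datetime("  "))  # pre-fix: [<today at 00:00>, ''] instead of None
```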
Documentation for the method:
https://mycroft-core.readthedocs.io/en/stable/source/mycroft.util.html#extract-datetime | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse.py::TestNormalize::test_extract_ambiguous_time_en"
] | [
"test/test_parse.py::TestFuzzyMatch::test_match_one",
"test/test_parse.py::TestFuzzyMatch::test_matches",
"test/test_parse.py::TestNormalize::test_articles",
"test/test_parse.py::TestNormalize::test_combinations",
"test/test_parse.py::TestNormalize::test_contractions",
"test/test_parse.py::TestNormalize::test_extract_duration_en",
"test/test_parse.py::TestNormalize::test_extract_number",
"test/test_parse.py::TestNormalize::test_extract_relativedatetime_en",
"test/test_parse.py::TestNormalize::test_extractdatetime_en",
"test/test_parse.py::TestNormalize::test_gender",
"test/test_parse.py::TestNormalize::test_multiple_numbers",
"test/test_parse.py::TestNormalize::test_numbers",
"test/test_parse.py::TestNormalize::test_spaces"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-09-17T23:00:32Z" | apache-2.0 |
|
MycroftAI__lingua-franca-230 | diff --git a/lingua_franca/lang/parse_en.py b/lingua_franca/lang/parse_en.py
index 3075715..e5e9ed2 100644
--- a/lingua_franca/lang/parse_en.py
+++ b/lingua_franca/lang/parse_en.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, time
from dateutil.relativedelta import relativedelta
@@ -678,7 +678,7 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
if text == "":
return None
-
+ default_time = default_time or time(0, 0, 0)
found = False
daySpecified = False
dayOffset = False
@@ -1390,7 +1390,9 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
else:
# ignore the current HH:MM:SS if relative using days or greater
if hrOffset == 0 and minOffset == 0 and secOffset == 0:
- extractedDate = extractedDate.replace(hour=0, minute=0, second=0)
+ extractedDate = extractedDate.replace(hour=default_time.hour,
+ minute=default_time.minute,
+ second=default_time.second)
if yearOffset != 0:
extractedDate = extractedDate + relativedelta(years=yearOffset)
@@ -1398,7 +1400,15 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
extractedDate = extractedDate + relativedelta(months=monthOffset)
if dayOffset != 0:
extractedDate = extractedDate + relativedelta(days=dayOffset)
- if hrAbs != -1 and minAbs != -1:
+ if hrOffset != 0:
+ extractedDate = extractedDate + relativedelta(hours=hrOffset)
+ if minOffset != 0:
+ extractedDate = extractedDate + relativedelta(minutes=minOffset)
+ if secOffset != 0:
+ extractedDate = extractedDate + relativedelta(seconds=secOffset)
+
+
+ if hrAbs != -1 and minAbs != -1 and not hrOffset and not minOffset and not secOffset:
# If no time was supplied in the string set the time to default
# time if it's available
if hrAbs is None and minAbs is None and default_time is not None:
@@ -1407,17 +1417,13 @@ def extract_datetime_en(text, anchorDate=None, default_time=None):
hrAbs = hrAbs or 0
minAbs = minAbs or 0
- extractedDate = extractedDate + relativedelta(hours=hrAbs,
- minutes=minAbs)
+ extractedDate = extractedDate.replace(hour=hrAbs,
+ minute=minAbs)
+
if (hrAbs != 0 or minAbs != 0) and datestr == "":
if not daySpecified and anchorDate > extractedDate:
extractedDate = extractedDate + relativedelta(days=1)
- if hrOffset != 0:
- extractedDate = extractedDate + relativedelta(hours=hrOffset)
- if minOffset != 0:
- extractedDate = extractedDate + relativedelta(minutes=minOffset)
- if secOffset != 0:
- extractedDate = extractedDate + relativedelta(seconds=secOffset)
+
for idx, word in enumerate(words):
if words[idx] == "and" and \
words[idx - 1] == "" and words[idx + 1] == "":
| MycroftAI/lingua-franca | b76bc51a65e558a8cc214f28f62d3db680132ac0 | diff --git a/test/test_parse.py b/test/test_parse.py
index 7ee6b78..542b572 100644
--- a/test/test_parse.py
+++ b/test/test_parse.py
@@ -14,7 +14,7 @@
# limitations under the License.
#
import unittest
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, time
from dateutil import tz
from lingua_franca import load_language, unload_language, set_default_lang
@@ -161,7 +161,7 @@ class TestNormalize(unittest.TestCase):
self.assertEqual(extract_number("2 fifth",
ordinals=True), 5)
self.assertEqual(extract_number("2 fifths",
- ordinals=False), 2/5)
+ ordinals=False), 2 / 5)
self.assertEqual(extract_number("2 fifths",
ordinals=None), 2)
@@ -213,7 +213,6 @@ class TestNormalize(unittest.TestCase):
ordinals=None), 8)
def test_extract_number(self):
-
self.assertEqual(extract_number("this is 2 test"), 2)
self.assertEqual(extract_number("this is test number 4"), 4)
self.assertEqual(extract_number("three cups"), 3)
@@ -331,7 +330,7 @@ class TestNormalize(unittest.TestCase):
self.assertEqual(extract_duration("The movie is one hour, fifty seven"
" and a half minutes long"),
(timedelta(hours=1, minutes=57.5),
- "The movie is , long"))
+ "The movie is , long"))
self.assertEqual(extract_duration("Four and a Half minutes until"
" sunset"),
(timedelta(minutes=4.5), "until sunset"))
@@ -713,7 +712,67 @@ class TestNormalize(unittest.TestCase):
def test_extract_date_years(self):
date = datetime(2017, 6, 27, tzinfo=default_timezone()) # Tue June 27, 2017
self.assertEqual(extract_datetime('in 2007', date)[0],
- datetime(2007, 6, 27, tzinfo=date.tzinfo))
+ datetime(2007, 6, 27, tzinfo=date.tzinfo))
+
+
+ def test_extractdatetime_with_default_time_en(self):
+ def extractWithFormat(text):
+ default_time = time(15, 4, tzinfo=default_timezone())
+ date = datetime(2017, 6, 27, 13, 4, tzinfo=default_timezone()) # Tue June 27, 2017 @ 1:04pm
+ [extractedDate, leftover] = extract_datetime(text, date, default_time=default_time)
+ extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
+ return [extractedDate, leftover]
+
+ def testExtract(text, expected_date, expected_leftover):
+ res = extractWithFormat(normalize(text))
+ self.assertEqual(res[0], expected_date, "for=" + text)
+ self.assertEqual(res[1], expected_leftover, "for=" + text)
+
+ # ignore default time arg
+ testExtract("in a second",
+ "2017-06-27 13:04:01", "")
+ testExtract("in a minute",
+ "2017-06-27 13:05:00", "")
+ testExtract("in an hour",
+ "2017-06-27 14:04:00", "")
+
+ # use default time
+ testExtract("in a couple weeks",
+ "2017-07-11 15:04:00", "")
+ testExtract("in a couple of weeks",
+ "2017-07-11 15:04:00", "")
+ testExtract("in a couple months",
+ "2017-08-27 15:04:00", "")
+ testExtract("in a couple years",
+ "2019-06-27 15:04:00", "")
+ testExtract("in a couple of months",
+ "2017-08-27 15:04:00", "")
+ testExtract("in a couple of years",
+ "2019-06-27 15:04:00", "")
+ testExtract("in a decade",
+ "2027-06-27 15:04:00", "")
+ testExtract("in a couple of decades",
+ "2037-06-27 15:04:00", "")
+ testExtract("next decade",
+ "2027-06-27 15:04:00", "")
+ testExtract("in a century",
+ "2117-06-27 15:04:00", "")
+ testExtract("in a millennium",
+ "3017-06-27 15:04:00", "")
+ testExtract("in a couple decades",
+ "2037-06-27 15:04:00", "")
+ testExtract("in 5 decades",
+ "2067-06-27 15:04:00", "")
+ testExtract("in a couple centuries",
+ "2217-06-27 15:04:00", "")
+ testExtract("in a couple of centuries",
+ "2217-06-27 15:04:00", "")
+ testExtract("in 2 centuries",
+ "2217-06-27 15:04:00", "")
+ testExtract("in a couple millenniums",
+ "4017-06-27 15:04:00", "")
+ testExtract("in a couple of millenniums",
+ "4017-06-27 15:04:00", "")
def test_extract_ambiguous_time_en(self):
morning = datetime(2017, 6, 27, 8, 1, 2, tzinfo=default_timezone())
@@ -768,7 +827,7 @@ class TestNormalize(unittest.TestCase):
(not_local_dt.year, not_local_dt.month, not_local_dt.day,
not_local_dt.hour, not_local_dt.minute, not_local_dt.second,
not_local_dt.tzinfo))
- self.assertNotEqual((test_dt.year, test_dt.month, test_dt.day,
+ self.assertNotEqual((test_dt.year, test_dt.month, test_dt.day,
test_dt.hour, test_dt.minute, test_dt.second,
test_dt.tzinfo),
(local_dt.year, local_dt.month, local_dt.day,
| Using default_time arg on extract_datetime skews results
**Describe the bug**
The Reminder Skill uses the `default_time` argument of `extract_datetime` and the extracted datetime is being returned 8 hours later than it should.
Originally reported
https://github.com/MycroftAI/skill-reminder/issues/50
**To Reproduce**
Have confirmed this by doing a quick modification of the LF unit tests - the first succeeds, second fails:
```python
def test_extract_relativedatetime_en(self):
def extractWithFormat(text, default_time):
date = datetime(2017, 6, 27, 10, 1, 2, tzinfo=default_timezone())
[extractedDate, leftover] = extract_datetime(text, date, default_time=default_time)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover, default_time=None):
res = extractWithFormat(normalize(text), default_time)
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("lets meet in a minute",
"2017-06-27 10:02:02", "lets meet")
testExtract("lets meet in a minute",
"2017-06-27 10:02:02", "lets meet", default_time=now_local())
```
**Expected behavior**
If a datetime with a specific time is extracted then this argument should not affect the result. This is likely part of a bigger refactor of this function. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse.py::TestNormalize::test_extractdatetime_with_default_time_en"
] | [
"test/test_parse.py::TestFuzzyMatch::test_match_one",
"test/test_parse.py::TestFuzzyMatch::test_matches",
"test/test_parse.py::TestNormalize::test_extract_date_years",
"test/test_parse.py::TestNormalize::test_numbers",
"test/test_parse.py::TestNormalize::test_extract_date_with_may_I_en",
"test/test_parse.py::TestNormalize::test_extractdatetime_en",
"test/test_parse.py::TestNormalize::test_combinations",
"test/test_parse.py::TestNormalize::test_extractdatetime_fractions_en",
"test/test_parse.py::TestNormalize::test_extract_number_ambiguous",
"test/test_parse.py::TestNormalize::test_extract_duration_en",
"test/test_parse.py::TestNormalize::test_extract_with_other_tzinfo",
"test/test_parse.py::TestNormalize::test_extract_number",
"test/test_parse.py::TestNormalize::test_spaces",
"test/test_parse.py::TestNormalize::test_extract_relativedatetime_en",
"test/test_parse.py::TestNormalize::test_contractions",
"test/test_parse.py::TestNormalize::test_extract_number_priority",
"test/test_parse.py::TestNormalize::test_multiple_numbers",
"test/test_parse.py::TestNormalize::test_extract_date_with_number_words",
"test/test_parse.py::TestNormalize::test_gender",
"test/test_parse.py::TestNormalize::test_normalize_numbers",
"test/test_parse.py::TestNormalize::test_extract_ambiguous_time_en",
"test/test_parse.py::TestNormalize::test_extract_duration_case_en",
"test/test_parse.py::TestNormalize::test_articles"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-04-12T05:56:29Z" | apache-2.0 |
|
MycroftAI__lingua-franca-32 | diff --git a/lingua_franca/lang/parse_es.py b/lingua_franca/lang/parse_es.py
index bebda23..d2ebea9 100644
--- a/lingua_franca/lang/parse_es.py
+++ b/lingua_franca/lang/parse_es.py
@@ -20,7 +20,8 @@
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
-from lingua_franca.lang.parse_common import is_numeric, look_for_fractions
+from lingua_franca.lang.format_es import pronounce_number_es
+from lingua_franca.lang.parse_common import *
from lingua_franca.lang.common_data_es import _ARTICLES_ES, _NUM_STRING_ES
@@ -57,7 +58,12 @@ def isFractional_es(input_str):
return False
-def extractnumber_es(text):
+# TODO: short_scale and ordinals don't do anything here.
+# The parameters are present in the function signature for API compatibility
+# reasons.
+#
+# Returns incorrect output on certain fractional phrases like, "cuarto de dos"
+def extractnumber_es(text, short_scale=True, ordinals=False):
"""
This function prepares the given text for parsing by making
numbers consistent, getting rid of contractions, etc.
@@ -108,7 +114,7 @@ def extractnumber_es(text):
result = 0
# handle fractions
if next_word != "avos":
- result += val
+ result = val
else:
result = float(result) / float(val)
@@ -263,6 +269,24 @@ def es_number_parse(words, i):
return es_number(i)
+def extract_numbers_es(text, short_scale=True, ordinals=False):
+ """
+ Takes in a string and extracts a list of numbers.
+
+ Args:
+ text (str): the string to extract a number from
+ short_scale (bool): Use "short scale" or "long scale" for large
+ numbers -- over a million. The default is short scale, which
+ is now common in most English speaking countries.
+ See https://en.wikipedia.org/wiki/Names_of_large_numbers
+ ordinals (bool): consider ordinal numbers, e.g. third=3 instead of 1/3
+ Returns:
+ list: list of extracted numbers as floats
+ """
+ return extract_numbers_generic(text, pronounce_number_es, extractnumber_es,
+ short_scale=short_scale, ordinals=ordinals)
+
+
def normalize_es(text, remove_articles):
""" Spanish string normalization """
diff --git a/lingua_franca/parse.py b/lingua_franca/parse.py
index 69b803d..bcb521f 100644
--- a/lingua_franca/parse.py
+++ b/lingua_franca/parse.py
@@ -105,6 +105,8 @@ def extract_numbers(text, short_scale=True, ordinals=False, lang=None):
return extract_numbers_it(text, short_scale, ordinals)
elif lang_code == "da":
return extract_numbers_da(text, short_scale, ordinals)
+ elif lang_code == "es":
+ return extract_numbers_es(text, short_scale, ordinals)
# TODO: extractnumbers_xx for other languages
_log_unsupported_language(lang_code,
['en', 'it', 'fr', 'de', 'da'])
@@ -145,8 +147,9 @@ def extract_number(text, short_scale=True, ordinals=False, lang=None):
return extractnumber_de(text)
elif lang_code == "da":
return extractnumber_da(text)
+ elif lang_code == "es":
+ return extract_numbers_es(text, short_scale, ordinals)
elif lang_code == "nl":
- print("EXTRACTING NL")
return extractnumber_nl(text, short_scale=short_scale,
ordinals=ordinals)
# TODO: extractnumber_xx for other languages
| MycroftAI/lingua-franca | 6c43ca67438f14891930ec14e56572f2c2815427 | diff --git a/test/test_parse_es.py b/test/test_parse_es.py
index cb92e31..34e7472 100644
--- a/test/test_parse_es.py
+++ b/test/test_parse_es.py
@@ -16,13 +16,14 @@
#
import unittest
-from lingua_franca.parse import normalize
+from lingua_franca.parse import normalize, extract_numbers, extract_number
class TestNormalize(unittest.TestCase):
"""
Test cases for Spanish parsing
"""
+
def test_articles_es(self):
self.assertEqual(normalize("esta es la prueba", lang="es",
remove_articles=True),
@@ -76,6 +77,15 @@ class TestNormalize(unittest.TestCase):
lang="es"),
"999999")
+ def test_extract_number_es(self):
+ self.assertEqual(sorted(extract_numbers(
+ "1 7 cuatro catorce ocho 157", lang='es')), [1, 4, 7, 8, 14, 157])
+ self.assertEqual(sorted(extract_numbers(
+ "1 7 cuatro albuquerque naranja John Doe catorce ocho 157",
+ lang='es')), [1, 4, 7, 8, 14, 157])
+ self.assertEqual(extract_number("seis punto dos", lang='es'), 6.2)
+ self.assertEqual(extract_numbers("un medio", lang='es'), [0.5])
+
if __name__ == "__main__":
unittest.main()
| PR#2347 Fix extractnumber_es, add extract_numbers_es
https://github.com/MycroftAI/mycroft-core/pull/2347
- Fix bug causing extractnumber_es to return a sum instead of a list
- Add Spanish parser to extract_numbers and extract_number (see the usage sketch below)
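A short usage sketch of the intended behaviour, with expected values taken from the accompanying tests (assuming the public `lingua_franca.parse` entry points):

```python
from lingua_franca.parse import extract_number, extract_numbers

# extract_numbers should list every number it finds rather than summing them.
print(extract_numbers("1 7 cuatro catorce ocho 157", lang="es"))
# expected: [1, 4, 7, 8, 14, 157]

print(extract_number("seis punto dos", lang="es"))  # expected: 6.2
print(extract_numbers("un medio", lang="es"))       # expected: [0.5]
```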
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse_es.py::TestNormalize::test_extract_number_es"
] | [
"test/test_parse_es.py::TestNormalize::test_articles_es",
"test/test_parse_es.py::TestNormalize::test_numbers_es"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2019-10-06T16:19:53Z" | apache-2.0 |
|
MycroftAI__lingua-franca-87 | diff --git a/lingua_franca/lang/parse_en.py b/lingua_franca/lang/parse_en.py
index 58207de..ef0f93d 100644
--- a/lingua_franca/lang/parse_en.py
+++ b/lingua_franca/lang/parse_en.py
@@ -444,7 +444,11 @@ def _extract_whole_number_with_text_en(tokens, short_scale, ordinals):
current_val = val
else:
- if prev_word in _SUMS and word not in _SUMS and current_val >= 10:
+ if all([
+ prev_word in _SUMS,
+ word not in _SUMS,
+ word not in multiplies,
+ current_val >= 10]):
# Backtrack - we've got numbers we can't sum.
number_words.pop()
val = prev_val
| MycroftAI/lingua-franca | 5ac3a86f1b5d1f2f97722ecb14408411a0fc663f | diff --git a/test/test_parse.py b/test/test_parse.py
index 01aec52..f5718ae 100644
--- a/test/test_parse.py
+++ b/test/test_parse.py
@@ -123,6 +123,18 @@ class TestNormalize(unittest.TestCase):
short_scale=False), 1e12)
self.assertEqual(extract_number("this is the billionth test",
short_scale=False), 1e-12)
+
+ # Verify non-power multiples of ten no longer discard
+ # adjacent multipliers
+ self.assertEqual(extract_number("twenty thousand"), 20000)
+ self.assertEqual(extract_number("fifty million"), 50000000)
+
+ # This test fails due to
+ # self.assertEqual(extract_number("twenty billion three hundred million \
+ # nine hundred fifty thousand six hundred \
+ # seventy five point eight six"),
+ # 20300950675.86)
+
# TODO handle this case
# self.assertEqual(
# extract_number("6 dot six six six"),
 | Multiples of ten describing other multiples of ten require an intermediary number, i.e. "twenty thousand" doesn't work
"twenty thousand" returns "20". The only way I have found to get this sort of situation to work is to add an extra number. "twenty four thousand" properly returns "24000". | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse.py::TestNormalize::test_extract_number"
] | [
"test/test_parse.py::TestFuzzyMatch::test_match_one",
"test/test_parse.py::TestFuzzyMatch::test_matches",
"test/test_parse.py::TestNormalize::test_articles",
"test/test_parse.py::TestNormalize::test_combinations",
"test/test_parse.py::TestNormalize::test_contractions",
"test/test_parse.py::TestNormalize::test_extract_ambiguous_time_en",
"test/test_parse.py::TestNormalize::test_extract_date_with_may_I_en",
"test/test_parse.py::TestNormalize::test_extract_duration_en",
"test/test_parse.py::TestNormalize::test_extract_relativedatetime_en",
"test/test_parse.py::TestNormalize::test_extractdatetime_en",
"test/test_parse.py::TestNormalize::test_gender",
"test/test_parse.py::TestNormalize::test_multiple_numbers",
"test/test_parse.py::TestNormalize::test_numbers",
"test/test_parse.py::TestNormalize::test_spaces"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2020-03-10T20:33:41Z" | apache-2.0 |
|
MycroftAI__lingua-franca-90 | diff --git a/lingua_franca/lang/parse_en.py b/lingua_franca/lang/parse_en.py
index cf316d9..8054bf0 100644
--- a/lingua_franca/lang/parse_en.py
+++ b/lingua_franca/lang/parse_en.py
@@ -406,7 +406,9 @@ def _extract_whole_number_with_text_en(tokens, short_scale, ordinals):
# is the prev word a number and should we sum it?
# twenty two, fifty six
- if prev_word in _SUMS and val and val < 10:
+ if (prev_word in _SUMS and val and val < 10) or all([prev_word in
+ multiplies,
+ val < prev_val if prev_val else False]):
val = prev_val + val
# is the prev word a number and should we multiply it?
@@ -445,23 +447,86 @@ def _extract_whole_number_with_text_en(tokens, short_scale, ordinals):
else:
if all([
- prev_word in _SUMS,
- word not in _SUMS,
- word not in multiplies,
- current_val >= 10]):
+ prev_word in _SUMS,
+ word not in _SUMS,
+ word not in multiplies,
+ current_val >= 10]):
# Backtrack - we've got numbers we can't sum.
number_words.pop()
val = prev_val
break
prev_val = val
- # handle long numbers
- # six hundred sixty six
- # two million five hundred thousand
if word in multiplies and next_word not in multiplies:
- to_sum.append(val)
- val = 0
- prev_val = 0
+ # handle long numbers
+ # six hundred sixty six
+ # two million five hundred thousand
+ #
+ # This logic is somewhat complex, and warrants
+ # extensive documentation for the next coder's sake.
+ #
+ # The current word is a power of ten. `current_val` is
+ # its integer value. `val` is our working sum
+ # (above, when `current_val` is 1 million, `val` is
+ # 2 million.)
+ #
+ # We have a dict `string_num_scale` containing [value, word]
+ # pairs for "all" powers of ten: string_num_scale[10] == "ten.
+ #
+ # We need go over the rest of the tokens, looking for other
+ # powers of ten. If we find one, we compare it with the current
+ # value, to see if it's smaller than the current power of ten.
+ #
+ # Numbers which are not powers of ten will be passed over.
+ #
+ # If all the remaining powers of ten are smaller than our
+ # current value, we can set the current value aside for later,
+ # and begin extracting another portion of our final result.
+ # For example, suppose we have the following string.
+ # The current word is "million".`val` is 9000000.
+ # `current_val` is 1000000.
+ #
+ # "nine **million** nine *hundred* seven **thousand**
+ # six *hundred* fifty seven"
+ #
+ # Iterating over the rest of the string, the current
+ # value is larger than all remaining powers of ten.
+ #
+ # The if statement passes, and nine million (9000000)
+ # is appended to `to_sum`.
+ #
+ # The main variables are reset, and the main loop begins
+ # assembling another number, which will also be appended
+ # under the same conditions.
+ #
+ # By the end of the main loop, to_sum will be a list of each
+ # "place" from 100 up: [9000000, 907000, 600]
+ #
+ # The final three digits will be added to the sum of that list
+ # at the end of the main loop, to produce the extracted number:
+ #
+ # sum([9000000, 907000, 600]) + 57
+ # == 9,000,000 + 907,000 + 600 + 57
+ # == 9,907,657
+ #
+ # >>> foo = "nine million nine hundred seven thousand six
+ # hundred fifty seven"
+ # >>> extract_number(foo)
+ # 9907657
+
+ time_to_sum = True
+ for other_token in tokens[idx+1:]:
+ if other_token.word in multiplies:
+ if string_num_scale[other_token.word] >= current_val:
+ time_to_sum = False
+ else:
+ continue
+ if not time_to_sum:
+ break
+ if time_to_sum:
+ to_sum.append(val)
+ val = 0
+ prev_val = 0
if val is not None and to_sum:
val += sum(to_sum)
| MycroftAI/lingua-franca | e6837eb2f8fabb72af3a8389c4a85cbcdae5e40b | diff --git a/test/test_parse.py b/test/test_parse.py
index 845dc14..5046f42 100644
--- a/test/test_parse.py
+++ b/test/test_parse.py
@@ -136,11 +136,20 @@ class TestNormalize(unittest.TestCase):
self.assertEqual(extract_number("twenty thousand"), 20000)
self.assertEqual(extract_number("fifty million"), 50000000)
- # This test fails due to
- # self.assertEqual(extract_number("twenty billion three hundred million \
- # nine hundred fifty thousand six hundred \
- # seventy five point eight six"),
- # 20300950675.86)
+ # Verify smaller powers of ten no longer cause miscalculation of larger
+ # powers of ten (see MycroftAI#86)
+ self.assertEqual(extract_number("twenty billion three hundred million \
+ nine hundred fifty thousand six hundred \
+ seventy five point eight"),
+ 20300950675.8)
+ self.assertEqual(extract_number("nine hundred ninety nine million nine \
+ hundred ninety nine thousand nine \
+ hundred ninety nine point nine"),
+ 999999999.9)
+
+ # TODO why does "trillion" result in xxxx.0?
+ self.assertEqual(extract_number("eight hundred trillion two hundred \
+ fifty seven"), 800000000000257.0)
# TODO handle this case
# self.assertEqual(
| extractnumber_en() summing "leading hundreds"
```python
>>> extract_number("ninety nine million")
99000000
>>> extract_number("nine hundred ninety nine million")
99000900
```
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_parse.py::TestNormalize::test_extract_number"
] | [
"test/test_parse.py::TestFuzzyMatch::test_match_one",
"test/test_parse.py::TestFuzzyMatch::test_matches",
"test/test_parse.py::TestNormalize::test_articles",
"test/test_parse.py::TestNormalize::test_combinations",
"test/test_parse.py::TestNormalize::test_contractions",
"test/test_parse.py::TestNormalize::test_extract_ambiguous_time_en",
"test/test_parse.py::TestNormalize::test_extract_date_with_may_I_en",
"test/test_parse.py::TestNormalize::test_extract_duration_en",
"test/test_parse.py::TestNormalize::test_extract_relativedatetime_en",
"test/test_parse.py::TestNormalize::test_extractdatetime_en",
"test/test_parse.py::TestNormalize::test_gender",
"test/test_parse.py::TestNormalize::test_multiple_numbers",
"test/test_parse.py::TestNormalize::test_numbers",
"test/test_parse.py::TestNormalize::test_spaces"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2020-03-13T01:14:35Z" | apache-2.0 |
|
MycroftAI__mycroft-messagebus-client-30 | diff --git a/mycroft_bus_client/message.py b/mycroft_bus_client/message.py
index 48351ca..037e3e3 100644
--- a/mycroft_bus_client/message.py
+++ b/mycroft_bus_client/message.py
@@ -146,9 +146,7 @@ class Message:
Returns
(Message) message with the type modified to match default response
"""
- response_message = Message(self.msg_type + '.response', data or {},
- context or self.context)
- return response_message
+ return self.reply(self.msg_type + '.response', data, context)
def publish(self, msg_type, data, context=None):
"""
| MycroftAI/mycroft-messagebus-client | 24ca33ae96af0be14ec575c26582aee666233d85 | diff --git a/test/test_message.py b/test/test_message.py
index b1f2077..67d2939 100644
--- a/test/test_message.py
+++ b/test/test_message.py
@@ -40,6 +40,23 @@ class TestMessage(TestCase):
self.assertEqual(response_msg.data, {})
self.assertEqual(response_msg.context, source.context)
+ def test_reply(self):
+ """Assert that the source and destination are swapped"""
+ source = Message('test_type',
+ data={'robot': 'marvin', 'android': 'data'},
+ context={'source': 'earth',
+ 'destination': 'alpha centauri'})
+
+ reply_msg = source.reply('reply_type')
+ self.assertEqual(reply_msg.context["source"],
+ source.context["destination"])
+ self.assertEqual(reply_msg.context["destination"],
+ source.context["source"])
+
+ # assert that .response calls .reply internally as stated in docstrings
+ response_msg = source.response()
+ self.assertEqual(response_msg.context, reply_msg.context)
+
def test_dig_for_message_simple(self):
test_msg = Message("test message", {"test": "data"}, {"time": time()})
self.assertEqual(test_msg, get_message_standard(test_msg))
| message.response should use message.reply internally
to account for proper message routing https://github.com/JarbasHiveMind/HiveMind-core/wiki/Mycroft-Messages + https://github.com/MycroftAI/mycroft-core/pull/2461 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_message.py::TestMessage::test_reply"
] | [
"test/test_message.py::TestMessage::test_dig_for_message_nested",
"test/test_message.py::TestMessage::test_response",
"test/test_message.py::TestMessage::test_dig_for_message_simple",
"test/test_message.py::TestMessage::test_serialize_deserialize",
"test/test_message.py::TestMessage::test_dig_for_message_no_method_call",
"test/test_message.py::TestMessage::test_dig_for_message_invalid_type"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-08-29T13:02:53Z" | apache-2.0 |
|
N-Wouda__ALNS-103 | diff --git a/alns/select/OperatorSelectionScheme.py b/alns/select/OperatorSelectionScheme.py
index 78ddc6b..365f79d 100644
--- a/alns/select/OperatorSelectionScheme.py
+++ b/alns/select/OperatorSelectionScheme.py
@@ -18,7 +18,7 @@ class OperatorSelectionScheme(ABC):
num_repair
Number of repair operators.
op_coupling
- Optional boolean matrix that indicates coupling between destroy and
+ Optional 2D boolean matrix that indicates coupling between destroy and
repair operators. Entry (i, j) is True if destroy operator i can be
used together with repair operator j, and False otherwise.
"""
@@ -29,15 +29,17 @@ class OperatorSelectionScheme(ABC):
num_repair: int,
op_coupling: Optional[np.ndarray] = None,
):
+ if op_coupling is not None:
+ op_coupling = np.asarray(op_coupling, dtype=bool)
+ op_coupling = np.atleast_2d(op_coupling)
+ else:
+ op_coupling = np.ones((num_destroy, num_repair), dtype=bool)
+
self._validate_arguments(num_destroy, num_repair, op_coupling)
self._num_destroy = num_destroy
self._num_repair = num_repair
-
- if op_coupling is not None:
- self._op_coupling = op_coupling.astype(bool)
- else:
- self._op_coupling = np.ones((num_destroy, num_repair), dtype=bool)
+ self._op_coupling = op_coupling
@property
def num_destroy(self) -> int:
@@ -78,8 +80,8 @@ class OperatorSelectionScheme(ABC):
@abstractmethod
def update(self, candidate: State, d_idx: int, r_idx: int, s_idx: int):
"""
- Updates the weights associated with the applied destroy (d_idx) and
- repair (r_idx) operators.
+ Updates the selection schame based on the outcome of the applied
+ destroy (d_idx) and repair (r_idx) operators.
Parameters
----------
@@ -95,12 +97,17 @@ class OperatorSelectionScheme(ABC):
return NotImplemented
@staticmethod
- def _validate_arguments(num_destroy, num_repair, op_coupling):
+ def _validate_arguments(
+ num_destroy: int, num_repair: int, op_coupling: np.ndarray
+ ):
if num_destroy <= 0 or num_repair <= 0:
raise ValueError("Missing destroy or repair operators.")
- if op_coupling is None:
- return
+ if op_coupling.shape != (num_destroy, num_repair):
+ raise ValueError(
+ f"Coupling matrix of shape {op_coupling.shape}, expected "
+ f"{(num_destroy, num_repair)}."
+ )
# Destroy ops. must be coupled with at least one repair operator
d_idcs = np.flatnonzero(np.count_nonzero(op_coupling, axis=1) == 0)
diff --git a/alns/select/RandomSelect.py b/alns/select/RandomSelect.py
new file mode 100644
index 0000000..9a7dd2b
--- /dev/null
+++ b/alns/select/RandomSelect.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+from alns.select.OperatorSelectionScheme import OperatorSelectionScheme
+
+
+class RandomSelect(OperatorSelectionScheme):
+ """
+ Randomly selects operator pairs with uniform probability. The operator
+ pairs respect the operator coupling matrix.
+ """
+
+ def __call__(self, rnd, best, curr):
+ """
+ Selects a (destroy, repair) operator pair with uniform probability.
+ """
+ allowed = np.argwhere(self._op_coupling)
+ idx = rnd.randint(len(allowed))
+
+ return tuple(allowed[idx])
+
+ def update(self, candidate, d_idx, r_idx, s_idx):
+ pass # pragma: no cover
diff --git a/alns/select/__init__.py b/alns/select/__init__.py
index 8161c7f..9b405bc 100644
--- a/alns/select/__init__.py
+++ b/alns/select/__init__.py
@@ -1,4 +1,5 @@
from .AlphaUCB import AlphaUCB
from .OperatorSelectionScheme import OperatorSelectionScheme
+from .RandomSelect import RandomSelect
from .RouletteWheel import RouletteWheel
from .SegmentedRouletteWheel import SegmentedRouletteWheel
diff --git a/docs/source/select.rst b/docs/source/select.rst
index 55b2403..63e5d41 100644
--- a/docs/source/select.rst
+++ b/docs/source/select.rst
@@ -16,6 +16,9 @@ All operator selection schemes inherit from :class:`~alns.select.OperatorSelecti
.. automodule:: alns.select.AlphaUCB
:members:
+.. automodule:: alns.select.RandomSelect
+ :members:
+
.. automodule:: alns.select.RouletteWheel
:members:
| N-Wouda/ALNS | a6d63406a3af05e574ee15b5101959f93f96d8eb | diff --git a/alns/select/tests/test_random_select.py b/alns/select/tests/test_random_select.py
new file mode 100644
index 0000000..ffed712
--- /dev/null
+++ b/alns/select/tests/test_random_select.py
@@ -0,0 +1,70 @@
+import numpy as np
+import numpy.random as rnd
+from numpy.testing import assert_, assert_allclose, assert_approx_equal
+
+from alns.select import RandomSelect
+from alns.tests.states import Zero
+
+
+def test_op_coupling():
+ rnd_state = rnd.RandomState(1)
+
+ # For i in {1..5}, each destroy operator i is coupled with repair operator
+ # i. So only (i, i) pairs can be selected.
+ op_coupling = np.eye(5)
+ select = RandomSelect(5, 5, op_coupling)
+
+ for _ in range(1_000):
+ d_idx, r_idx = select(rnd_state, Zero(), Zero())
+ assert_(d_idx == r_idx)
+
+
+def test_uniform_selection():
+ rnd_state = rnd.RandomState(1)
+ histogram = np.zeros((2, 2))
+
+ select = RandomSelect(2, 2)
+
+ for _ in range(10_000):
+ d_idx, r_idx = select(rnd_state, Zero(), Zero())
+ histogram[d_idx, r_idx] += 1
+
+ # There are four operator pair combinations, so each pair should have a
+ # one in four chance of being selected. We allow a 0.01 margin since this
+ # is based on sampling.
+ histogram /= histogram.sum()
+ assert_allclose(histogram, 0.25, atol=0.01)
+
+
+def test_uniform_selection_op_coupling():
+ rnd_state = rnd.RandomState(1)
+ histogram = np.zeros((2, 2))
+
+ op_coupling = np.eye(2)
+ op_coupling[0, 1] = 1
+
+ select = RandomSelect(2, 2, op_coupling)
+
+ for _ in range(10_000):
+ d_idx, r_idx = select(rnd_state, Zero(), Zero())
+ histogram[d_idx, r_idx] += 1
+
+ # There are three OK operator pair combinations, so each such pair should
+ # have a one in three chance of being selected.
+ histogram /= histogram.sum()
+
+ # These should be sampled uniformly...
+ assert_approx_equal(histogram[0, 0], 1 / 3, significant=2)
+ assert_approx_equal(histogram[0, 1], 1 / 3, significant=2)
+ assert_approx_equal(histogram[1, 1], 1 / 3, significant=2)
+
+ # ...but this one's not allowed by the operator coupling matrix.
+ assert_approx_equal(histogram[1, 0], 0, significant=7)
+
+
+def test_single_operators():
+ rnd_state = rnd.RandomState(1)
+ select = RandomSelect(1, 1)
+
+ # Only one (destroy, repair) operator pair, so should return (0, 0).
+ assert_(select(rnd_state, Zero(), Zero()) == (0, 0))
diff --git a/alns/select/tests/test_roulette_wheel.py b/alns/select/tests/test_roulette_wheel.py
index 6e3b73a..2b43f21 100644
--- a/alns/select/tests/test_roulette_wheel.py
+++ b/alns/select/tests/test_roulette_wheel.py
@@ -105,3 +105,21 @@ def test_raise_uncoupled_destroy_op(op_coupling):
RouletteWheel(
[0, 0, 0, 0], 0, n_destroy, n_repair, op_coupling=op_coupling
)
+
+
[email protected](
+ "n_destroy, n_repair, op_coupling",
+ [
+ (1, 2, [0]), # missing repair column
+ (2, 2, [0, 0]), # missing destroy row
+ (2, 1, [0, 0]), # too many repair, too few destroy
+ ],
+)
+def test_raises_wrong_op_coupling_shape(n_destroy, n_repair, op_coupling):
+ with assert_raises(ValueError):
+ RouletteWheel([0, 0, 0, 0], 0, n_destroy, n_repair, op_coupling)
+
+
+def test_single_destroy_operator_coerces_coupling_matrix():
+ select = RouletteWheel([0, 0, 0, 0], 0, 1, 2, [1, 0])
+ assert_equal(select.op_coupling.shape, (1, 2))
diff --git a/alns/tests/test_alns.py b/alns/tests/test_alns.py
index d3df5c1..1ddd6d3 100644
--- a/alns/tests/test_alns.py
+++ b/alns/tests/test_alns.py
@@ -346,7 +346,7 @@ def test_nonnegative_max_runtime(max_runtime):
)
assert_almost_equal(
- sum(result.statistics.runtimes), max_runtime, decimal=3
+ sum(result.statistics.runtimes), max_runtime, decimal=2
)
| Introduce RandomSelect scheme
Introduce a selection scheme that selects destroy and repair operators with uniform probability. To be done after introducing the new selection interface. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"alns/select/tests/test_random_select.py::test_op_coupling",
"alns/select/tests/test_random_select.py::test_uniform_selection",
"alns/select/tests/test_random_select.py::test_uniform_selection_op_coupling",
"alns/select/tests/test_random_select.py::test_single_operators",
"alns/select/tests/test_roulette_wheel.py::test_properties[scores0-0-1-1-op_coupling0]",
"alns/select/tests/test_roulette_wheel.py::test_properties[scores1-0.2-2-2-op_coupling1]",
"alns/select/tests/test_roulette_wheel.py::test_properties[scores2-1-10-10-op_coupling2]",
"alns/select/tests/test_roulette_wheel.py::test_raises_invalid_decay[1.01]",
"alns/select/tests/test_roulette_wheel.py::test_raises_invalid_decay[-0.01]",
"alns/select/tests/test_roulette_wheel.py::test_raises_invalid_decay[-0.5]",
"alns/select/tests/test_roulette_wheel.py::test_raises_invalid_decay[1.5]",
"alns/select/tests/test_roulette_wheel.py::test_does_not_raise_valid_decay[0.0]",
"alns/select/tests/test_roulette_wheel.py::test_does_not_raise_valid_decay[0.25]",
"alns/select/tests/test_roulette_wheel.py::test_does_not_raise_valid_decay[0.5]",
"alns/select/tests/test_roulette_wheel.py::test_does_not_raise_valid_decay[0.75]",
"alns/select/tests/test_roulette_wheel.py::test_does_not_raise_valid_decay[1.0]",
"alns/select/tests/test_roulette_wheel.py::test_update[scores0-1-expected0]",
"alns/select/tests/test_roulette_wheel.py::test_update[scores1-0-expected1]",
"alns/select/tests/test_roulette_wheel.py::test_update[scores2-0.5-expected2]",
"alns/select/tests/test_roulette_wheel.py::test_select_coupled_operators[op_coupling0]",
"alns/select/tests/test_roulette_wheel.py::test_select_coupled_operators[op_coupling1]",
"alns/select/tests/test_roulette_wheel.py::test_select_coupled_operators[op_coupling2]",
"alns/select/tests/test_roulette_wheel.py::test_select_coupled_operators[op_coupling3]",
"alns/select/tests/test_roulette_wheel.py::test_select_coupled_operators[op_coupling4]",
"alns/select/tests/test_roulette_wheel.py::test_raise_uncoupled_destroy_op[op_coupling0]",
"alns/select/tests/test_roulette_wheel.py::test_raise_uncoupled_destroy_op[op_coupling1]",
"alns/select/tests/test_roulette_wheel.py::test_raises_wrong_op_coupling_shape[1-2-op_coupling0]",
"alns/select/tests/test_roulette_wheel.py::test_raises_wrong_op_coupling_shape[2-2-op_coupling1]",
"alns/select/tests/test_roulette_wheel.py::test_raises_wrong_op_coupling_shape[2-1-op_coupling2]",
"alns/select/tests/test_roulette_wheel.py::test_single_destroy_operator_coerces_coupling_matrix",
"alns/tests/test_alns.py::test_on_best_is_called",
"alns/tests/test_alns.py::test_add_destroy_operator",
"alns/tests/test_alns.py::test_add_destroy_operator_name",
"alns/tests/test_alns.py::test_add_repair_operator",
"alns/tests/test_alns.py::test_add_repair_operator_name",
"alns/tests/test_alns.py::test_raises_missing_destroy_operator",
"alns/tests/test_alns.py::test_raises_missing_repair_operator",
"alns/tests/test_alns.py::test_zero_max_iterations",
"alns/tests/test_alns.py::test_zero_max_runtime",
"alns/tests/test_alns.py::test_iterate_kwargs_are_correctly_passed_to_operators",
"alns/tests/test_alns.py::test_bugfix_pass_kwargs_to_on_best",
"alns/tests/test_alns.py::test_trivial_example",
"alns/tests/test_alns.py::test_fixed_seed_outcomes[0-0.01171]",
"alns/tests/test_alns.py::test_fixed_seed_outcomes[1-0.00011]",
"alns/tests/test_alns.py::test_fixed_seed_outcomes[2-0.01025]",
"alns/tests/test_alns.py::test_nonnegative_max_iterations[1]",
"alns/tests/test_alns.py::test_nonnegative_max_iterations[10]",
"alns/tests/test_alns.py::test_nonnegative_max_iterations[100]",
"alns/tests/test_alns.py::test_nonnegative_max_runtime[0.01]",
"alns/tests/test_alns.py::test_nonnegative_max_runtime[0.05]",
"alns/tests/test_alns.py::test_nonnegative_max_runtime[0.1]"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-11-08T10:03:07Z" | mit |
|
N-Wouda__ALNS-118 | diff --git a/alns/ALNS.py b/alns/ALNS.py
index 3ee933d..0154577 100644
--- a/alns/ALNS.py
+++ b/alns/ALNS.py
@@ -176,7 +176,7 @@ class ALNS:
.. [2] S. Røpke and D. Pisinger (2006). A unified heuristic for a large
class of vehicle routing problems with backhauls. *European
- Journal of Operational Research*, 171: 750–775, 2006.
+ Journal of Operational Research*, 171: 750–775.
"""
if len(self.destroy_operators) == 0 or len(self.repair_operators) == 0:
raise ValueError("Missing destroy or repair operators.")
diff --git a/alns/accept/RecordToRecordTravel.py b/alns/accept/RecordToRecordTravel.py
index 2a6604e..7474d0f 100644
--- a/alns/accept/RecordToRecordTravel.py
+++ b/alns/accept/RecordToRecordTravel.py
@@ -135,7 +135,13 @@ class RecordToRecordTravel(AcceptanceCriterion):
num_iters
The number of iterations that the ALNS algorithm will run.
method
- Updating method. Default 'linear'.
+ The updating method, one of {'linear', 'exponential'}. Default
+ 'linear'.
+
+ Raises
+ ------
+ ValueError
+ When the parameters do not meet requirements.
Returns
-------
@@ -156,11 +162,11 @@ class RecordToRecordTravel(AcceptanceCriterion):
if method == "linear":
step = (start_threshold - end_threshold) / num_iters
- elif method == "exponential":
+ else:
step = (end_threshold / start_threshold) ** (1 / num_iters)
logger.info(
- f"Autofit start_threshold {start_threshold:.2f}"
+ f"Autofit {method} RRT: start_threshold {start_threshold:.2f}, "
f"end_threshold {end_threshold:.2f}, step {step:.2f}."
)
diff --git a/alns/accept/SimulatedAnnealing.py b/alns/accept/SimulatedAnnealing.py
index 142f6a1..ba57ee1 100644
--- a/alns/accept/SimulatedAnnealing.py
+++ b/alns/accept/SimulatedAnnealing.py
@@ -102,7 +102,12 @@ class SimulatedAnnealing(AcceptanceCriterion):
@classmethod
def autofit(
- cls, init_obj: float, worse: float, accept_prob: float, num_iters: int
+ cls,
+ init_obj: float,
+ worse: float,
+ accept_prob: float,
+ num_iters: int,
+ method: str = "exponential",
) -> "SimulatedAnnealing":
"""
Returns an SA object with initial temperature such that there is a
@@ -118,20 +123,22 @@ class SimulatedAnnealing(AcceptanceCriterion):
init_obj
The initial solution objective.
worse
- Percentage (between 0 and 1) the candidate solution may be worse
- than initial solution for it to be accepted with probability
+ Percentage (in (0, 1), exclusive) the candidate solution may be
+ worse than initial solution for it to be accepted with probability
``accept_prob``.
accept_prob
- Initial acceptance probability for a solution at most ``worse``
- worse than the initial solution.
+ Initial acceptance probability (in [0, 1]) for a solution at most
+ ``worse`` worse than the initial solution.
num_iters
Number of iterations the ALNS algorithm will run.
+ method
+ The updating method, one of {'linear', 'exponential'}. Default
+ 'exponential'.
Raises
------
ValueError
- When ``worse`` not in [0, 1] or when ``accept_prob`` is not in
- (0, 1).
+ When the parameters do not meet requirements.
Returns
-------
@@ -154,12 +161,22 @@ class SimulatedAnnealing(AcceptanceCriterion):
if not (0 < accept_prob < 1):
raise ValueError("accept_prob outside (0, 1) not understood.")
- if num_iters < 0:
- raise ValueError("Negative number of iterations not understood.")
+ if num_iters <= 0:
+ raise ValueError("Non-positive num_iters not understood.")
+
+ if method not in ["linear", "exponential"]:
+ raise ValueError("Method must be one of ['linear', 'exponential']")
start_temp = -worse * init_obj / np.log(accept_prob)
- step = (1 / start_temp) ** (1 / num_iters)
- logger.info(f"Autofit start_temp {start_temp:.2f}, step {step:.2f}.")
+ if method == "linear":
+ step = (start_temp - 1) / num_iters
+ else:
+ step = (1 / start_temp) ** (1 / num_iters)
+
+ logger.info(
+ f"Autofit {method} SA: start_temp {start_temp:.2f}, "
+ f"step {step:.2f}."
+ )
- return cls(start_temp, 1, step, method="exponential")
+ return cls(start_temp, 1, step, method=method)
| N-Wouda/ALNS | 8ef4665c9cb27923497e11dccf3e75d9b58821bd | diff --git a/alns/accept/tests/test_simulated_annealing.py b/alns/accept/tests/test_simulated_annealing.py
index 46998aa..f7742db 100644
--- a/alns/accept/tests/test_simulated_annealing.py
+++ b/alns/accept/tests/test_simulated_annealing.py
@@ -65,7 +65,10 @@ def test_does_not_raise():
These sets of parameters should work correctly.
"""
SimulatedAnnealing(10, 5, 1, "exponential")
+ SimulatedAnnealing(10, 5, 1, "EXPONENTIAL")
+
SimulatedAnnealing(10, 5, 2, "linear")
+ SimulatedAnnealing(10, 5, 2, "LINEAR")
@mark.parametrize("step", range(10))
@@ -163,22 +166,24 @@ def test_accepts_generator_and_random_state():
@mark.parametrize(
- "worse,accept_prob,iters",
+ "worse, accept_prob, iters, method",
[
- (1, 0, 10), # zero accept prob
- (1, 1.2, 10), # prob outside unit interval
- (1, 1, 10), # unit accept prob
- (-1, 0.5, 10), # negative worse
- (0, -1, 10), # negative prob
- (1.5, 0.5, 10), # worse outside unit interval
- (1, 0.9, -10),
+ (1, 0, 10, "exponential"), # zero accept prob
+ (1, 1.2, 10, "exponential"), # prob outside unit interval
+ (1, 1, 10, "exponential"), # unit accept prob
+ (-1, 0.5, 10, "exponential"), # negative worse
+ (0, -1, 10, "exponential"), # negative prob
+ (1.5, 0.5, 10, "exponential"), # worse outside unit interval
+ (1, 0.9, -10, "exponential"), # negative iterations
+ (1, 0.9, 0, "exponential"), # zero iterations
+ (1, 0.9, 10, "abc"), # unknown method
],
-) # negative number of iterations
+)
def test_autofit_raises_for_invalid_inputs(
- worse: float, accept_prob: float, iters: int
+ worse: float, accept_prob: float, iters: int, method: str
):
with assert_raises(ValueError):
- SimulatedAnnealing.autofit(1.0, worse, accept_prob, iters)
+ SimulatedAnnealing.autofit(1.0, worse, accept_prob, iters, method)
@mark.parametrize(
@@ -206,3 +211,14 @@ def test_autofit_on_several_examples(
assert_almost_equal(sa.end_temperature, sa_end)
assert_almost_equal(sa.step, sa_step)
assert_equal(sa.method, "exponential")
+
+
+def test_linear_autofit():
+ sa = SimulatedAnnealing.autofit(100, 0.05, 0.5, 100, "linear")
+ sa_start = -0.05 * 100 / np.log(0.5)
+ sa_step = (sa_start - 1) / 100
+
+ assert_almost_equal(sa.start_temperature, sa_start)
+ assert_almost_equal(sa.end_temperature, 1)
+ assert_almost_equal(sa.step, sa_step)
+ assert_equal(sa.method, "linear")
| Add linear updating method to SimulatedAnnealing.autofit
`SimulatedAnnealing.autofit` assumes that the final temperature always goes to 1:
https://github.com/N-Wouda/ALNS/blob/8ef4665c9cb27923497e11dccf3e75d9b58821bd/alns/accept/SimulatedAnnealing.py#L104-L128
It would be nice to have an `end_worse` argument for simulated annealing, so that we can have final temperatures other than 1. The current `worse` parameter could be rephrased to `start_worse`.
- Issue: I don't know if there is a good default value for `start_worse` to make this a non-breaking change
It would also be nice to have a `method` argument to choose which updating method should be used. The current implementation assumes exponential updating. This is definitely not breaking.
For reference, RRT already has the two above features:
https://github.com/N-Wouda/ALNS/blob/8ef4665c9cb27923497e11dccf3e75d9b58821bd/alns/accept/RecordToRecordTravel.py#L109-L167 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[0--1-10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1.5-0.5-10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-0.9-0-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[-1-0.5-10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-0.9--10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_linear_autofit",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-0.9-10-abc]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-1.2-10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-0-10-exponential]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_raises_for_invalid_inputs[1-1-10-exponential]"
] | [
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[6]",
"alns/accept/tests/test_simulated_annealing.py::test_step[9]",
"alns/accept/tests/test_simulated_annealing.py::test_step[3]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[1]",
"alns/accept/tests/test_simulated_annealing.py::test_step[1]",
"alns/accept/tests/test_simulated_annealing.py::test_raises_negative_parameters[1-1--1]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[2]",
"alns/accept/tests/test_simulated_annealing.py::test_raises_explosive_step",
"alns/accept/tests/test_simulated_annealing.py::test_accepts_generator_and_random_state",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[5]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[3]",
"alns/accept/tests/test_simulated_annealing.py::test_linear_random_solutions",
"alns/accept/tests/test_simulated_annealing.py::test_raises_negative_parameters[1--1-1]",
"alns/accept/tests/test_simulated_annealing.py::test_step[8]",
"alns/accept/tests/test_simulated_annealing.py::test_step[4]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[1]",
"alns/accept/tests/test_simulated_annealing.py::test_does_not_raise",
"alns/accept/tests/test_simulated_annealing.py::test_exponential_random_solutions",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[3]",
"alns/accept/tests/test_simulated_annealing.py::test_step[7]",
"alns/accept/tests/test_simulated_annealing.py::test_raises_start_smaller_than_end",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[9]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[8]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[7]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[5]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_on_several_examples[1000-1-0.9-1]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[2]",
"alns/accept/tests/test_simulated_annealing.py::test_step[0]",
"alns/accept/tests/test_simulated_annealing.py::test_raises_negative_parameters[-1-1-1]",
"alns/accept/tests/test_simulated_annealing.py::test_accepts_better",
"alns/accept/tests/test_simulated_annealing.py::test_step[5]",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[4]",
"alns/accept/tests/test_simulated_annealing.py::test_step[2]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[9]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[4]",
"alns/accept/tests/test_simulated_annealing.py::test_autofit_on_several_examples[1000-0.5-0.05-1]",
"alns/accept/tests/test_simulated_annealing.py::test_temperature_boundary",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[8]",
"alns/accept/tests/test_simulated_annealing.py::test_end_temperature[7]",
"alns/accept/tests/test_simulated_annealing.py::test_step[6]",
"alns/accept/tests/test_simulated_annealing.py::test_accepts_equal",
"alns/accept/tests/test_simulated_annealing.py::test_start_temperature[6]"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-11-17T22:27:12Z" | mit |
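
A minimal standalone sketch of the temperature schedule the ALNS-149 patch above adds to `autofit`: the start temperature and both step rules, checked against the values in the new `test_linear_autofit` case. The helper name `autofit_steps` is illustrative only and is not part of the ALNS API.

```python
import numpy as np


def autofit_steps(init_obj, worse, accept_prob, num_iters):
    """Start temperature plus the linear and exponential step sizes.

    The start temperature is chosen so that a candidate ``worse`` (a fraction)
    worse than ``init_obj`` is accepted with probability ``accept_prob``;
    the end temperature is fixed at 1.
    """
    start_temp = -worse * init_obj / np.log(accept_prob)
    linear_step = (start_temp - 1) / num_iters               # T_{k+1} = T_k - step
    exponential_step = (1 / start_temp) ** (1 / num_iters)   # T_{k+1} = T_k * step
    return start_temp, linear_step, exponential_step


# Same values as test_linear_autofit: autofit(100, 0.05, 0.5, 100, "linear")
start, lin_step, exp_step = autofit_steps(100, 0.05, 0.5, 100)
print(start, lin_step, exp_step)  # approx. 7.21, 0.062, 0.98
```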
|
N-Wouda__ALNS-156 | diff --git a/alns/accept/AdaptiveThreshold.py b/alns/accept/AdaptiveThreshold.py
new file mode 100644
index 0000000..7cf0abe
--- /dev/null
+++ b/alns/accept/AdaptiveThreshold.py
@@ -0,0 +1,82 @@
+import collections
+from statistics import mean
+from typing import Deque, List
+
+
+class AdaptiveThreshold:
+ """
+ The Adaptive Threshold (AT) criterion accepts solutions
+ if the candidate solution has a value lower than an
+ adaptive threshold. The adaptive threshold is computed as:
+
+ ''adaptive_threshold = best_solution +
+ eta_parameter * (average_solution - best_solution)''
+
+ where
+ ``best_solution`` is the best solution received so far,
+ ``average_solution`` is the average of the last
+ ``gamma_parameter`` solutions received, and
+ ``eta_parameter`` is a parameter between 0 and 1,
+ the greater the value of
+ ``eta_parameter``, the more likely it is that a solution
+ will be accepted.
+
+ Each time a new solution is received,
+ the threshold is updated. The average solution
+ and best solution are taken by the last "gamma_parameter"
+ solutions received. If the number of solutions received
+ is less than"gamma_parameter" then the threshold
+ is updated with the average of all the solutions
+ received so far.
+
+ The implementation is based on the description of AT in [1].
+
+ Parameters
+ ----------
+ eta: float
+ Used to update/tune the threshold,
+ the greater the value of ``eta_parameter``,
+ the more likely it is that a solution will be accepted.
+ gamma: int
+ Used to update the threshold, the number of solutions
+ received to compute the average & best solution.
+
+ References
+ ----------
+ .. [1] Vinícius R. Máximo, Mariá C.V. Nascimento 2021.
+ "A hybrid adaptive iterated local search with
+ diversification control to the capacitated
+ vehicle routing problem."
+ *European Journal of Operational Research*
+ 294 (3): 1108 - 1119.
+ """
+
+ def __init__(self, eta: float, gamma: int):
+ if not (0 <= eta <= 1):
+ raise ValueError("eta must be in [0, 1].")
+
+ if gamma <= 0:
+ raise ValueError("gamma must be positive.")
+
+ self._eta = eta
+ self._gamma = gamma
+ self._history: Deque[float] = collections.deque(maxlen=gamma)
+
+ @property
+ def eta(self) -> float:
+ return self._eta
+
+ @property
+ def gamma(self) -> int:
+ return self._gamma
+
+ @property
+ def history(self) -> List[float]:
+ return list(self._history)
+
+ def __call__(self, rnd, best, current, candidate) -> bool:
+ self._history.append(candidate.objective())
+ best_solution = min(self._history)
+ avg_solution = mean(self._history)
+ threshold = best_solution + self._eta * (avg_solution - best_solution)
+ return candidate.objective() <= threshold
| N-Wouda/ALNS | bdcd64b69d23b5566b3354ad7edd9935c515137e | diff --git a/alns/accept/tests/test_adaptive_threshold.py b/alns/accept/tests/test_adaptive_threshold.py
new file mode 100644
index 0000000..e22ad39
--- /dev/null
+++ b/alns/accept/tests/test_adaptive_threshold.py
@@ -0,0 +1,122 @@
+import numpy.random as rnd
+from numpy.testing import assert_, assert_equal, assert_raises
+from pytest import mark
+
+from alns.accept.AdaptiveThreshold import AdaptiveThreshold
+from alns.tests.states import One, Two, VarObj, Zero
+
+
[email protected](
+ "eta, gamma",
+ [
+ (-1, 3), # eta cannot be < 0
+ (2, 3), # eta cannot be > 1
+ (0.5, -2), # gamma cannot be < 0
+ ],
+)
+def test_raise_invalid_parameters(eta, gamma):
+ with assert_raises(ValueError):
+ AdaptiveThreshold(eta=eta, gamma=gamma)
+
+
[email protected]("eta, gamma", [(1, 3), (0.4, 4)])
+def test_no_raise_valid_parameters(eta, gamma):
+ AdaptiveThreshold(eta=eta, gamma=gamma)
+
+
[email protected]("eta", [0, 0.01, 0.5, 0.99, 1])
+def test_eta(eta):
+ adaptive_threshold = AdaptiveThreshold(eta, 3)
+ assert_equal(adaptive_threshold.eta, eta)
+
+
[email protected]("gamma", range(1, 10))
+def test_gamma(gamma):
+ adaptive_threshold = AdaptiveThreshold(0.5, gamma)
+ assert_equal(adaptive_threshold.gamma, gamma)
+
+
+def test_accepts_below_threshold():
+ adaptive_threshold = AdaptiveThreshold(eta=0.5, gamma=4)
+ adaptive_threshold(rnd.RandomState(), One(), One(), One())
+ adaptive_threshold(rnd.RandomState(), One(), One(), Zero())
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), Zero())
+
+ # The threshold is set at 0 + 0.5 * (0.5 - 0) = 0.25
+ assert_(result)
+
+
+def test_rejects_above_threshold():
+ adaptive_threshold = AdaptiveThreshold(eta=0.5, gamma=4)
+ adaptive_threshold(rnd.RandomState(), One(), One(), Two())
+ adaptive_threshold(rnd.RandomState(), One(), One(), Zero())
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), One())
+
+ # The threshold is set at 0 + 0.5 * (1 - 0) = 0.5
+ assert_(not result)
+
+
+def test_accepts_equal_threshold():
+ adaptive_threshold = AdaptiveThreshold(eta=0.5, gamma=4)
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7120))
+
+ # The threshold is set at 7100 + 0.5 * (7140 - 7100) = 7120
+ assert_(result)
+
+
+def test_accepts_over_gamma_candidates():
+ adaptive_threshold = AdaptiveThreshold(eta=0.2, gamma=3)
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7000))
+
+ # The threshold is set at 7000 + 0.2 * (7133.33 - 7000) = 7013.33
+ assert_(result)
+
+
+def test_rejects_over_gamma_candidates():
+ adaptive_threshold = AdaptiveThreshold(eta=0.2, gamma=3)
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7000))
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+
+ # The threshold is set at 7000 + 0.2 * (7100 - 7000) = 7020
+ assert_(not result)
+
+
+def test_evaluate_consecutive_solutions():
+ """
+ Test if AT correctly accepts and rejects consecutive solutions.
+ """
+ adaptive_threshold = AdaptiveThreshold(eta=0.5, gamma=4)
+
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ # The threshold is set at 7100, hence the solution is accepted
+ assert_(result)
+
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ # The threshold is set at 7125, hence the solution is accepted
+ assert_(not result)
+
+ result = adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7120))
+ # The threshold is set at 7120, hence the solution is accepted
+ assert_(result)
+
+
+def test_history():
+ """
+ Test if AT correctly stores the history of the thresholds correctly.
+ """
+ adaptive_threshold = AdaptiveThreshold(eta=0.5, gamma=4)
+
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7120))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7100))
+ adaptive_threshold(rnd.RandomState(), One(), One(), VarObj(7200))
+ assert_equal(adaptive_threshold.history, [7200, 7120, 7100, 7200])
| Adaptive threshold acceptance criterion
[Máximo et al. (2021)](https://arxiv.org/abs/2012.11021v1) implements a threshold-based acceptance criterion. They do not give a specific name, but the threshold changes adaptively during the search, which is why I named it "adaptive threshold".
>This threshold is calculated according to the average quality of the solutions obtained after the local search, which is denoted by $\bar{f}$. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"alns/accept/tests/test_adaptive_threshold.py::test_raise_invalid_parameters[-1-3]",
"alns/accept/tests/test_adaptive_threshold.py::test_raise_invalid_parameters[2-3]",
"alns/accept/tests/test_adaptive_threshold.py::test_raise_invalid_parameters[0.5--2]",
"alns/accept/tests/test_adaptive_threshold.py::test_no_raise_valid_parameters[1-3]",
"alns/accept/tests/test_adaptive_threshold.py::test_no_raise_valid_parameters[0.4-4]",
"alns/accept/tests/test_adaptive_threshold.py::test_eta[0]",
"alns/accept/tests/test_adaptive_threshold.py::test_eta[0.01]",
"alns/accept/tests/test_adaptive_threshold.py::test_eta[0.5]",
"alns/accept/tests/test_adaptive_threshold.py::test_eta[0.99]",
"alns/accept/tests/test_adaptive_threshold.py::test_eta[1]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[1]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[2]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[3]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[4]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[5]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[6]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[7]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[8]",
"alns/accept/tests/test_adaptive_threshold.py::test_gamma[9]",
"alns/accept/tests/test_adaptive_threshold.py::test_accepts_below_threshold",
"alns/accept/tests/test_adaptive_threshold.py::test_rejects_above_threshold",
"alns/accept/tests/test_adaptive_threshold.py::test_accepts_equal_threshold",
"alns/accept/tests/test_adaptive_threshold.py::test_accepts_over_gamma_candidates",
"alns/accept/tests/test_adaptive_threshold.py::test_rejects_over_gamma_candidates",
"alns/accept/tests/test_adaptive_threshold.py::test_evaluate_consecutive_solutions",
"alns/accept/tests/test_adaptive_threshold.py::test_history"
] | [] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
} | "2023-06-20T15:36:50Z" | mit |
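
A worked run of the acceptance rule described in the ALNS-156 record, reproducing the 7100/7200/7120 sequence from the new `test_evaluate_consecutive_solutions` test. This is a self-contained sketch of the same computation; the helper name `at_accepts` is illustrative and is not the library's `AdaptiveThreshold` class.

```python
from collections import deque
from statistics import mean


def at_accepts(history, candidate, eta):
    """Accept if the candidate is at most best + eta * (average - best),
    where best/average are taken over the recorded history."""
    history.append(candidate)
    best, avg = min(history), mean(history)
    return candidate <= best + eta * (avg - best)


history = deque(maxlen=4)                  # gamma = 4
print(at_accepts(history, 7100, eta=0.5))  # True:  threshold = 7100
print(at_accepts(history, 7200, eta=0.5))  # False: threshold = 7100 + 0.5 * 50 = 7125
print(at_accepts(history, 7120, eta=0.5))  # True:  threshold = 7100 + 0.5 * 40 = 7120
```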
|
N-Wouda__ALNS-21 | diff --git a/alns/Result.py b/alns/Result.py
index a788279..beec688 100644
--- a/alns/Result.py
+++ b/alns/Result.py
@@ -165,9 +165,9 @@ class Result:
operator_names = list(operator_counts.keys())
operator_counts = np.array(list(operator_counts.values()))
- cumulative_counts = operator_counts.cumsum(axis=1)
+ cumulative_counts = operator_counts[:, :num_types].cumsum(axis=1)
- ax.set_xlim(right=np.sum(operator_counts, axis=1).max())
+ ax.set_xlim(right=cumulative_counts[:, -1].max())
for idx in range(num_types):
widths = operator_counts[:, idx]
diff --git a/setup.py b/setup.py
index 7c2d8cb..adb3f2b 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ import setuptools
MAJOR = 1
MINOR = 2
-MAINTENANCE = 0
+MAINTENANCE = 1
MODIFIER = ""
VERSION = "{0}.{1}.{2}{3}".format(MAJOR, MINOR, MAINTENANCE, MODIFIER)
| N-Wouda/ALNS | c669e907808fa02138a55a68877d02021272af30 | diff --git a/alns/tests/test_result.py b/alns/tests/test_result.py
index 7f309f2..bc7c663 100644
--- a/alns/tests/test_result.py
+++ b/alns/tests/test_result.py
@@ -13,7 +13,7 @@ from .states import Sentinel
try:
from matplotlib.testing.decorators import check_figures_equal
except ImportError:
- def check_figures_equal(*args, **kwargs): # placeholder
+ def check_figures_equal(*args, **kwargs): # placeholder
return check_figures_equal
@@ -69,19 +69,21 @@ def get_objective_plot(ax, *args, **kwargs):
ax.set_xlabel("Iteration (#)")
-def get_operator_plot(figure, destroy, repair, title=None, **kwargs):
+def get_operator_plot(figure, destroy, repair, legend=None, suptitle=None,
+ **kwargs):
"""
Helper method.
"""
- def _helper(ax, operator_counts, title, **kwargs):
+
+ def _helper(ax, operator_counts, title):
operator_names = list(operator_counts.keys())
operator_counts = np.array(list(operator_counts.values()))
- cumulative_counts = operator_counts.cumsum(axis=1)
+ cumulative_counts = operator_counts[:, :len(legend)].cumsum(axis=1)
- ax.set_xlim(right=np.sum(operator_counts, axis=1).max())
+ ax.set_xlim(right=cumulative_counts[:, -1].max())
- for idx in range(4):
+ for idx in range(len(legend)):
widths = operator_counts[:, idx]
starts = cumulative_counts[:, idx] - widths
@@ -94,16 +96,19 @@ def get_operator_plot(figure, destroy, repair, title=None, **kwargs):
ax.set_xlabel("Iterations where operator resulted in this outcome (#)")
ax.set_ylabel("Operator")
- if title is not None:
- figure.suptitle(title)
+ if suptitle is not None:
+ figure.suptitle(suptitle)
+
+ if legend is None:
+ legend = ["Best", "Better", "Accepted", "Rejected"]
d_ax, r_ax = figure.subplots(nrows=2)
- _helper(d_ax, destroy, "Destroy operators", **kwargs)
- _helper(r_ax, repair, "Repair operators", **kwargs)
+ _helper(d_ax, destroy, "Destroy operators")
+ _helper(r_ax, repair, "Repair operators")
- figure.legend(["Best", "Better", "Accepted", "Rejected"],
- ncol=4,
+ figure.legend(legend,
+ ncol=len(legend),
loc="lower center")
@@ -161,7 +166,7 @@ def test_plot_objectives_kwargs(fig_test, fig_ref):
correctly passed to the ``plot`` method.
"""
result = get_result(Sentinel())
- kwargs = dict(lw=5, marker='*', title="Test title")
+ kwargs = dict(lw=5, marker='*')
# Tested plot
result.plot_objectives(fig_test.subplots(), **kwargs)
@@ -241,7 +246,7 @@ def test_plot_operator_counts_title(fig_test, fig_ref):
get_operator_plot(fig_ref,
result.statistics.destroy_operator_counts,
result.statistics.repair_operator_counts,
- title="A random test title")
+ suptitle="A random test title")
@pytest.mark.matplotlib
@@ -278,3 +283,24 @@ def test_plot_operator_counts_kwargs(fig_test, fig_ref):
result.statistics.destroy_operator_counts,
result.statistics.repair_operator_counts,
**kwargs)
+
+
[email protected]
[email protected](sys.version_info < (3, 5),
+ reason="Plot testing is not reliably available for Py3.4")
+@check_figures_equal(extensions=['png'])
+def test_plot_operator_counts_legend_length(fig_test, fig_ref):
+ """
+ Tests if the length of the passed-in legend is used to determine which
+ counts to show.
+ """
+ result = get_result(Sentinel())
+
+ # Tested plot
+ result.plot_operator_counts(fig_test, legend=["Best"])
+
+ # Reference plot
+ get_operator_plot(fig_ref,
+ result.statistics.destroy_operator_counts,
+ result.statistics.repair_operator_counts,
+ legend=["Best"])
| Fix Result.plot_operator_counts plot looking 'weird'
E.g. when not plotting all possible outcome types, the axes are still fixed as if one did. Like so:
![image](https://user-images.githubusercontent.com/16272507/74932339-d684dd00-53e1-11ea-8fb2-8c6a08e098ea.png)
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"alns/tests/test_result.py::test_plot_operator_counts_legend_length[png]"
] | [
"alns/tests/test_result.py::test_result_state",
"alns/tests/test_result.py::test_raises_missing_statistics",
"alns/tests/test_result.py::test_plot_objectives[png]",
"alns/tests/test_result.py::test_plot_objectives_kwargs[png]",
"alns/tests/test_result.py::test_plot_objectives_default_axes",
"alns/tests/test_result.py::test_plot_operator_counts[png]",
"alns/tests/test_result.py::test_plot_operator_counts_raises_legend",
"alns/tests/test_result.py::test_plot_operator_counts_title[png]",
"alns/tests/test_result.py::test_plot_operator_counts_default_figure",
"alns/tests/test_result.py::test_plot_operator_counts_kwargs[png]"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2020-02-23T14:52:00Z" | mit |
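
A small numpy illustration of the ALNS-21 fix above: the x-axis limit should come from the cumulative counts of the outcome types actually plotted, not from the sum over all four types. The counts below are made up purely for illustration.

```python
import numpy as np

# Rows are operators; columns are best / better / accepted / rejected counts.
operator_counts = np.array([[3, 5, 10, 20],
                            [1, 2, 12, 30]])

num_types = 1  # e.g. plotting only the "Best" outcome, as in the new test

# Old behaviour: limit taken from the sum over *all* outcome types.
old_xlim = np.sum(operator_counts, axis=1).max()             # 45

# Fixed behaviour: cumulative sum over the plotted types only.
cumulative_counts = operator_counts[:, :num_types].cumsum(axis=1)
new_xlim = cumulative_counts[:, -1].max()                    # 3

print(old_xlim, new_xlim)
```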
|
N3PDF__vegasflow-82 | diff --git a/doc/source/how_to.rst b/doc/source/how_to.rst
index 488d6cc..3bc787d 100644
--- a/doc/source/how_to.rst
+++ b/doc/source/how_to.rst
@@ -189,6 +189,22 @@ The full list of integration algorithms and wrappers can be consulted at: :ref:`
Tips and Tricks
===============
+Changing the integration limits
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default ``VegasFlow`` provides random number only in the 0 to 1 range (and so all integrals are expected to be integrals from 0 to 1).
+But it is possible to choose any other ranges by passing to the initializer of the algorithm the ``xmin`` and ``xman`` variables.
+
+Note that if any limit is to be changed all ``xmin`` and ``xmax`` must be provided:
+
+.. code-block:: python
+
+ from vegasflow import VegasFlow
+
+ dimensions = 2
+ vegas_instance = VegasFlow(dimensions, n_calls, xmin=[0, -4], xmax=[1, 10])
+
+
Seeding the random number generator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/examples/simgauss_tf.py b/examples/simgauss_tf.py
index 4232a00..78fa172 100644
--- a/examples/simgauss_tf.py
+++ b/examples/simgauss_tf.py
@@ -9,6 +9,7 @@ import time
import numpy as np
import tensorflow as tf
from vegasflow.vflow import vegas_wrapper
+from vegasflow import PlainFlow
from vegasflow.plain import plain_wrapper
@@ -35,13 +36,6 @@ if __name__ == "__main__":
"""Testing several different integrations"""
print(f"VEGAS MC, ncalls={ncalls}:")
start = time.time()
- ncalls = 10*ncalls
r = vegas_wrapper(symgauss, dim, n_iter, ncalls)
end = time.time()
print(f"Vegas took: time (s): {end-start}")
-
-# print(f"Plain MC, ncalls={ncalls}:")
-# start = time.time()
-# r = plain_wrapper(symgauss, dim, n_iter, ncalls)
-# end = time.time()
-# print(f"Plain took: time (s): {end-start}")
diff --git a/src/vegasflow/monte_carlo.py b/src/vegasflow/monte_carlo.py
index f8bc328..029c8f3 100644
--- a/src/vegasflow/monte_carlo.py
+++ b/src/vegasflow/monte_carlo.py
@@ -49,6 +49,8 @@ from vegasflow.configflow import (
TECH_CUT,
float_me,
int_me,
+ fone,
+ fzero,
)
@@ -114,6 +116,8 @@ class MonteCarloFlow(ABC):
events_limit=MAX_EVENTS_LIMIT,
list_devices=DEFAULT_ACTIVE_DEVICES, # pylint: disable=dangerous-default-value
verbose=True,
+ xmin=None,
+ xmax=None,
**kwargs,
):
if "simplify_signature" in kwargs:
@@ -151,6 +155,24 @@ class MonteCarloFlow(ABC):
else:
self.devices = None
+ if xmin is not None or xmax is not None:
+ # If the ranges are provided, check that they are correct
+ if xmin is None or xmax is None:
+ raise ValueError(
+ "Both xmin and xmax must be provided if the integration limits are to change"
+ )
+ if not (len(xmin) == len(xmax) == n_dim):
+ raise ValueError("The integration limits must be given for all dimensions")
+ self._xmin = float_me(xmin)
+ self._xdelta = float_me(xmax) - float_me(xmin)
+ if any(self._xdelta < 0.0):
+ raise ValueError(f"No xmin ({xmin}) can be bigger than xmax ({xmax})")
+ self._xdeltajac = tf.reduce_prod(self._xdelta)
+ else:
+ self._xmin = None
+ self._xdelta = None
+ self._xdeltajac = None
+
# Note:
# The number of events to run in a single iteration is `n_events`
# while the total number of events to be run per step (so, for instance, per GPU call)
@@ -203,7 +225,7 @@ class MonteCarloFlow(ABC):
"""The default jacobian is 1 / total number of events"""
return float_me([1.0 / self.n_events])
- def generate_random_array(self, n_events):
+ def generate_random_array(self, n_events, *args):
"""External interface for the generation of random
points as a 2D array of (n_events, n_dim).
It calls the internal version of ``_generate_random_array``
@@ -216,16 +238,14 @@ class MonteCarloFlow(ABC):
Returns
-------
`rnds`: array of (n_events, n_dim) random points
- `idx` : index associated to each random point
`p(x)` : p(x) associated to the random points
"""
- rnds, idx, xjac_raw = self._generate_random_array(n_events)
- # returns a p(x) corresponding to the number of events
- # the algorithm was trained with, reweight
- xjac = xjac_raw / self.xjac / n_events
- return rnds, idx, xjac
+ rnds, xjac_raw, *extra = self._generate_random_array(n_events, *args)
+ # Since the n_events of this method might not be the "evaluation" value, reweight
+ xjac = xjac_raw / (self.xjac * n_events)
+ return rnds, xjac
- def _generate_random_array(self, n_events):
+ def _generate_random_array(self, n_events, *args):
"""Generate a 2D array of (n_events, n_dim) points
For the weight of the given point, this function is considered
as part of an integration with ``self.n_events`` calls.
@@ -240,11 +260,18 @@ class MonteCarloFlow(ABC):
`idx` : index associated to each random point
`wgt` : wgt associated to the random point
"""
- rnds = tf.random.uniform(
+ rnds_raw = tf.random.uniform(
(n_events, self.n_dim), minval=TECH_CUT, maxval=1.0 - TECH_CUT, dtype=DTYPE
)
- idx = 0
- return rnds, idx, self.xjac
+ # Now allow for the algorithm to produce the random numbers for the integration
+ rnds, wgts_raw, *extra = self._digest_random_generation(rnds_raw, *args)
+
+ wgts = wgts_raw * self.xjac
+ if self._xdelta is not None:
+ # Now apply integration limits
+ rnds = self._xmin + rnds * self._xdelta
+ wgts *= self._xdeltajac
+ return rnds, wgts, *extra
#### Abstract methods
@abstractmethod
@@ -259,6 +286,22 @@ class MonteCarloFlow(ABC):
result = self.event()
return result, pow(result, 2)
+ def _digest_random_generation(self, xrand, *args):
+ """All implemented algorithms will take a vector of uniform noise (n_events, n_dim)
+ and make it into a vector of random numbers (n_events, n_dim) with an associated weight.
+
+ It must return at least a tensor (n_events, n_dim) of random numbers
+ and of weights (n_events,) and can return any extra parameters
+ which will pass untouched by _generate_random_array
+ """
+ return xrand, 1.0 # , any extra param
+
+ def _apply_integration_limits(self, rand):
+ """Apply the integration limits (if any)
+ Receives a tensor of random numbers (n_events, n_dim) and returns
+ a transformed array (n_events, n_dim) and the associated jacobian (n_events,)
+ """
+
def _can_run_vectorial(self, expected_shape=None):
"""Accepting vectorial integrands depends on the algorithm,
if an algorithm can run on vectorial algorithms it should implement this method and return True"""
@@ -266,7 +309,7 @@ class MonteCarloFlow(ABC):
#### Integration management
def set_seed(self, seed):
- """Sets the interation seed"""
+ """Sets the random seed"""
tf.random.set_seed(seed)
#### Device management methods
@@ -345,7 +388,7 @@ class MonteCarloFlow(ABC):
"""Modifies the attributes of the integration so that it can be compiled inside
Tensorflow functions (and, therefore, gradients calculated)
Returns a reference to `run_event`, a method that upon calling it with no arguments
- will produce results and uncertainties for an intergation iteration of ncalls number of events
+ will produce results and uncertainties for an integration iteration of ncalls number of events
"""
if self.distribute:
raise ValueError("Differentiation is not compatible with distribution")
@@ -623,7 +666,7 @@ if you believe this to be a bug please open an issue in https://github.com/N3PDF
monte carlo error
Note: it is possible not to pass any histogram variable and still fill
- some histogram variable at integration time, but then it is the responsability
+ some histogram variable at integration time, but then it is the responsibility
of the integrand to empty the histograms each iteration and accumulate them.
"""
diff --git a/src/vegasflow/plain.py b/src/vegasflow/plain.py
index 1ddd762..d9fd081 100644
--- a/src/vegasflow/plain.py
+++ b/src/vegasflow/plain.py
@@ -21,7 +21,7 @@ class PlainFlow(MonteCarloFlow):
n_events = ncalls
# Generate all random number for this iteration
- rnds, _, xjac = self._generate_random_array(n_events)
+ rnds, xjac = self._generate_random_array(n_events)
# Compute the integrand
tmp = integrand(rnds, weight=xjac) * xjac
diff --git a/src/vegasflow/vflow.py b/src/vegasflow/vflow.py
index a400098..48323c8 100644
--- a/src/vegasflow/vflow.py
+++ b/src/vegasflow/vflow.py
@@ -1,5 +1,5 @@
"""
- This module contains the VegasFlow class and all its auxuliary functions
+ This module contains the VegasFlow class and all its auxiliary functions
The main interfaces of this class are the class `VegasFlow` and the
`vegas_wrapper`
@@ -103,10 +103,10 @@ def _generate_random_array(rnds, divisions):
-------
x: array (None, n_dim)
Vegas random output
- div_index: array (None, n_dim)
- division index in which each (n_dim) set of random numbers fall
w: array (None,)
Weight of each set of (n_dim) random numbers
+ div_index: array (None, n_dim)
+ division index in which each (n_dim) set of random numbers fall
"""
# Get the boundaries of the random numbers
# reg_i = fzero
@@ -121,7 +121,7 @@ def _generate_random_array(rnds, divisions):
# Compute the random number between the limits
# commented, for now only from 0 to 1
# x = reg_i + rand_x * (reg_f - reg_i)
- return x, ind_xn, weights
+ return x, weights, ind_xn
@tf.function(
@@ -362,12 +362,11 @@ class VegasFlow(MonteCarloFlow):
new_divisions = refine_grid_per_dimension(arr_res2[j, :], self.divisions[j, :])
self.divisions[j, :].assign(new_divisions)
- def _generate_random_array(self, n_events):
- """Uses the internal array to generate ``n_events`` random numbers"""
- rnds, _, xjac = super()._generate_random_array(n_events)
- # Pass them through the Vegas digestion
- x, ind, w = _generate_random_array(rnds, self.divisions)
- return x, ind, w * xjac
+ def _digest_random_generation(self, rnds):
+ """Generates ``n_events`` random numbers sampled in the
+ adapted Vegas Grid"""
+ x, w, ind = _generate_random_array(rnds, self.divisions)
+ return x, w, ind
def _importance_sampling_array_filling(self, results2, indices):
"""Receives an array of results squared for every event
@@ -408,7 +407,7 @@ class VegasFlow(MonteCarloFlow):
n_events = ncalls
# Generate all random number for this iteration
- x, ind, xjac = self._generate_random_array(n_events)
+ x, xjac, ind = self._generate_random_array(n_events)
# Now compute the integrand
int_result = integrand(x, weight=xjac)
@@ -422,7 +421,7 @@ class VegasFlow(MonteCarloFlow):
res = tf.reduce_sum(tmp, axis=0)
res2 = tf.reduce_sum(tmp2, axis=0)
- # If this is a vectorial integrnad, make sure that only the main dimenison
+ # If this is a vectorial integrand, make sure that only the main dimension
# is used for the grid training
if self._vectorial:
tmp2 = tmp2[:, self._main_dimension]
diff --git a/src/vegasflow/vflowplus.py b/src/vegasflow/vflowplus.py
index e3fdb58..57e228f 100644
--- a/src/vegasflow/vflowplus.py
+++ b/src/vegasflow/vflowplus.py
@@ -60,9 +60,9 @@ def generate_samples_in_hypercubes(rnds, n_strat, n_ev, hypercubes, divisions):
`x` : random numbers collocated in hypercubes
`w` : weight of each event
`ind`: division index in which each (n_dim) set of random numbers fall
- `segm` : segmentantion for later computations
+ `segm` : segmentation for later computations
"""
- # Use the event-per-hypercube information to fix each random event to a hypercub
+ # Use the event-per-hypercube information to fix each random event to a hypercube
indices = tf.repeat(tf.range(tf.shape(hypercubes, out_type=DTYPEINT)[0]), n_ev)
points = float_me(tf.gather(hypercubes, indices))
n_evs = float_me(tf.gather(n_ev, indices))
@@ -72,11 +72,11 @@ def generate_samples_in_hypercubes(rnds, n_strat, n_ev, hypercubes, divisions):
ind_xn, x, weights = importance_sampling_digest(xn, divisions)
- # Reweight taking into account the number of events per hypercub
+ # Reweighs taking into account the number of events per hypercube
final_weights = weights / n_evs
segm = indices
- return x, ind_xn, final_weights, segm
+ return x, final_weights, ind_xn, segm
class VegasFlowPlus(VegasFlow):
@@ -135,11 +135,15 @@ class VegasFlowPlus(VegasFlow):
self.n_ev = tf.fill([1, len(hypercubes)], self.min_neval_hcube)
self.n_ev = int_me(tf.reshape(self.n_ev, [-1]))
self._n_events = int(tf.reduce_sum(self.n_ev))
- self.my_xjac = float_me(1 / len(hypercubes))
+ self._modified_jac = float_me(1 / len(hypercubes))
if self._adaptive:
logger.warning("Variable number of events requires function signatures all across")
+ @property
+ def xjac(self):
+ return self._modified_jac
+
def make_differentiable(self):
"""Overrides make_differentiable to make sure the runner has a reference to n_ev"""
runner = super().make_differentiable()
@@ -157,24 +161,31 @@ class VegasFlowPlus(VegasFlow):
self.n_ev = int_me(new_n_ev)
self.n_events = int(tf.reduce_sum(self.n_ev))
- def _generate_random_array(self, n_events):
- """Interface compatible with other algorithms dropping the segmentation in hypercubes"""
- x, ind, w, _ = self._generate_random_array_plus(n_events, self.n_ev)
- return x, ind, w
-
- def _generate_random_array_plus(self, n_events, n_ev):
+ def _digest_random_generation(self, rnds, n_ev):
"""Generate a random array for a given number of events divided in hypercubes"""
- # Needs to skip parent and go directly to the random array generation of MonteCarloFlow
- rnds, _, _ = MonteCarloFlow._generate_random_array(self, n_events)
# Get random numbers from hypercubes
- x, ind, w, segm = generate_samples_in_hypercubes(
+ x, w, ind, segm = generate_samples_in_hypercubes(
rnds,
self._n_strat,
n_ev,
self._hypercubes,
self.divisions,
)
- return x, ind, w * self.my_xjac, segm
+ return x, w, ind, segm
+
+ def generate_random_array(self, n_events, *args):
+ """Override the behaviour of ``generate_random_array``
+ to accomodate for the peculiarities of VegasFlowPlus
+ """
+ rnds = []
+ wgts = []
+ for _ in range(n_events // self.n_events + 1):
+ r, w = super().generate_random_array(self.n_events, self.n_ev)
+ rnds.append(r)
+ wgts.append(w)
+ final_r = tf.concat(rnds, axis=0)[:n_events]
+ final_w = tf.concat(wgts, axis=0)[:n_events] * self.n_events / n_events
+ return final_r, final_w
def _run_event(self, integrand, ncalls=None, n_ev=None):
"""Run one step of VegasFlowPlus
@@ -190,12 +201,12 @@ class VegasFlowPlus(VegasFlow):
Returns
-------
- `res`: sum of the result of the integrand for all events per segement
+ `res`: sum of the result of the integrand for all events per segment
`res2`: sum of the result squared of the integrand for all events per segment
`arr_res2`: result of the integrand squared per dimension and grid bin
"""
# NOTE: needs to receive both ncalls and n_ev
- x, ind, xjac, segm = self._generate_random_array_plus(ncalls, n_ev)
+ x, xjac, ind, segm = self._generate_random_array(ncalls, n_ev)
# compute integrand
tmp = xjac * integrand(x, weight=xjac)
| N3PDF/vegasflow | a11868282dc9cf3ed00488d783b24bac62826a49 | diff --git a/src/vegasflow/tests/test_algs.py b/src/vegasflow/tests/test_algs.py
index caec02b..9d18531 100644
--- a/src/vegasflow/tests/test_algs.py
+++ b/src/vegasflow/tests/test_algs.py
@@ -58,12 +58,12 @@ def instance_and_compile(Integrator, mode=0, integrand_function=example_integran
return int_instance
-def check_is_one(result, sigmas=3):
+def check_is_one(result, sigmas=3, target_result=1.0):
"""Wrapper for convenience"""
res = result[0]
err = np.mean(result[1] * sigmas)
# Check that it passes by {sigmas} number of sigmas
- np.testing.assert_allclose(res, 1.0, atol=err)
+ np.testing.assert_allclose(res, target_result, atol=err)
@pytest.mark.parametrize("mode", range(4))
@@ -166,21 +166,38 @@ def test_PlainFlow_change_nevents():
def helper_rng_tester(sampling_function, n_events):
"""Ensure the random number generated have the correct shape
Return the random numbers and the jacobian"""
- rnds, _, px = sampling_function(n_events)
+ rnds, px = sampling_function(n_events)
np.testing.assert_equal(rnds.shape, (n_events, dim))
return rnds, px
-def test_rng_generation(n_events=100):
- """Test that the random generation genrates the correct type of arrays"""
+def test_rng_generation_plain(n_events=100):
+ """Test the random number generation with plainflow"""
plain_sampler_instance = instance_and_compile(PlainFlow)
_, px = helper_rng_tester(plain_sampler_instance.generate_random_array, n_events)
np.testing.assert_equal(px.numpy(), 1.0 / n_events)
+
+
+def test_rng_generation_vegasflow(n_events=100):
+ """Test the random number generation with vegasflow"""
vegas_sampler_instance = instance_and_compile(VegasFlow)
+ # Train a bit the grid
vegas_sampler_instance.run_integration(2)
_, px = helper_rng_tester(vegas_sampler_instance.generate_random_array, n_events)
np.testing.assert_equal(px.shape, (n_events,))
- # Test the wrappers
+
+
+def test_rng_generation_vegasflowplus(n_events=100):
+ """Test the random number generation with vegasflow"""
+ vegas_sampler_instance = instance_and_compile(VegasFlowPlus)
+ # Train a bit the grid
+ # vegas_sampler_instance.run_integration(2)
+ _, px = helper_rng_tester(vegas_sampler_instance.generate_random_array, n_events)
+ np.testing.assert_equal(px.shape, (n_events,))
+
+
+def test_rng_generation_wrappers(n_events=100):
+ """Test the wrappers for the samplers"""
p = plain_sampler(example_integrand, dim, n_events, training_steps=2, return_class=True)
_ = helper_rng_tester(p.generate_random_array, n_events)
v = vegas_sampler(example_integrand, dim, n_events, training_steps=2)
diff --git a/src/vegasflow/tests/test_misc.py b/src/vegasflow/tests/test_misc.py
index c913ffd..62d50ed 100644
--- a/src/vegasflow/tests/test_misc.py
+++ b/src/vegasflow/tests/test_misc.py
@@ -2,6 +2,7 @@
Miscellaneous tests that don't really fit anywhere else
"""
import pytest
+import numpy as np
from vegasflow import VegasFlow, VegasFlowPlus, PlainFlow
import tensorflow as tf
@@ -19,6 +20,18 @@ def _wrong_integrand(xarr):
return tf.reduce_sum(xarr)
+def _simple_integrand(xarr):
+ """Integrand f(x) = x"""
+ return tf.reduce_prod(xarr, axis=1)
+
+
+def _simple_integral(xmin, xmax):
+ """Integated version of simple_ingrand"""
+ xm = np.array(xmin) ** 2 / 2.0
+ xp = np.array(xmax) ** 2 / 2.0
+ return np.prod(xp - xm)
+
+
def _wrong_vector_integrand(xarr):
"""Vector integrand with the wrong output shape"""
return tf.transpose(xarr)
@@ -30,7 +43,7 @@ def test_working_vectorial(alg, mode):
"""Check that the algorithms that accept integrating vectorial functions can really do so"""
inst = instance_and_compile(alg, mode=mode, integrand_function=_vector_integrand)
result = inst.run_integration(2)
- check_is_one(result, sigmas=4)
+ check_is_one(result, sigmas=5)
@pytest.mark.parametrize("alg", [VegasFlowPlus])
@@ -53,3 +66,31 @@ def test_wrong_shape(wrong_fun):
"""Check that an error is raised by the compilation if the integrand has the wrong shape"""
with pytest.raises(ValueError):
_ = instance_and_compile(PlainFlow, integrand_function=wrong_fun)
+
+
[email protected]("alg", [PlainFlow, VegasFlow, VegasFlowPlus])
+def test_integration_limits(alg, ncalls=int(1e4)):
+ """Test an integration where the integration limits are modified"""
+ dims = np.random.randint(1, 5)
+ xmin = -1.0 + np.random.rand(dims) * 2.0
+ xmax = 3.0 + np.random.rand(dims)
+ inst = alg(dims, ncalls, xmin=xmin, xmax=xmax)
+ inst.compile(_simple_integrand)
+ result = inst.run_integration(5)
+ expected_result = _simple_integral(xmin, xmax)
+ check_is_one(result, target_result=expected_result)
+
+
+def test_integration_limits_checks():
+ """Test that the errors for wrong limits actually work"""
+ # use hypothesis to check other corner cases
+ with pytest.raises(ValueError):
+ PlainFlow(1, 10, xmin=[10], xmax=[1])
+ with pytest.raises(ValueError):
+ PlainFlow(1, 10, xmin=[10])
+ with pytest.raises(ValueError):
+ PlainFlow(1, 10, xmax=[10])
+ with pytest.raises(ValueError):
+ PlainFlow(2, 10, xmin=[0], xmax=[1])
+ with pytest.raises(ValueError):
+ PlainFlow(2, 10, xmin=[0, 1], xmax=[1])
| Specifying integration limits
Hi vegasflow developers!
Can I specify integration (upper and lower) limits in vegasflow? I searched the documentation but didn't find a way. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"src/vegasflow/tests/test_algs.py::test_rng_generation_plain",
"src/vegasflow/tests/test_algs.py::test_rng_generation_vegasflow",
"src/vegasflow/tests/test_algs.py::test_rng_generation_vegasflowplus",
"src/vegasflow/tests/test_algs.py::test_rng_generation_wrappers",
"src/vegasflow/tests/test_misc.py::test_integration_limits[PlainFlow]",
"src/vegasflow/tests/test_misc.py::test_integration_limits[VegasFlow]",
"src/vegasflow/tests/test_misc.py::test_integration_limits[VegasFlowPlus]",
"src/vegasflow/tests/test_misc.py::test_integration_limits_checks"
] | [
"src/vegasflow/tests/test_algs.py::test_VegasFlow[0]",
"src/vegasflow/tests/test_algs.py::test_VegasFlow[1]",
"src/vegasflow/tests/test_algs.py::test_VegasFlow[2]",
"src/vegasflow/tests/test_algs.py::test_VegasFlow[3]",
"src/vegasflow/tests/test_algs.py::test_VegasFlow_grid_management",
"src/vegasflow/tests/test_algs.py::test_VegasFlow_save_grid",
"src/vegasflow/tests/test_algs.py::test_VegasFlow_load_grid",
"src/vegasflow/tests/test_algs.py::test_PlainFlow[0]",
"src/vegasflow/tests/test_algs.py::test_PlainFlow[1]",
"src/vegasflow/tests/test_algs.py::test_PlainFlow[2]",
"src/vegasflow/tests/test_algs.py::test_PlainFlow[3]",
"src/vegasflow/tests/test_algs.py::test_PlainFlow_change_nevents",
"src/vegasflow/tests/test_algs.py::test_VegasFlowPlus_ADAPTIVE_SAMPLING[0]",
"src/vegasflow/tests/test_algs.py::test_VegasFlowPlus_ADAPTIVE_SAMPLING[1]",
"src/vegasflow/tests/test_algs.py::test_VegasFlowPlus_ADAPTIVE_SAMPLING[2]",
"src/vegasflow/tests/test_algs.py::test_VegasFlowPlus_ADAPTIVE_SAMPLING[3]",
"src/vegasflow/tests/test_algs.py::test_VegasFlowPlus_NOT_ADAPTIVE_SAMPLING",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[VegasFlow-0]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[VegasFlow-1]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[VegasFlow-2]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[VegasFlow-3]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[PlainFlow-0]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[PlainFlow-1]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[PlainFlow-2]",
"src/vegasflow/tests/test_misc.py::test_working_vectorial[PlainFlow-3]",
"src/vegasflow/tests/test_misc.py::test_notworking_vectorial[VegasFlowPlus]",
"src/vegasflow/tests/test_misc.py::test_check_wrong_main_dimension",
"src/vegasflow/tests/test_misc.py::test_wrong_shape[_wrong_vector_integrand]",
"src/vegasflow/tests/test_misc.py::test_wrong_shape[_wrong_integrand]"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-07-07T14:27:42Z" | apache-2.0 |
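
Usage of the new integration-limit keywords, following the documentation block and the tests added in the N3PDF__vegasflow-82 patch above (a sketch assuming a vegasflow version that includes this patch; the integrand mirrors the new test integrand f(x) = prod(x)):

```python
import tensorflow as tf
from vegasflow import VegasFlow


def integrand(xarr):
    """f(x) = product of the coordinates, as in the new test integrand."""
    return tf.reduce_prod(xarr, axis=1)


# Integrate over [0, 1] x [-4, 10] instead of the default unit hypercube.
dimensions, n_calls = 2, int(1e5)
vegas_instance = VegasFlow(dimensions, n_calls, xmin=[0, -4], xmax=[1, 10])
vegas_instance.compile(integrand)
result = vegas_instance.run_integration(5)  # result[0] is the value, result[1] the MC error
```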
|
NCAR__geocat-comp-65 | diff --git a/src/geocat/comp/month_to_season.py b/src/geocat/comp/month_to_season.py
new file mode 100644
index 0000000..c5b0e94
--- /dev/null
+++ b/src/geocat/comp/month_to_season.py
@@ -0,0 +1,77 @@
+
+
+def month_to_season(xMon, season, time_coord_name='time'):
+ """ This function takes an xarray dataset containing monthly data spanning years and
+ returns a dataset with one sample per year, for a specified three-month season.
+
+ This function requires the number of months to be a multiple of 12, i.e. full years must be provided.
+
+ Time stamps are centered on the season. For example, seasons='DJF' returns January timestamps.
+
+ If a calculated season's timestamp falls outside the original range of monthly values, then the calculated mean
+ is dropped. For example, if the monthly data's time range is [Jan-2000, Dec-2003] and the season is "DJF", the
+ seasonal mean computed from the single month of Dec-2003 is dropped.
+ """
+ mod_check(xMon[time_coord_name].size, 12)
+
+ startDate = xMon[time_coord_name][0]
+ endDate = xMon[time_coord_name][-1]
+ seasons_pd = {
+ 'DJF': (
+ 'QS-DEC',
+ 1),
+ 'JFM': (
+ 'QS-JAN',
+ 2),
+ 'FMA': (
+ 'QS-FEB',
+ 3),
+ 'MAM': (
+ 'QS-MAR',
+ 4),
+ 'AMJ': (
+ 'QS-APR',
+ 5),
+ 'MJJ': (
+ 'QS-MAY',
+ 6),
+ 'JJA': (
+ 'QS-JUN',
+ 7),
+ 'JAS': (
+ 'QS-JUL',
+ 8),
+ 'ASO': (
+ 'QS-AUG',
+ 9),
+ 'SON': (
+ 'QS-SEP',
+ 10),
+ 'OND': (
+ 'QS-OCT',
+ 11),
+ 'NDJ': (
+ 'QS-NOV',
+ 12)}
+ try:
+ (season_pd, season_sel) = seasons_pd[season]
+ except KeyError:
+ raise KeyError(
+ f"contributed: month_to_season: bad season: SEASON = {season}. Valid seasons include: {list(seasons_pd.keys())}")
+
+ # Compute the three-month means, moving time labels ahead to the middle
+ # month.
+ month_offset = 'MS'
+ xSeasons = xMon.resample(
+ {time_coord_name: season_pd}, loffset=month_offset).mean()
+
+ # Filter just the desired season, and trim to the desired time range.
+ xSea = xSeasons.sel(
+ {time_coord_name: xSeasons[time_coord_name].dt.month == season_sel})
+ xSea = xSea.sel({time_coord_name: slice(startDate, endDate)})
+ return xSea
+
+
+def mod_check(value, mod):
+ if value % mod != 0:
+ raise ValueError(f'Expected a multiple of {mod} values')
| NCAR/geocat-comp | b7d4a927dd5b283813aae374097afb4beb2b59ec | diff --git a/test/test_month_to_season.py b/test/test_month_to_season.py
new file mode 100644
index 0000000..48afb06
--- /dev/null
+++ b/test/test_month_to_season.py
@@ -0,0 +1,131 @@
+import unittest
+
+import numpy as np
+import pandas as pd
+import xarray as xr
+from geocat.comp.month_to_season import month_to_season
+
+
+def get_fake_dataset(start_month, nmonths, nlats, nlons):
+ """ Returns a very simple xarray dataset for testing.
+ Data values are equal to "month of year" for monthly time steps.
+ """
+ # Create coordinates
+ months = pd.date_range(
+ start=pd.to_datetime(start_month),
+ periods=nmonths,
+ freq='MS')
+ lats = np.linspace(start=-90, stop=90, num=nlats, dtype='float32')
+ lons = np.linspace(start=-180, stop=180, num=nlons, dtype='float32')
+
+ # Create data variable. Construct a 3D array with time as the first
+ # dimension.
+ month_values = np.expand_dims(
+ np.arange(
+ start=1,
+ stop=nmonths + 1),
+ axis=(
+ 1,
+ 2))
+ var_values = np.tile(month_values, (1, nlats, nlons))
+
+ ds = xr.Dataset(
+ data_vars={
+ 'my_var': (('time', 'lat', 'lon'), var_values.astype('float32')),
+ },
+ coords={'time': months, 'lat': lats, 'lon': lons},
+ )
+ return ds
+
+
+class Test_month_to_season(unittest.TestCase):
+
+ def setUp(self):
+ # Create a dataset for the year 2000.
+ self.ds1 = get_fake_dataset(
+ start_month='2000-01', nmonths=12, nlats=1, nlons=1)
+
+ # Create another dataset for the year 2001.
+ self.ds2 = get_fake_dataset(
+ start_month='2001-01', nmonths=12, nlats=1, nlons=1)
+
+ # Create a dataset that combines the two previous datasets, for two
+ # years of data.
+ self.ds3 = xr.concat([self.ds1, self.ds2], dim='time')
+
+ # Create a dataset with the wrong number of months.
+ self.partial_year_dataset = get_fake_dataset(
+ start_month='2000-01', nmonths=13, nlats=1, nlons=1)
+
+ # Create a dataset with a custom time coordinate.
+ custom_time_dataset = get_fake_dataset(
+ start_month='2000-01', nmonths=12, nlats=1, nlons=1)
+ self.custom_time_dataset = custom_time_dataset.rename(
+ {'time': 'my_time'})
+
+ # Create a more complex dataset just to verify that get_fake_dataset()
+ # is generally working.
+ self.complex_dataset = get_fake_dataset(
+ start_month='2001-01', nmonths=12, nlats=10, nlons=10)
+
+ # Check all possible season choices for some tests.
+ self.all_seasons = [
+ 'DJF',
+ 'JFM',
+ 'FMA',
+ 'MAM',
+ 'AMJ',
+ 'MJJ',
+ 'JJA',
+ 'JAS',
+ 'ASO',
+ 'SON',
+ 'OND',
+ 'NDJ']
+
+ def test_m2s_returns_middle_month_value(self):
+ season_ds = month_to_season(self.ds1, 'JFM')
+ season_value_array = season_ds['my_var'].data
+
+ # Should equal the average of [1.0, 2.0, 3.0]
+ self.assertEqual(season_value_array[0, 0, 0], 2.0)
+
+ season_ds = month_to_season(self.ds1, 'JJA')
+ season_value_array = season_ds['my_var'].data
+
+ # Should equal the average of [6.0, 7.0, 8.0]
+ self.assertEqual(season_value_array[0, 0, 0], 7.0)
+
+ def test_bad_season_returns_exception(self):
+ with self.assertRaises(KeyError):
+ season_ds = month_to_season(self.ds1, 'XXX')
+
+ def test_partial_years_returns_exception(self):
+ with self.assertRaises(ValueError):
+ season_ds = month_to_season(self.partial_year_dataset, 'JFM')
+
+ def test_final_season_returns_2month_average(self):
+ season_ds = month_to_season(self.ds1, 'NDJ')
+ season_value_array = season_ds['my_var'].data
+ self.assertEqual(season_value_array[0, 0, 0], 11.5)
+
+ def test_each_season_returns_one_point_per_year(self):
+ nyears_of_data = self.ds3.sizes['time'] / 12
+ for season in self.all_seasons:
+ season_ds = month_to_season(self.ds3, season)
+ season_value_array = season_ds['my_var'].data
+ self.assertEqual(season_value_array.size, nyears_of_data)
+
+ def test_custom_time_coordinate(self):
+ season_ds = month_to_season(
+ self.custom_time_dataset,
+ 'JFM',
+ time_coord_name='my_time')
+ season_value_array = season_ds['my_var'].data
+
+ # Should equal the average of [1.0, 2.0, 3.0]
+ self.assertEqual(season_value_array[0, 0, 0], 2.0)
+
+
+if __name__ == '__main__':
+ unittest.main()
| month_to_season
Port the NCL **month_to_season** family of functions, including: Month_to_season, month_to_seasonN, month_to_season12. The **month_to_season** function computes a user-specified three-month seasonal mean (DJF, JFM, FMA, MAM, AMJ, MJJ, JJA, JAS, ASO, SON, OND, NDJ). | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_month_to_season.py::Test_month_to_season::test_bad_season_returns_exception",
"test/test_month_to_season.py::Test_month_to_season::test_partial_years_returns_exception"
] | [] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-04-20T20:30:26Z" | apache-2.0 |
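
A usage sketch for the ported `month_to_season`, constructed the same way as the fake dataset in the new tests (monthly values 1–12 on a single grid point, so the JFM seasonal mean is 2.0):

```python
import numpy as np
import pandas as pd
import xarray as xr
from geocat.comp.month_to_season import month_to_season

# One year of monthly data on a single grid point; each time step's value
# is simply its month number.
times = pd.date_range(start="2000-01", periods=12, freq="MS")
values = np.arange(1, 13, dtype="float32").reshape(12, 1, 1)
ds = xr.Dataset(
    {"my_var": (("time", "lat", "lon"), values)},
    coords={"time": times, "lat": [0.0], "lon": [0.0]},
)

# One value per year, time-stamped on the middle month of the season.
jfm = month_to_season(ds, "JFM")
print(float(jfm["my_var"]))  # 2.0, the mean of January, February and March
```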
|
NCAS-CMS__cfdm-158 | diff --git a/Changelog.rst b/Changelog.rst
index 166631ac5..dda759411 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -25,7 +25,9 @@ version 1.9.0.0
reference parameters (https://github.com/NCAS-CMS/cfdm/issues/148)
* Interpret format specifiers for size 1 `cfdm.Data` arrays
(https://github.com/NCAS-CMS/cfdm/issues/152)
-
+* Fix file name expansions in `cfdm.write`
+ (https://github.com/NCAS-CMS/cfdm/issues/157)
+
version 1.8.9.0
---------------
----
diff --git a/cfdm/read_write/netcdf/netcdfwrite.py b/cfdm/read_write/netcdf/netcdfwrite.py
index 5f7f1ec5b..b59bc0853 100644
--- a/cfdm/read_write/netcdf/netcdfwrite.py
+++ b/cfdm/read_write/netcdf/netcdfwrite.py
@@ -4641,11 +4641,14 @@ class NetCDFWrite(IOWrite):
"""
logger.info(f"Writing to {fmt}") # pragma: no cover
+ # Expand file name
+ filename = os.path.expanduser(os.path.expandvars(filename))
+
# ------------------------------------------------------------
# Initialise netCDF write parameters
# ------------------------------------------------------------
self.write_vars = {
- "filename": os.path.expanduser(os.path.expandvars(filename)),
+ "filename": filename,
# Format of output file
"fmt": None,
# netCDF4.Dataset instance
diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst
index a538ebd9e..dcba560e8 100644
--- a/docs/source/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -124,6 +124,9 @@ read [#caveat]_.
All formats of netCDF3 and netCDF4 files can be read.
+The file name may describe relative paths, and standard tilde and
+shell parameter expansions are applied to it.
+
The following file types can be read:
* All formats of netCDF3 and netCDF4 files can be read, containing
@@ -3336,6 +3339,9 @@ field constructs, to a netCDF file on disk:
: time(1) = [2019-01-01 00:00:00]
>>> cfdm.write(q, 'q_file.nc')
+The file name may describe relative paths, and standard tilde and
+shell parameter expansions are applied to it.
+
The new dataset is structured as follows:
.. code-block:: console
@@ -3892,6 +3898,9 @@ however, be incorporated into the field constructs of the dataset, as
if they had actually been stored in the same file, simply by providing
the external file names to the `cfdm.read` function.
+An external variables file name may describe relative paths, and
+standard tilde and shell parameter expansions are applied to it.
+
This is illustrated with the files ``parent.nc`` (found in the
:ref:`sample datasets <Sample-datasets>`):
| NCAS-CMS/cfdm | 8d34bc31740c38f1137822c1857b3621576de125 | diff --git a/cfdm/test/test_read_write.py b/cfdm/test/test_read_write.py
index 2b4c4ed0e..8f60c6a3f 100644
--- a/cfdm/test/test_read_write.py
+++ b/cfdm/test/test_read_write.py
@@ -847,6 +847,12 @@ class read_writeTest(unittest.TestCase):
cfdm.write(f, tmpfile)
+ def test_write_filename_expansion(self):
+ """Test the writing to a file name that requires expansions."""
+ f = cfdm.example_field(0)
+ filename = os.path.join("$PWD", os.path.basename(tmpfile))
+ cfdm.write(f, filename)
+
if __name__ == "__main__":
print("Run date:", datetime.datetime.now())
| File name expansions do not work in `cfdm.write`
At cfdm 1.8.9.0, tilde and environment variable file name expansions no longer work in `cfdm.write`:
```python
>>> import cfdm
>>> f = cfdm.example_field(0)
>>> cfdm.write(f, 'test_file.nc') # OK
>>> cfdm.write(f, '~/test_file.nc') # Not OK
...
PermissionError: [Errno 13] Permission denied: b'/home/david/~/test_file.nc'
>>> cfdm.write(f, '$HOME/test_file.nc') # Not OK
...
PermissionError: [Errno 13] Permission denied: b'/home/david/$HOME/test_file.nc'
```
PR fix to follow. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename_expansion"
] | [
"cfdm/test/test_read_write.py::read_writeTest::test_write_coordinates",
"cfdm/test/test_read_write.py::read_writeTest::test_read_mask",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-09-07T09:02:59Z" | mit |
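
The NCAS-CMS__cfdm-158 fix above boils down to expanding the file name before the writer uses it; with it in place, the calls from the issue are expected to succeed (a sketch assuming a cfdm release containing the fix):

```python
import cfdm

f = cfdm.example_field(0)

# Tilde and environment-variable expansions are now applied inside cfdm.write,
# equivalent to os.path.expanduser(os.path.expandvars(filename)).
cfdm.write(f, "~/test_file.nc")
cfdm.write(f, "$HOME/test_file.nc")
```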
|
NCAS-CMS__cfdm-175 | diff --git a/Changelog.rst b/Changelog.rst
index 0125dce49..01e43ed93 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -1,3 +1,14 @@
+Version 1.9.?.?
+---------------
+
+**2022-0?-??**
+
+* Fixed bug that caused `cf.Domain.__str__` to fail when a dimension
+ coordinate construct does not have data
+ (https://github.com/NCAS-CMS/cfdm/issues/174)
+
+----
+
Version 1.9.0.2
---------------
diff --git a/cfdm/domain.py b/cfdm/domain.py
index 67c0ea9a0..5745f1cb0 100644
--- a/cfdm/domain.py
+++ b/cfdm/domain.py
@@ -187,15 +187,18 @@ class Domain(
x = []
dimension_coordinates = self.dimension_coordinates(todict=True)
- for axis_cid in sorted(self.domain_axes(todict=True)):
+ for axis_cid, axis in sorted(self.domain_axes(todict=True).items()):
for cid, dim in dimension_coordinates.items():
if construct_data_axes[cid] == (axis_cid,):
name = dim.identity(default=f"key%{0}")
- y = f"{name}({dim.get_data().size})"
+ y = f"{name}({axis.get_size()})"
if y != axis_names[axis_cid]:
y = f"{name}({axis_names[axis_cid]})"
+
if dim.has_data():
y += f" = {dim.get_data()}"
+ else:
+ y += " = "
x.append(y)
diff --git a/joss/paper.bib b/joss/paper.bib
index 02241916b..f0831c708 100644
--- a/joss/paper.bib
+++ b/joss/paper.bib
@@ -40,10 +40,10 @@
title = {{{NetCDF Climate}} and {{Forecast}} ({{CF}}) {{Metadata Conventions v1}}.8},
publisher = {{CF Conventions Committee}},
author = {{Eaton}, Brian and {Gregory}, Jonathan and {Drach}, Bob and {Taylor}, Karl and {Hankin}, Steve and {Caron}, John and {Signell}, Rich and {Bentley}, Phil and {Rappa}, Greg and {H{\"o}ck}, Heinke and {Pamment}, Alison and {Juckes}, Martin and {Raspaud}, Martin and {Horne}, Randy and {Whiteaker}, Timothy and {Blodgett}, David and {Zender}, Charlie and {Lee}, Daniel},
- month = feb,
- year = 2020,
- urldate = {2020-07-27},
- url = {http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html},
+ month = sep,
+ year = 2021,
+ urldate = {2020-09-10},
+ url = {http://cfconventions.org/Data/cf-conventions/cf-conventions-1.9/cf-conventions.html},
}
@software{Hassell2:2020,
| NCAS-CMS/cfdm | 140dc1bc519a3ba990ea2e1ce46db8d84efc77f4 | diff --git a/cfdm/test/test_Domain.py b/cfdm/test/test_Domain.py
index 624d180b8..bb6836c33 100644
--- a/cfdm/test/test_Domain.py
+++ b/cfdm/test/test_Domain.py
@@ -42,6 +42,13 @@ class DomainTest(unittest.TestCase):
for title in (None, "title"):
d.dump(display=False, _title=title)
+ # Test when dimension coordinate has no data
+ d = d.copy()
+ t = d.construct("time")
+ t.del_data()
+ self.assertFalse(t.has_data())
+ str(d)
+
def test_Domain__init__(self):
"""Test the Domain constructor and source keyword."""
cfdm.Domain(source="qwerty")
| Field/domain print fails when dimension coordinate has no data
(First reported over at https://github.com/NCAS-CMS/cf-python/issues/326 - see there for full details)
When a field has a construct which can have data, e.g. a coordinate, but that construct doesn't have data, the `str()` representation hits a `ValueError` regarding the lack of data on that construct, e.g:
```python
>>> import cfdm
>>> f = cfdm.example_field(2)
>>> t = f.construct('time')
>>> t.del_data()
<Data(36): [1959-12-16 12:00:00, ..., 1962-11-16 00:00:00]>
>>> print(f)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-fc0364975534> in <module>
----> 1 print(f)
~/miniconda3.9/lib/python3.9/site-packages/cfdm/field.py in __str__(self)
239 string.append(f"Field ancils : {field_ancils}")
240
--> 241 string.append(str(self.domain))
242
243 return "\n".join(string)
~/miniconda3.9/lib/python3.9/site-packages/cfdm/domain.py in __str__(self)
194 if construct_data_axes[cid] == (axis_cid,):
195 name = dim.identity(default=f"key%{0}")
--> 196 y = f"{name}({dim.get_data().size})"
197 if y != axis_names[axis_cid]:
198 y = f"{name}({axis_names[axis_cid]})"
~/miniconda3.9/lib/python3.9/site-packages/cfdm/core/abstract/propertiesdata.py in get_data(self, default, _units, _fill_value)
290 return
291
--> 292 return self._default(
293 default, message=f"{self.__class__.__name__} has no data"
294 )
~/miniconda3.9/lib/python3.9/site-packages/cfdm/core/abstract/container.py in _default(self, default, message)
145 default = type(default)(message)
146
--> 147 raise default
148
149 return default
ValueError: DimensionCoordinate has no data
>>>
```
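The failing line in the traceback, `y = f"{name}({dim.get_data().size})"`, assumes the dimension coordinate always holds data; the `cfdm/domain.py` hunk in the patch above avoids that call by taking the size from the domain axis construct and appending a data summary only when `dim.has_data()` is true. A standalone sketch of that guard (stand-in values, not cfdm internals):
```python
def coord_summary(name, axis_size, data=None):
    # With data:    "time(36) = [1959-12-16 12:00:00, ...]"
    # Without data: "time(36) = "
    y = f"{name}({axis_size})"
    return y + (f" = {data}" if data is not None else " = ")

print(coord_summary("time", 36))
```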
Something sensible should be displayed, instead. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Domain.py::DomainTest::test_Domain__repr__str__dump"
] | [
"cfdm/test/test_Domain.py::DomainTest::test_Domain_properties",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_del_construct",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_has_bounds",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_climatological_time_axes",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_identites",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_apply_masking",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_identity",
"cfdm/test/test_Domain.py::DomainTest::test_Domain__init__",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_creation_commands",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_equals",
"cfdm/test/test_Domain.py::DomainTest::test_Domain_data"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-02-28T12:03:08Z" | mit |
|
NCAS-CMS__cfdm-210 | diff --git a/cfdm/field.py b/cfdm/field.py
index 00ee583fd..8414a356d 100644
--- a/cfdm/field.py
+++ b/cfdm/field.py
@@ -297,6 +297,13 @@ class Field(
# ------------------------------------------------------------
new_data = data[tuple(indices)]
+ if 0 in new_data.shape:
+ raise IndexError(
+ f"Indices {indices!r} result in a subspaced shape of "
+ f"{new_data.shape}, but can't create a subspace of "
+ f"{self.__class__.__name__} that has a size 0 axis"
+ )
+
# Replace domain axes
domain_axes = new.domain_axes(todict=True)
for key, size in zip(data_axes, new_data.shape):
diff --git a/cfdm/mixin/propertiesdata.py b/cfdm/mixin/propertiesdata.py
index e595265c8..a744d0f02 100644
--- a/cfdm/mixin/propertiesdata.py
+++ b/cfdm/mixin/propertiesdata.py
@@ -71,6 +71,13 @@ class PropertiesData(Properties):
if data is not None:
new.set_data(data[indices], copy=False)
+ if 0 in new.shape:
+ raise IndexError(
+ f"Indices {indices!r} result in a subspaced shape of "
+ f"{new.shape}, but can't create a subspace of "
+ f"{self.__class__.__name__} that has a size 0 axis"
+ )
+
return new
def __str__(self):
| NCAS-CMS/cfdm | 0c6f140dc8229d41530d2b9f439f740a28645a33 | diff --git a/cfdm/test/test_DimensionCoordinate.py b/cfdm/test/test_DimensionCoordinate.py
index 0513f9ada..76c345140 100644
--- a/cfdm/test/test_DimensionCoordinate.py
+++ b/cfdm/test/test_DimensionCoordinate.py
@@ -125,6 +125,15 @@ class DimensionCoordinateTest(unittest.TestCase):
)
self.assertEqual(t.datetime_array, t.data.datetime_array)
+ def test_DimensiconCoordinate__getitem__(self):
+ """Test the `DimensionCoordinate.__getitem__` method."""
+ dim = self.dim
+ self.assertTrue((dim[1:3].array == dim.array[1:3]).all())
+
+ # Indices result in a subspaced shape that has a size 0 axis
+ with self.assertRaises(IndexError):
+ dim[[False] * dim.size]
+
if __name__ == "__main__":
print("Run date:", datetime.datetime.now())
diff --git a/cfdm/test/test_Field.py b/cfdm/test/test_Field.py
index 2b5ba7e81..84617e17a 100644
--- a/cfdm/test/test_Field.py
+++ b/cfdm/test/test_Field.py
@@ -152,6 +152,10 @@ class FieldTest(unittest.TestCase):
self.assertEqual(c.data.shape, (4,))
self.assertEqual(b.data.shape, (4, 2))
+ # Indices result in a subspaced shape that has a size 0 axis
+ with self.assertRaises(IndexError):
+ f[..., [False] * f.shape[-1]]
+
# def test_Field___setitem__(self):
# f = self.f.squeeze()
#
| Subspaces of constructs can currently have one or more size 0 axes
`numpy` (and similar data objects that share its API, such as `dask`, `cfdm.Data`) reasonably allow subspaces to have size 0 axes:
```python
>>> import numpy as np
>>> a = np.arange(9)
>>> a[[False] * 9]
array([], dtype=int64)
```
At cfdm `v1.10.0.0`, cfdm _constructs_ also allow this:
```python
>>> import cfdm
>>> f = cfdm.example_field(0)
>>> f.shape
(5, 8)
>>> f[[False] * 5]
<Field: specific_humidity(latitude(0), longitude(8)) 1>
```
... but they shouldn't, because it is not allowed by the CF data model. I.e. in the above example, `f[[False] * 5]` should raise an exception.
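A sketch of the kind of guard the fix adds (adapted from the patch above; the exact error message is abbreviated):
```python
import numpy as np

def check_subspace(new_data):
    # The CF data model does not permit size 0 axes, so reject them
    if 0 in new_data.shape:
        raise IndexError(f"subspaced shape {new_data.shape} has a size 0 axis")
    return new_data

check_subspace(np.arange(9)[[False] * 9])  # raises IndexError
```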
PR to follow.
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensiconCoordinate__getitem__",
"cfdm/test/test_Field.py::FieldTest::test_Field___getitem__"
] | [
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensionCoordinate_climatology",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensiconCoordinate_datetime_array",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensionCoordinate_set_data",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensiconCoordinate_array",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensionCoordinate__init__",
"cfdm/test/test_Field.py::FieldTest::test_Field_field_ancillary",
"cfdm/test/test_Field.py::FieldTest::test_Field_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field_dimension_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_axis",
"cfdm/test/test_Field.py::FieldTest::test_Field_convert",
"cfdm/test/test_Field.py::FieldTest::test_Field_cell_method",
"cfdm/test/test_Field.py::FieldTest::test_Field_set_get_del_has_data",
"cfdm/test/test_Field.py::FieldTest::test_Field_apply_masking",
"cfdm/test/test_Field.py::FieldTest::test_Field_get_filenames",
"cfdm/test/test_Field.py::FieldTest::test_Field_data_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_CONSTRUCTS",
"cfdm/test/test_Field.py::FieldTest::test_Field_squeeze_transpose_insert_dimension",
"cfdm/test/test_Field.py::FieldTest::test_Field_construct_item",
"cfdm/test/test_Field.py::FieldTest::test_Field_coordinate_reference",
"cfdm/test/test_Field.py::FieldTest::test_Field_del_construct",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_ancillary",
"cfdm/test/test_Field.py::FieldTest::test_Field_cell_measure",
"cfdm/test/test_Field.py::FieldTest::test_Field_equals",
"cfdm/test/test_Field.py::FieldTest::test_Field__repr__str__dump_construct_type",
"cfdm/test/test_Field.py::FieldTest::test_Field_auxiliary_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field_bounds",
"cfdm/test/test_Field.py::FieldTest::test_Field_has_geometry",
"cfdm/test/test_Field.py::FieldTest::test_Field_PROPERTIES",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_climatological_time_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_has_construct",
"cfdm/test/test_Field.py::FieldTest::test_Field__init__",
"cfdm/test/test_Field.py::FieldTest::test_Field_indices",
"cfdm/test/test_Field.py::FieldTest::test_Field_creation_commands"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-08-24T12:36:04Z" | mit |
|
NCAS-CMS__cfdm-212 | diff --git a/Changelog.rst b/Changelog.rst
index f16852e30..5f750fc13 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -1,3 +1,12 @@
+Version 1.10.0.1
+----------------
+
+**2022-??-??**
+
+* Fixed bug that caused a failure when printing date-time data with
+ the first element masked
+ (https://github.com/NCAS-CMS/cfdm/issues/211)
+
Version 1.10.0.0
----------------
diff --git a/cfdm/data/data.py b/cfdm/data/data.py
index a48650843..d0c428873 100644
--- a/cfdm/data/data.py
+++ b/cfdm/data/data.py
@@ -506,13 +506,13 @@ class Data(Container, NetCDFHDF5, core.Data):
mask = [False, False, False]
+ if isreftime and first is np.ma.masked:
+ first = 0
+ mask[0] = True
+
if size == 1:
if isreftime:
# Convert reference time to date-time
- if first is numpy.ma.masked:
- first = 0
- mask[0] = True
-
try:
first = type(self)(
numpy.ma.array(first, mask=mask[0]), units, calendar
| NCAS-CMS/cfdm | 223b74bd0cf858870bd29810f51f74712a9980b5 | diff --git a/cfdm/test/test_Data.py b/cfdm/test/test_Data.py
index fd1a15182..4960a4f9b 100644
--- a/cfdm/test/test_Data.py
+++ b/cfdm/test/test_Data.py
@@ -68,6 +68,13 @@ class DataTest(unittest.TestCase):
_ = repr(d)
_ = str(d)
+ # Test when the data contains date-times with the first
+ # element masked
+ dt = numpy.ma.array([10, 20], mask=[True, False])
+ d = cfdm.Data(dt, units="days since 2000-01-01")
+ self.assertTrue(str(d) == "[--, 2000-01-21 00:00:00]")
+ self.assertTrue(repr(d) == "<Data(2): [--, 2000-01-21 00:00:00]>")
+
# def test_Data__getitem__(self):
def test_Data__setitem__(self):
"""Test the assignment of data items on Data."""
| `Data.__str__` fails when the data contains date-times with the first element masked
At cfdm v1.10.0.0 we have:
```python
>>> # v1.10.0.0
>>> import cfdm
>>> import numpy as np
>>> dt = np.ma.array([10, 20], mask=[True, False])
>>> d = cfdm.Data(dt, units='days since 2000-01-01')
>>> str(d)
Traceback
...
TypeError: unsupported operand type(s) for +: 'cftime._cftime.DatetimeGregorian' and 'NoneType'
```
but what we expect is
```python
>>> # Expected behaviour:
>>> d = cfdm.Data(dt, units='days since 2000-01-01')
>>> str(d)
'[--, 2000-01-21 00:00:00]'
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Data.py::DataTest::test_Data__repr__str"
] | [
"cfdm/test/test_Data.py::DataTest::test_Data__format__",
"cfdm/test/test_Data.py::DataTest::test_Data__setitem__",
"cfdm/test/test_Data.py::DataTest::test_Data_any",
"cfdm/test/test_Data.py::DataTest::test_Data_apply_masking",
"cfdm/test/test_Data.py::DataTest::test_Data_array",
"cfdm/test/test_Data.py::DataTest::test_Data_datetime_array",
"cfdm/test/test_Data.py::DataTest::test_Data_dtype_mask",
"cfdm/test/test_Data.py::DataTest::test_Data_equals",
"cfdm/test/test_Data.py::DataTest::test_Data_filled",
"cfdm/test/test_Data.py::DataTest::test_Data_flatten",
"cfdm/test/test_Data.py::DataTest::test_Data_get_compressed_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_get_count",
"cfdm/test/test_Data.py::DataTest::test_Data_get_index",
"cfdm/test/test_Data.py::DataTest::test_Data_get_list",
"cfdm/test/test_Data.py::DataTest::test_Data_insert_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_maximum_minimum_sum_squeeze",
"cfdm/test/test_Data.py::DataTest::test_Data_transpose",
"cfdm/test/test_Data.py::DataTest::test_Data_unique"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2022-08-26T14:20:13Z" | mit |
|
NCAS-CMS__cfdm-220 | diff --git a/Changelog.rst b/Changelog.rst
index 4c24fc3aa..232c07d6e 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -10,6 +10,8 @@ Version 1.10.0.1
(https://github.com/NCAS-CMS/cfdm/issues/215)
* New method: `cfdm.Field.get_original_filenames`
* New method: `cfdm.Data.get_original_filenames`
+* Fixed bug that caused incorrect data assignment with some multiple
+ list indices (https://github.com/NCAS-CMS/cfdm/issues/217)
* Fixed bug that caused a failure when printing date-time data with
the first element masked
(https://github.com/NCAS-CMS/cfdm/issues/211)
diff --git a/cfdm/data/data.py b/cfdm/data/data.py
index 676ed6945..9699dec6e 100644
--- a/cfdm/data/data.py
+++ b/cfdm/data/data.py
@@ -858,8 +858,83 @@ class Data(Container, NetCDFHDF5, Files, core.Data):
self._set_Array(array, copy=copy)
@classmethod
- def _set_subspace(cls, array, indices, value):
- """Set a subspace of the data array defined by indices."""
+ def _set_subspace(cls, array, indices, value, orthogonal_indexing=True):
+ """Assign to a subspace of an array.
+
+ :Parameters:
+
+ array: array_like
+ The array to be assigned to. Must support
+ `numpy`-style indexing. The array is changed in-place.
+
+ indices: sequence
+ The indices to be applied.
+
+ value: array_like
+ The value being assigned. Must support fancy indexing.
+
+ orthogonal_indexing: `bool`, optional
+ If True then apply 'orthogonal indexing', for which
+ indices that are 1-d arrays or lists subspace along
+ each dimension independently. This behaviour is
+ similar to Fortran but different to, for instance,
+ `numpy` or `dask`.
+
+ :Returns:
+
+ `None`
+
+ **Examples**
+
+ Note that ``a`` is redefined for each example, as it is
+ changed in-place.
+
+ >>> a = np.arange(40).reshape(5, 8)
+ >>> {{package}}.Data._set_subspace(a, [[1, 4 ,3], [7, 6, 1]],
+ ... np.array([[-1, -2, -3]]))
+ >>> print(a)
+ [[ 0 1 2 3 4 5 6 7]
+ [ 8 -3 10 11 12 13 -2 -1]
+ [16 17 18 19 20 21 22 23]
+ [24 -3 26 27 28 29 -2 -1]
+ [32 -3 34 35 36 37 -2 -1]]
+
+ >>> a = np.arange(40).reshape(5, 8)
+ >>> {{package}}.Data._set_subspace(a, [[1, 4 ,3], [7, 6, 1]],
+ ... np.array([[-1, -2, -3]]),
+ ... orthogonal_indexing=False)
+ >>> print(a)
+ [[ 0 1 2 3 4 5 6 7]
+ [ 8 9 10 11 12 13 14 -1]
+ [16 17 18 19 20 21 22 23]
+ [24 -3 26 27 28 29 30 31]
+ [32 33 34 35 36 37 -2 39]]
+
+ >>> a = np.arange(40).reshape(5, 8)
+ >>> value = np.linspace(-1, -9, 9).reshape(3, 3)
+ >>> print(value)
+ [[-1. -2. -3.]
+ [-4. -5. -6.]
+ [-7. -8. -9.]]
+ >>> {{package}}.Data._set_subspace(a, [[4, 4 ,1], [7, 6, 1]], value)
+ >>> print(a)
+ [[ 0 1 2 3 4 5 6 7]
+ [ 8 -9 10 11 12 13 -8 -7]
+ [16 17 18 19 20 21 22 23]
+ [24 25 26 27 28 29 30 31]
+ [32 -6 34 35 36 37 -5 -4]]
+
+ """
+ if not orthogonal_indexing:
+ # --------------------------------------------------------
+ # Apply non-orthogonal indexing
+ # --------------------------------------------------------
+ array[tuple(indices)] = value
+ return
+
+ # ------------------------------------------------------------
+ # Still here? Then apply orthogonal indexing
+ # ------------------------------------------------------------
axes_with_list_indices = [
i
for i, x in enumerate(indices)
@@ -867,55 +942,118 @@ class Data(Container, NetCDFHDF5, Files, core.Data):
]
if len(axes_with_list_indices) < 2:
- # --------------------------------------------------------
# At most one axis has a list-of-integers index so we can
- # do a normal numpy assignment
- # --------------------------------------------------------
+ # do a normal assignment
array[tuple(indices)] = value
else:
- # --------------------------------------------------------
# At least two axes have list-of-integers indices so we
- # can't do a normal numpy assignment
- # --------------------------------------------------------
+ # can't do a normal assignment.
+ #
+ # The brute-force approach would be to do a separate
+ # assignment to each set of elements of 'array' that are
+ # defined by every possible combination of the integers
+ # defined by the two index lists.
+ #
+ # For example, if the input 'indices' are ([1, 2, 4, 5],
+ # slice(0:10), [8, 9]) then the brute-force approach would
+ # be to do 4*2=8 separate assignments of 10 elements each.
+ #
+ # This can be reduced by a factor of ~2 per axis that has
+ # list indices if we convert it to a sequence of "size 2"
+ # slices (with a "size 1" slice at the end if there are an
+ # odd number of list elements).
+ #
+ # In the above example, the input list index [1, 2, 4, 5]
+ # can be mapped to two slices: slice(1,3,1), slice(4,6,1);
+ # the input list index [8, 9] is mapped to slice(8,10,1)
+ # and only 2 separate assignments of 40 elements each are
+ # needed.
indices1 = indices[:]
- for i, x in enumerate(indices):
+ for i, (x, size) in enumerate(zip(indices, array.shape)):
if i in axes_with_list_indices:
- # This index is a list of integers
+ # This index is a list (or similar) of integers
+ if not isinstance(x, list):
+ x = np.asanyarray(x).tolist()
+
y = []
args = [iter(x)] * 2
for start, stop in itertools.zip_longest(*args):
- if not stop:
+ if start < 0:
+ start += size
+
+ if stop is None:
y.append(slice(start, start + 1))
+ break
+
+ if stop < 0:
+ stop += size
+
+ step = stop - start
+ if not step:
+ # (*) There is a repeated index in
+ # positions 2N and 2N+1 (N>=0). Store
+ # this as a single-element list
+ # instead of a "size 2" slice, mainly
+ # as an indicator that a special index
+ # to 'value' might need to be
+ # created. See below, where this
+ # comment is referenced.
+ #
+ # For example, the input list index
+ # [1, 4, 4, 4, 6, 2, 7] will be mapped
+ # to slice(1,5,3), [4], slice(6,1,-4),
+ # slice(7,8,1)
+ y.append([start])
else:
- step = stop - start
- stop += 1
+ if step > 0:
+ stop += 1
+ else:
+ stop -= 1
+
y.append(slice(start, stop, step))
indices1[i] = y
else:
indices1[i] = (x,)
- if numpy.size(value) == 1:
+ if value.size == 1:
+ # 'value' is logically scalar => simply assign it to
+ # all index combinations.
for i in itertools.product(*indices1):
array[i] = value
-
else:
+ # 'value' has two or more elements => for each index
+ # combination for 'array' assign the corresponding
+ # part of 'value'.
indices2 = []
- ndim_difference = array.ndim - numpy.ndim(value)
- for i, n in enumerate(numpy.shape(value)):
- if n == 1:
+ ndim_difference = array.ndim - value.ndim
+ for i2, size in enumerate(value.shape):
+ i1 = i2 + ndim_difference
+ if i1 not in axes_with_list_indices:
+ # The input 'indices[i1]' is a slice
indices2.append((slice(None),))
- elif i + ndim_difference in axes_with_list_indices:
+ continue
+
+ index1 = indices1[i1]
+ if size == 1:
+ indices2.append((slice(None),) * len(index1))
+ else:
y = []
start = 0
- while start < n:
+ for index in index1:
stop = start + 2
+ if isinstance(index, list):
+ # Two consecutive elements of 'value'
+ # are assigned to the same integer
+ # index of 'array'.
+ #
+ # See the (*) comment above.
+ start += 1
+
y.append(slice(start, stop))
start = stop
indices2.append(y)
- else:
- indices2.append((slice(None),))
for i, j in zip(
itertools.product(*indices1), itertools.product(*indices2)
diff --git a/cfdm/field.py b/cfdm/field.py
index 69bc1dc7c..840d8d0e2 100644
--- a/cfdm/field.py
+++ b/cfdm/field.py
@@ -3,7 +3,6 @@ import logging
import numpy as np
from . import Constructs, Count, Domain, Index, List, core, mixin
-from .constants import masked as cfdm_masked
from .data import (
GatheredArray,
RaggedContiguousArray,
@@ -1208,13 +1207,15 @@ class Field(
flattened_data = data.flatten(range(data.ndim - 1))
count = []
+ masked = np.ma.masked
for d in flattened_data:
+ d = d.array
last = d.size
for i in d[::-1]:
- if i is not cfdm_masked:
+ if i is not masked:
break
- else:
- last -= 1
+
+ last -= 1
count.append(last)
| NCAS-CMS/cfdm | 525682552c236b95a2cc8706d3c9f05c152f5a5a | diff --git a/cfdm/test/test_Data.py b/cfdm/test/test_Data.py
index 291c3b495..940a0d585 100644
--- a/cfdm/test/test_Data.py
+++ b/cfdm/test/test_Data.py
@@ -6,6 +6,7 @@ import os
import unittest
import numpy
+import numpy as np
faulthandler.enable() # to debug seg faults and timeouts
@@ -75,10 +76,9 @@ class DataTest(unittest.TestCase):
self.assertTrue(str(d) == "[--, 2000-01-21 00:00:00]")
self.assertTrue(repr(d) == "<Data(2): [--, 2000-01-21 00:00:00]>")
- # def test_Data__getitem__(self):
def test_Data__setitem__(self):
"""Test the assignment of data items on Data."""
- a = numpy.ma.arange(3000).reshape(50, 60)
+ a = np.ma.arange(3000).reshape(50, 60)
d = cfdm.Data(a.filled(), units="m")
@@ -95,47 +95,138 @@ class DataTest(unittest.TestCase):
n = -n - 1
for dvalue, avalue in (
(n, n),
- (cfdm.masked, numpy.ma.masked),
+ (cfdm.masked, np.ma.masked),
(n, n),
):
- message = f"cfdm.Data[{j}, {i}]={dvalue}={avalue} failed"
d[j, i] = dvalue
a[j, i] = avalue
x = d.array
- self.assertTrue(
- (x == a).all() in (True, numpy.ma.masked), message
- )
- m = numpy.ma.getmaskarray(x)
- self.assertTrue(
- (m == numpy.ma.getmaskarray(a)).all(),
- "d.mask.array="
- + repr(m)
- + "\nnumpy.ma.getmaskarray(a)="
- + repr(numpy.ma.getmaskarray(a)),
- )
+ self.assertTrue((x == a).all() in (True, np.ma.masked))
+ m = np.ma.getmaskarray(x)
+ self.assertTrue((m == np.ma.getmaskarray(a)).all())
- a = numpy.ma.arange(3000).reshape(50, 60)
+ a = np.ma.arange(3000).reshape(50, 60)
d = cfdm.Data(a.filled(), "m")
(j, i) = (slice(0, 2), slice(0, 3))
- array = numpy.array([[1, 2, 6], [3, 4, 5]]) * -1
+ array = np.array([[1, 2, 6], [3, 4, 5]]) * -1
- for dvalue in (array, numpy.ma.masked_where(array < -2, array), array):
- message = "cfdm.Data[%s, %s]=%s failed" % (j, i, dvalue)
+ for dvalue in (array, np.ma.masked_where(array < -2, array), array):
d[j, i] = dvalue
a[j, i] = dvalue
x = d.array
- self.assertTrue((x == a).all() in (True, numpy.ma.masked), message)
- m = numpy.ma.getmaskarray(x)
- self.assertTrue((m == numpy.ma.getmaskarray(a)).all(), message)
+ self.assertTrue((x == a).all() in (True, np.ma.masked))
+ m = np.ma.getmaskarray(x)
+ self.assertTrue((m == np.ma.getmaskarray(a)).all())
# Scalar numeric array
d = cfdm.Data(9, units="km")
d[...] = cfdm.masked
a = d.array
self.assertEqual(a.shape, ())
- self.assertIs(a[()], numpy.ma.masked)
+ self.assertIs(a[()], np.ma.masked)
+
+ # Multiple list indices, scalar value
+ d = cfdm.Data(np.arange(40).reshape(5, 8), units="km")
+
+ value = -1
+ for indices in (
+ ([0, 3, 4], [1, 6, 7]),
+ ([0, 3, 4], [1, 7, 6]),
+ ([0, 4, 3], [1, 6, 7]),
+ ([0, 4, 3], [1, 7, 6]),
+ ([4, 3, 0], [7, 6, 1]),
+ ([4, 3, 0], [1, 6, 7]),
+ ([0, 3, 4], [7, 6, 1]),
+ ([0, 3, -1], [7, 6, 1]),
+ ([0, 3, 4], [-1, 6, 1]),
+ ([0, 3, -1], [-1, 6, 1]),
+ ):
+ d[indices] = value
+ self.assertEqual((d.array == value).sum(), 9)
+ value -= 1
+
+ # Repeated list elements
+ for indices in (
+ ([0, 3, 3], [7, 6, 1]),
+ ([3, 3, 0], [7, 6, 1]),
+ ([0, 4, 3], [7, 6, 7]),
+ ):
+ d[indices] = value
+ self.assertEqual((d.array == value).sum(), 6)
+ value -= 1
+
+ for indices in (
+ ([3, 4, 3], [7, 6, 7]),
+ ([3, 3, 4], [7, 7, 6]),
+ ([4, 3, 3], [6, 7, 7]),
+ ):
+ d[indices] = value
+ self.assertEqual((d.array == value).sum(), 4)
+ value -= 1
+
+ # Multiple list indices, array value
+ a = np.arange(40).reshape(1, 5, 8)
+
+ value = np.arange(9).reshape(3, 3) - 9
+
+ for indices in (
+ (slice(None), [0, 3, 4], [1, 6, 7]),
+ (slice(None), [0, 3, 4], [1, 7, 6]),
+ (slice(None), [0, 4, 3], [1, 6, 7]),
+ (slice(None), [0, 4, 3], [1, 7, 6]),
+ (slice(None), [4, 3, 0], [7, 6, 1]),
+ (slice(None), [4, 3, 0], [1, 6, 7]),
+ (slice(None), [0, 3, 4], [7, 6, 1]),
+ (slice(None), [0, 3, -1], [7, 6, 1]),
+ (slice(None), [0, 3, 4], [-1, 6, 1]),
+ (slice(None), [0, 3, -1], [-1, 6, 1]),
+ ):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 9)
+
+ # Repeated list elements
+ for indices in (
+ (slice(None), [0, 3, 3], [7, 6, 1]),
+ (slice(None), [0, 4, 3], [7, 6, 7]),
+ (slice(None), [3, 3, 4], [1, 6, 7]),
+ (slice(None), [0, 4, 3], [7, 7, 6]),
+ ):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 6)
+
+ for indices in (
+ (slice(None), [3, 4, 3], [7, 6, 7]),
+ (slice(None), [4, 3, 3], [6, 7, 7]),
+ (slice(None), [3, 3, 4], [6, 7, 7]),
+ (slice(None), [3, 3, 4], [7, 7, 6]),
+ (slice(None), [4, 3, 3], [7, 7, 6]),
+ ):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 4)
+
+ # Multiple list indices, array value + broadcasting
+ value = np.arange(3).reshape(1, 3) - 9
+
+ for indices in ((slice(None), [0, 3, 4], [1, 6, 7]),):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 9)
+
+ # Repeated list elements
+ for indices in ((slice(None), [0, 3, 3], [7, 6, 1]),):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 6)
+
+ for indices in ((slice(None), [4, 3, 3], [7, 7, 6]),):
+ d = cfdm.Data(a.copy())
+ d[indices] = value
+ self.assertEqual((d.array < 0).sum(), 4)
def test_Data_apply_masking(self):
"""Test the `apply_masking` Data method."""
| Incorrect data assignment with some multiple list indices
When multiple list indices are provided to a data assignment (i.e. `Data.__setitem__`), the assignment works if all of the lists are strictly monotonically increasing, but only in some cases where they are not:
```python
>>> import cfdm
>>> d = cfdm.example_field(0).data
>>> d[[0, 3, 4], [1, 7, 6]].array
array([[0.034, 0.029, 0.024],
[0.059, 0.017, 0.009],
[0.036, 0.013, 0.034]])
>>> d[[0, 3, 4], [1, 7, 6]] = -1 # all lists strictly monotonically increasing => OK
>>> d[[0, 3, 4], [1, 7, 6]].array
array([[-1., -1., -1.],
[-1., -1., -1.],
[-1., -1., -1.]])
```
```python
>>> import cfdm
>>> d = cfdm.example_field(0).data
>>> d[[0, 3, 4], [1, 6, 7]].array
array([[0.034, 0.029, 0.024],
[0.059, 0.017, 0.009],
[0.036, 0.013, 0.034]])
>>> d[[0, 3, 4], [1, 7, 6]] = -1
>>> d[[0, 3, 4], [1, 7, 6]].array # one list not strictly monotonically increasing => OK (edited from "not OK"!)
array([[-1., -1., -1.],
[-1., -1., -1.],
[-1., -1., -1.]])
```
```python
>>> d = cfdm.example_field(0).data
>>> d[[4, 3, 0], [7, 6, 1]] = -1
>>> d[[4, 3, 0], [7, 6, 1]].array # both lists strictly monotonically decreasing => NOT OK
array([[ 0.013, 0.034, 0.036],
[ 0.017, 0.009, 0.059],
[ 0.029, 0.024, -1. ]])
```
```python
>>> d = cfdm.example_field(0).data
>>> d[[0, 4, 3], [1, 7, 6]] = -1
>>> d[[0, 4, 3], [1, 7, 6]].array # OK, though!
array([[-1., -1., -1.],
[-1., -1., -1.],
[-1., -1., -1.]])
```
PR to follow (after cfdm has been transferred to `main`) (edit: https://github.com/davidhassell/cfdm/tree/data-setitem-multiple-list-indices-2, lest I forget)
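For context, multiple list indices in `Data.__setitem__` are meant to behave "orthogonally": each list subspaces its own axis independently, whatever its order, so the examples above should always touch the full 3×3 cross product. In plain numpy the equivalent is `np.ix_`; a small sketch for comparison only:
```python
import numpy as np

a = np.arange(40).reshape(5, 8)
# Rows [4, 3, 0] crossed with columns [7, 6, 1]: nine elements assigned,
# regardless of whether either list is monotonic.
a[np.ix_([4, 3, 0], [7, 6, 1])] = -1
assert (a == -1).sum() == 9
```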
```python
>>> cfdm.environment(paths=False)
Platform: Linux-5.14.0-1052-oem-x86_64-with-glibc2.31
HDF5 library: 1.12.1
netcdf library: 4.8.1
Python: 3.9.12
netCDF4: 1.6.0
numpy: 1.22.3
cfdm.core: 1.10.0.0
cftime: 1.6.1
netcdf_flattener: 1.2.0
cfdm: 1.10.0.0
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Data.py::DataTest::test_Data__setitem__"
] | [
"cfdm/test/test_Data.py::DataTest::test_Data_any",
"cfdm/test/test_Data.py::DataTest::test_Data_get_index",
"cfdm/test/test_Data.py::DataTest::test_Data_datetime_array",
"cfdm/test/test_Data.py::DataTest::test_Data_unique",
"cfdm/test/test_Data.py::DataTest::test_Data__format__",
"cfdm/test/test_Data.py::DataTest::test_Data_flatten",
"cfdm/test/test_Data.py::DataTest::test_Data_get_compressed_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_equals",
"cfdm/test/test_Data.py::DataTest::test_Data_maximum_minimum_sum_squeeze",
"cfdm/test/test_Data.py::DataTest::test_Data_apply_masking",
"cfdm/test/test_Data.py::DataTest::test_Data__repr__str",
"cfdm/test/test_Data.py::DataTest::test_Data_get_count",
"cfdm/test/test_Data.py::DataTest::test_Data_orginal_filenames",
"cfdm/test/test_Data.py::DataTest::test_Data_insert_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_filled",
"cfdm/test/test_Data.py::DataTest::test_Data_transpose",
"cfdm/test/test_Data.py::DataTest::test_Data_array",
"cfdm/test/test_Data.py::DataTest::test_Data_dtype_mask",
"cfdm/test/test_Data.py::DataTest::test_Data_get_list"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2022-10-12T10:56:28Z" | mit |
|
NCAS-CMS__cfdm-243 | diff --git a/Changelog.rst b/Changelog.rst
index b18f7dd25..721dfade1 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -7,7 +7,9 @@ Version 1.10.0.2
(https://github.com/NCAS-CMS/cfdm/issues/228)
* Relocate the call to `NetCDFRead._customize_read_vars` to earlier in
`NetCDFRead.read` (https://github.com/NCAS-CMS/cfdm/issues/233)
-
+* Fixed bug that caused incorrect formula terms in output netCDF files
+ in some cases (https://github.com/NCAS-CMS/cfdm/issues/242)
+
----
Version 1.10.0.1
diff --git a/cfdm/examplefield.py b/cfdm/examplefield.py
index 7d9acfb68..6a15fbd88 100644
--- a/cfdm/examplefield.py
+++ b/cfdm/examplefield.py
@@ -466,13 +466,19 @@ def example_field(n, _implementation=_implementation):
# domain_ancillary
c = DomainAncillary()
- c.set_properties({"units": "m"})
+ c.set_properties(
+ {
+ "units": "m",
+ "computed_standard_name": "altitude",
+ "standard_name": "atmosphere_hybrid_height_coordinate",
+ }
+ )
c.nc_set_variable("a")
- data = Data([10.0], units="m", dtype="f8")
+ data = Data([20.0], units="m", dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("a_bounds")
- data = Data([[5.0, 15.0]], units="m", dtype="f8")
+ data = Data([[0, 36.6666717529297]], units="m", dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
@@ -482,11 +488,11 @@ def example_field(n, _implementation=_implementation):
# domain_ancillary
c = DomainAncillary()
c.nc_set_variable("b")
- data = Data([20.0], dtype="f8")
+ data = Data([0.997741281986237], dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("b_bounds")
- data = Data([[14.0, 26.0]], dtype="f8")
+ data = Data([[1, 0.995860934257507]], dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
@@ -946,16 +952,17 @@ def example_field(n, _implementation=_implementation):
c = DimensionCoordinate()
c.set_properties(
{
+ "units": "m",
"computed_standard_name": "altitude",
"standard_name": "atmosphere_hybrid_height_coordinate",
}
)
c.nc_set_variable("atmosphere_hybrid_height_coordinate")
- data = Data([1.5], dtype="f8")
+ data = Data([20.0], dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("atmosphere_hybrid_height_coordinate_bounds")
- data = Data([[1.0, 2.0]], dtype="f8")
+ data = Data([[0, 36.6666717529297]], dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py
index 201fe8b7e..29aa0ef19 100644
--- a/cfdm/read_write/netcdf/netcdfread.py
+++ b/cfdm/read_write/netcdf/netcdfread.py
@@ -4569,6 +4569,9 @@ class NetCDFRead(IORead):
elif nodes:
attribute = "nodes"
+ # Make sure that the bounds attribute is removed
+ properties.pop(attribute, None)
+
if dimension:
properties.pop("compress", None)
c = self.implementation.initialise_DimensionCoordinate()
@@ -4778,7 +4781,6 @@ class NetCDFRead(IORead):
# Store the netCDF variable name
self.implementation.nc_set_variable(cell_measure, ncvar)
-
if ncvar in g["external_variables"]:
# The cell measure variable is in an unknown external file
self.implementation.nc_set_external(construct=cell_measure)
| NCAS-CMS/cfdm | 0cece5a8c04d4e1d26dd9ae50a825e4b8e1d335d | diff --git a/cfdm/test/test_Constructs.py b/cfdm/test/test_Constructs.py
index 6cae2d2a4..350e8d987 100644
--- a/cfdm/test/test_Constructs.py
+++ b/cfdm/test/test_Constructs.py
@@ -390,7 +390,7 @@ class ConstructsTest(unittest.TestCase):
e = d.inverse_filter()
self.assertEqual(len(e), len(c) - len(d))
- self.assertEqual(len(c.filter_by_property(standard_name=None)), 8)
+ self.assertEqual(len(c.filter_by_property(standard_name=None)), 9)
with self.assertRaises(ValueError):
c.filter_by_property("too many", "modes")
diff --git a/cfdm/test/test_read_write.py b/cfdm/test/test_read_write.py
index 1a2009ff4..0bc53bae2 100644
--- a/cfdm/test/test_read_write.py
+++ b/cfdm/test/test_read_write.py
@@ -7,6 +7,7 @@ import subprocess
import tempfile
import unittest
+import netCDF4
import numpy as np
faulthandler.enable() # to debug seg faults and timeouts
@@ -938,6 +939,51 @@ class read_writeTest(unittest.TestCase):
self.assertFalse(g.array.count())
self.assertTrue(g.construct("grid_latitude").array.count())
+ def test_read_write_domain_ancillary(self):
+ """Test when domain ancillary equals dimension coordinate."""
+ f = cfdm.example_field(1)
+
+ # Check the domain ancillary does indeed equal the dimension
+ # coordinate
+ self.assertTrue(
+ f.domain_ancillary("atmosphere_hybrid_height_coordinate").equals(
+ f.dimension_coordinate("atmosphere_hybrid_height_coordinate"),
+ ignore_type=True,
+ )
+ )
+
+ cfdm.write(f, tmpfile)
+ g = cfdm.read(tmpfile)
+ self.assertEqual(len(g), 1)
+ g = g[0]
+ self.assertTrue(f.equals(g))
+
+ nc = netCDF4.Dataset(tmpfile, "r")
+
+ z = nc.variables["atmosphere_hybrid_height_coordinate"]
+ # In the following test we are checking that it is not 'a: a
+ # b: b orog: surface_altitude'
+ self.assertEqual(
+ z.getncattr("formula_terms"),
+ "a: atmosphere_hybrid_height_coordinate b: b orog: surface_altitude",
+ )
+
+ zb = nc.variables["atmosphere_hybrid_height_coordinate_bounds"]
+ # In the following test we are checking that it is not 'a:
+ # a_bounds b: b orog: surface_altitude'
+ self.assertEqual(
+ zb.getncattr("formula_terms"),
+ "a: atmosphere_hybrid_height_coordinate_bounds b: b_bounds orog: surface_altitude",
+ )
+
+ nc.close()
+
+ # Test the full round trip
+ cfdm.write(g, tmpfile0)
+ h = cfdm.read(tmpfile0)
+ self.assertEqual(len(h), 1)
+ self.assertTrue(f.equals(h[0]))
+
if __name__ == "__main__":
print("Run date:", datetime.datetime.now())
| Incorrect `formula_terms` in output netCDF files in some cases
When a domain ancillary construct with bounds is the same netCDF variable as the parent dimension coordinate construct, the domain ancillary construct is erroneously written to an output file in addition to the dimension coordinate construct, leaving the `formula_terms` attribute pointing at the spurious variable.
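A minimal way to see the symptom, adapted from the test added in this record (the output file name is illustrative; `example_field(1)` is the parent field used there):
```python
import cfdm
import netCDF4

f = cfdm.example_field(1)
cfdm.write(f, "tmp.nc")

nc = netCDF4.Dataset("tmp.nc", "r")
z = nc.variables["atmosphere_hybrid_height_coordinate"]
# Expected: "a: atmosphere_hybrid_height_coordinate b: b orog: surface_altitude"
# Bug:      "a: a b: b orog: surface_altitude", i.e. a spurious "a" variable
print(z.getncattr("formula_terms"))
nc.close()
```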
```python
>>> import cfdm
>>> cfdm.environment(paths=False)
Platform: Linux-5.15.0-60-generic-x86_64-with-glibc2.35
HDF5 library: 1.12.1
netcdf library: 4.8.1
Python: 3.10.8
netCDF4: 1.6.0
numpy: 1.23.5
cfdm.core: 1.10.0.2
cftime: 1.6.2
netcdf_flattener: 1.2.0
cfdm: 1.10.0.2
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_property"
] | [
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_type",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_key",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_private",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_domain_axis_identity",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs__repr__str__dump",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_construct_types",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_ncdim",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_items_key_value",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_domain_axes",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_size",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_todict",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_get_data_axes",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_naxes",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_axis",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_check_construct_type",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_clear_filters_applied",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_data",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs__getitem__",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_method",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs__len__",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_identity",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_measure",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter_by_ncvar",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_filter",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_FILTERING",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_construct_type",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_copy_shallow_copy",
"cfdm/test/test_Constructs.py::ConstructsTest::test_Constructs_copy",
"cfdm/test/test_read_write.py::read_writeTest::test_read_mask",
"cfdm/test/test_read_write.py::read_writeTest::test_read_zero_length_file",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename_expansion",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename",
"cfdm/test/test_read_write.py::read_writeTest::test_write_coordinates"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-02-22T19:23:47Z" | mit |
|
NCAS-CMS__cfdm-247 | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ae9504987..576c555fc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -68,7 +68,7 @@ repos:
# compatible with 'black' with the lines set to ensure so in the repo's
# pyproject.toml. Other than that and the below, no extra config is required.
- repo: https://github.com/pycqa/isort
- rev: 5.8.0
+ rev: 5.12.0
hooks:
- id: isort
name: isort (python)
diff --git a/Changelog.rst b/Changelog.rst
index 9ca2645bb..7951b55e5 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -7,6 +7,8 @@ Version 1.10.0.3
(https://github.com/NCAS-CMS/cfdm/issues/241)
* New keyword parameter to `cfdm.unique_constructs`:
``ignore_properties`` (https://github.com/NCAS-CMS/cfdm/issues/240)
+* New keyword parameter to `cfdm.NetCDFArray`: ``missing_values``
+ (https://github.com/NCAS-CMS/cfdm/issues/246)
* Fixed bug that caused `cf.write` to erroneously change external
netCDF variable names (https://github.com/NCAS-CMS/cfdm/issues/244)
diff --git a/cfdm/cfdmimplementation.py b/cfdm/cfdmimplementation.py
index 497297061..5ee5f2d2e 100644
--- a/cfdm/cfdmimplementation.py
+++ b/cfdm/cfdmimplementation.py
@@ -2168,6 +2168,7 @@ class CFDMImplementation(Implementation):
mask=True,
units=False,
calendar=None,
+ missing_values=None,
):
"""Return a netCDF array instance.
@@ -2203,9 +2204,15 @@ class CFDMImplementation(Implementation):
.. versionadded:: (cfdm) 1.10.0.2
+ missing_values: `dict`, optional
+ The missing value indicators defined by the netCDF
+ variable attributes.
+
+ .. versionadded:: (cfdm) 1.10.0.3
+
:Returns:
- NetCDF array instance
+ `NetCDFArray`
"""
cls = self.get_class("NetCDFArray")
@@ -2220,6 +2227,7 @@ class CFDMImplementation(Implementation):
mask=mask,
units=units,
calendar=calendar,
+ missing_values=missing_values,
)
def initialise_NodeCountProperties(self):
diff --git a/cfdm/data/netcdfarray.py b/cfdm/data/netcdfarray.py
index 264b579f3..01d2123b6 100644
--- a/cfdm/data/netcdfarray.py
+++ b/cfdm/data/netcdfarray.py
@@ -25,6 +25,7 @@ class NetCDFArray(abstract.Array):
mask=True,
units=False,
calendar=False,
+ missing_values=None,
source=None,
copy=True,
):
@@ -107,6 +108,13 @@ class NetCDFArray(abstract.Array):
.. versionadded:: (cfdm) 1.10.0.1
+ missing_values: `dict`, optional
+ The missing value indicators defined by the netCDF
+ variable attributes. See `get_missing_values` for
+ details.
+
+ .. versionadded:: (cfdm) 1.10.0.3
+
{{init source: optional}}
.. versionadded:: (cfdm) 1.10.0.0
@@ -164,6 +172,11 @@ class NetCDFArray(abstract.Array):
except AttributeError:
calendar = False
+ try:
+ missing_values = source._get_component("missing_values", None)
+ except AttributeError:
+ missing_values = None
+
if shape is not None:
self._set_component("shape", shape, copy=False)
@@ -176,6 +189,11 @@ class NetCDFArray(abstract.Array):
if varid is not None:
self._set_component("varid", varid, copy=False)
+ if missing_values is not None:
+ self._set_component(
+ "missing_values", missing_values.copy(), copy=False
+ )
+
self._set_component("group", group, copy=False)
self._set_component("dtype", dtype, copy=False)
self._set_component("mask", mask, copy=False)
@@ -454,6 +472,41 @@ class NetCDFArray(abstract.Array):
"""
return self._get_component("mask")
+ def get_missing_values(self):
+ """The missing value indicators from the netCDF variable.
+
+ .. versionadded:: (cfdm) 1.10.0.3
+
+ :Returns:
+
+ `dict` or `None`
+ The missing value indicators from the netCDF variable,
+ keyed by their netCDF attribute names. An empty
+ dictionary signifies that no missing values are given
+ in the file. `None` signifies that the missing values
+ have not been set.
+
+ **Examples**
+
+ >>> a.get_missing_values()
+ None
+
+ >>> b.get_missing_values()
+ {}
+
+ >>> c.get_missing_values()
+ {'missing_value': 1e20, 'valid_range': (-10, 20)}
+
+ >>> d.get_missing_values()
+ {'valid_min': -999}
+
+ """
+ out = self._get_component("missing_values", None)
+ if out is None:
+ return
+
+ return out.copy()
+
def get_ncvar(self):
"""The name of the netCDF variable containing the array.
diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py
index 29aa0ef19..cbe0c1703 100644
--- a/cfdm/read_write/netcdf/netcdfread.py
+++ b/cfdm/read_write/netcdf/netcdfread.py
@@ -5378,6 +5378,26 @@ class NetCDFRead(IORead):
# TODO: think using e.g. '/forecasts/model1' has the value for
# nc_set_variable. What about nc_set_dimension?
+ # Store the missing value indicators
+ missing_values = {}
+ for attr in (
+ "missing_value",
+ "_FillValue",
+ "valid_min",
+ "valid_max",
+ "valid_range",
+ ):
+ value = getattr(variable, attr, None)
+ if value is not None:
+ missing_values[attr] = value
+
+ valid_range = missing_values.get("valid_range")
+ if valid_range is not None:
+ try:
+ missing_values["valid_range"] = tuple(valid_range)
+ except TypeError:
+ pass
+
kwargs = {
"filename": filename,
"shape": shape,
@@ -5387,16 +5407,13 @@ class NetCDFRead(IORead):
"group": group,
"units": units,
"calendar": calendar,
+ "missing_values": missing_values,
}
if return_kwargs_only:
return kwargs
- array = self.implementation.initialise_NetCDFArray(
- ndim=ndim, # TODO: Can we get rid of this?
- size=size, # TODO: Can we get rid of this?
- **kwargs,
- )
+ array = self.implementation.initialise_NetCDFArray(**kwargs)
return array, kwargs
| NCAS-CMS/cfdm | 1698508adff6148f782a1123be7f3cbd1ecd39f7 | diff --git a/cfdm/test/test_NetCDFArray.py b/cfdm/test/test_NetCDFArray.py
new file mode 100644
index 000000000..8bd75d697
--- /dev/null
+++ b/cfdm/test/test_NetCDFArray.py
@@ -0,0 +1,78 @@
+import atexit
+import datetime
+import faulthandler
+import os
+import tempfile
+import unittest
+
+faulthandler.enable() # to debug seg faults and timeouts
+
+import cfdm
+
+n_tmpfiles = 1
+tmpfiles = [
+ tempfile.mkstemp("_test_netCDF.nc", dir=os.getcwd())[1]
+ for i in range(n_tmpfiles)
+]
+(tmpfile,) = tmpfiles
+
+
+def _remove_tmpfiles():
+ """Remove temporary files created during tests."""
+ for f in tmpfiles:
+ try:
+ os.remove(f)
+ except OSError:
+ pass
+
+
+atexit.register(_remove_tmpfiles)
+
+
+class NetCDFTest(unittest.TestCase):
+ """Unit test for the NetCDF class."""
+
+ def setUp(self):
+ """Preparations called immediately before each test method."""
+ # Disable log messages to silence expected warnings
+ cfdm.log_level("DISABLE")
+ # Note: to enable all messages for given methods, lines or
+ # calls (those without a 'verbose' option to do the same)
+ # e.g. to debug them, wrap them (for methods, start-to-end
+ # internally) as follows:
+ #
+ # cfdm.log_level('DEBUG')
+ # < ... test code ... >
+ # cfdm.log_level('DISABLE')
+
+ def test_NetCDFArray_get_missing_values(self):
+ """Test NetCDFArray.get_missing_values."""
+ f = cfdm.example_field(0)
+
+ f.set_property("missing_value", -999)
+ f.set_property("_FillValue", -3)
+ f.set_property("valid_range", [-111, 222])
+ cfdm.write(f, tmpfile)
+
+ g = cfdm.read(tmpfile)[0]
+ self.assertEqual(
+ g.data.source().get_missing_values(),
+ {
+ "missing_value": -999.0,
+ "_FillValue": -3,
+ "valid_range": (-111, 222),
+ },
+ )
+
+ c = g.coordinate("latitude")
+ self.assertEqual(c.data.source().get_missing_values(), {})
+
+ a = cfdm.NetCDFArray("file.nc", "ncvar")
+ self.assertIsNone(a.get_missing_values())
+
+
+if __name__ == "__main__":
+ print("Run date:", datetime.datetime.now())
+ cfdm.environment()
+ print("")
+ unittest.main(verbosity=2)
| Allow file missing data indicator values to be stored on `NetCDFArray` instances
It can be useful to applications to know the missing data indicator without opening the netCDF file. This is the case for active storage reductions currently being implemented in cf-python.
It is almost always the case that the netCDF file is open and available when the `NetCDFArray` instance is created, so it is easy for these to be stored on the object if desired, e.g.
```python
>>> n = NetCDFArray(
... 'file.nc', 'ncvar', missing_values={"missing_value": -999.0, "valid_range": (-111, 222)}
... )
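>>> # Illustrative sketch: get_missing_values() is the accessor added by
>>> # the patch in this record, returning a copy of the stored indicators.
>>> n.get_missing_values()
{'missing_value': -999.0, 'valid_range': (-111, 222)}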
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_NetCDFArray.py::NetCDFTest::test_NetCDFArray_get_missing_values"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-03-03T12:34:34Z" | mit |
|
NCAS-CMS__cfdm-250 | diff --git a/Changelog.rst b/Changelog.rst
index 05fef7152..0987737fc 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -1,3 +1,14 @@
+Version 1.10.?.?
+----------------
+
+**2023-??-??**
+
+* Fixed bug that caused `cf.Data.second_element` to fail for some data
+ shapes, namely for a final axis with size one.
+ (https://github.com/NCAS-CMS/cfdm/issues/249)
+
+----
+
Version 1.10.0.3
----------------
diff --git a/cfdm/data/data.py b/cfdm/data/data.py
index fb08dc004..39acd7570 100644
--- a/cfdm/data/data.py
+++ b/cfdm/data/data.py
@@ -3006,9 +3006,7 @@ class Data(Container, NetCDFHDF5, Files, core.Data):
bar <class 'str'>
"""
- return self._item(
- (slice(0, 1, 1),) * (self.ndim - 1) + (slice(1, 2, 1),)
- )
+ return self._item(np.unravel_index(1, self.shape))
@_inplace_enabled(default=False)
def to_memory(self, inplace=False):
| NCAS-CMS/cfdm | 55a5f5bc8ec0a231d403c2284634339342104f34 | diff --git a/cfdm/test/test_Data.py b/cfdm/test/test_Data.py
index 940a0d585..80a52610b 100644
--- a/cfdm/test/test_Data.py
+++ b/cfdm/test/test_Data.py
@@ -730,6 +730,21 @@ class DataTest(unittest.TestCase):
with self.assertRaises(ValueError):
d._original_filenames(update="file4.nc", clear=True)
+ def test_Data_first_element(self):
+ """Test the `first_element` Data methods."""
+ d = cfdm.Data(np.arange(6).reshape(1, 6, 1))
+ self.assertEqual(d.first_element(), 0)
+
+ def test_Data_second_element(self):
+ """Test the `second_element` Data methods."""
+ d = cfdm.Data(np.arange(6).reshape(1, 6, 1))
+ self.assertEqual(d.second_element(), 1)
+
+ def test_Data_last_element(self):
+ """Test the `last_element` Data methods."""
+ d = cfdm.Data(np.arange(6).reshape(1, 6, 1))
+ self.assertEqual(d.last_element(), 5)
+
if __name__ == "__main__":
print("Run date:", datetime.datetime.now())
| Error from `Data.second_element` for some data shapes
For some `Data` shapes, the retrieval of the second element causes an error:
```python
>>> import cfdm
>>> import numpy as np
>>> d = cfdm.Data(np.arange(6).reshape(1, 3, 2)) # This shape is OK
>>> d.array
array([[[0, 1],
        [2, 3],
        [4, 5]]])
>>> d.second_element()
1
>>> d = cfdm.Data(np.arange(6).reshape(1, 6, 1)) # Not OK
>>> d.second_element()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[51], line 1
----> 1 d.second_element()
File ~/cfdm/cfdm/data/data.py:3009, in Data.second_element(self)
2981 def second_element(self):
2982 """Return the second element of the data as a scalar.
2983
2984 .. versionadded:: (cfdm) 1.7.0
(...)
3007
3008 """
-> 3009 return self._item(
3010 (slice(0, 1, 1),) * (self.ndim - 1) + (slice(1, 2, 1),)
3011 )
File ~/cfdm/cfdm/data/data.py:655, in Data._item(self, index)
652 array = self[index].array
654 if not numpy.ma.isMA(array):
--> 655 return array.item()
657 mask = array.mask
658 if mask is numpy.ma.nomask or not mask.item():
ValueError: can only convert an array of size 1 to a Python scalar
```
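The fix above swaps the hand-built slice tuple for `np.unravel_index`, which copes with any shape, including a trailing size 1 axis. A standalone illustration (plain numpy, not cfdm code):
```python
import numpy as np

a = np.arange(6).reshape(1, 6, 1)
index = np.unravel_index(1, a.shape)  # (0, 1, 0): the second element in C order
print(a[index])  # 1
```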
```python
>>> cfdm.environment(paths=False)
Platform: Linux-5.15.0-67-generic-x86_64-with-glibc2.35
HDF5 library: 1.12.1
netcdf library: 4.8.1
Python: 3.10.9
netCDF4: 1.6.0
numpy: 1.22.3
cfdm.core: 1.10.0.3
cftime: 1.6.2
netcdf_flattener: 1.2.0
cfdm: 1.10.0.3
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Data.py::DataTest::test_Data_second_element"
] | [
"cfdm/test/test_Data.py::DataTest::test_Data__format__",
"cfdm/test/test_Data.py::DataTest::test_Data__repr__str",
"cfdm/test/test_Data.py::DataTest::test_Data__setitem__",
"cfdm/test/test_Data.py::DataTest::test_Data_any",
"cfdm/test/test_Data.py::DataTest::test_Data_apply_masking",
"cfdm/test/test_Data.py::DataTest::test_Data_array",
"cfdm/test/test_Data.py::DataTest::test_Data_datetime_array",
"cfdm/test/test_Data.py::DataTest::test_Data_dtype_mask",
"cfdm/test/test_Data.py::DataTest::test_Data_equals",
"cfdm/test/test_Data.py::DataTest::test_Data_filled",
"cfdm/test/test_Data.py::DataTest::test_Data_first_element",
"cfdm/test/test_Data.py::DataTest::test_Data_flatten",
"cfdm/test/test_Data.py::DataTest::test_Data_get_compressed_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_get_count",
"cfdm/test/test_Data.py::DataTest::test_Data_get_index",
"cfdm/test/test_Data.py::DataTest::test_Data_get_list",
"cfdm/test/test_Data.py::DataTest::test_Data_insert_dimension",
"cfdm/test/test_Data.py::DataTest::test_Data_last_element",
"cfdm/test/test_Data.py::DataTest::test_Data_maximum_minimum_sum_squeeze",
"cfdm/test/test_Data.py::DataTest::test_Data_orginal_filenames",
"cfdm/test/test_Data.py::DataTest::test_Data_transpose",
"cfdm/test/test_Data.py::DataTest::test_Data_unique"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2023-03-15T17:55:11Z" | mit |
|
NCAS-CMS__cfdm-263 | diff --git a/Changelog.rst b/Changelog.rst
index 73d3efdea..ff7f4a697 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -6,6 +6,8 @@ Version 1.10.1.1
* Fix bug that caused very slow reads of datasets with compession by
gathering or DSG ragged arrays
(https://github.com/NCAS-CMS/cfdm/issues/260)
+* Fix bug that prevented `cfdm.read` from accessing remote files in URL
+ locations (https://github.com/NCAS-CMS/cfdm/issues/262)
----
diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py
index ad6e1baf9..c3c3c8c75 100644
--- a/cfdm/read_write/netcdf/netcdfread.py
+++ b/cfdm/read_write/netcdf/netcdfread.py
@@ -8,6 +8,7 @@ import tempfile
from ast import literal_eval
from copy import deepcopy
from functools import reduce
+from urllib.parse import urlparse
import netCDF4
import netcdf_flattener
@@ -584,6 +585,66 @@ class NetCDFRead(IORead):
return cdl
+ @classmethod
+ def is_file(cls, filename):
+ """Return `True` if *filename* is a file.
+
+ Note that a remote URL starting with ``http://`` or
+ ``https://`` is always considered as a file.
+
+ .. versionadded:: (cfdm) 1.10.1.1
+
+ :Parameters:
+
+ filename: `str`
+ The name of the file.
+
+ :Returns:
+
+ `bool`
+ Whether or not *filename* is a file.
+
+ **Examples**
+
+ >>> {{package}}.{{class}}.is_file('file.nc')
+ True
+ >>> {{package}}.{{class}}.is_file('http://file.nc')
+ True
+ >>> {{package}}.{{class}}.is_file('https://file.nc')
+ True
+
+ """
+ # Assume that URLs are files
+ u = urlparse(filename)
+ if u.scheme in ("http", "https"):
+ return True
+
+ return os.path.isfile(filename)
+
+ @classmethod
+ def is_dir(cls, filename):
+ """Return `True` if *filename* is a directory.
+
+ .. versionadded:: (cfdm) 1.10.1.1
+
+ :Parameters:
+
+ filename: `str`
+ The name of the file.
+
+ :Returns:
+
+ `bool`
+ Whether or not *filename* is a directory.
+
+ **Examples**
+
+ >>> {{package}}.{{class}}.is_dir('file.nc')
+ False
+
+ """
+ return os.path.isdir(filename)
+
def default_netCDF_fill_value(self, ncvar):
"""The default netCDF fill value for a variable.
@@ -803,10 +864,10 @@ class NetCDFRead(IORead):
filename = os.path.expanduser(os.path.expandvars(filename))
- if os.path.isdir(filename):
+ if self.is_dir(filename):
raise IOError(f"Can't read directory {filename}")
- if not os.path.isfile(filename):
+ if not self.is_file(filename):
raise IOError(f"Can't read non-existent file {filename}")
g["filename"] = filename
diff --git a/cfdm/read_write/read.py b/cfdm/read_write/read.py
index 4d8ec8311..7207e6323 100644
--- a/cfdm/read_write/read.py
+++ b/cfdm/read_write/read.py
@@ -294,6 +294,9 @@ def read(
>>> j = cfdm.read('parent.nc', external=['external1.nc', 'external2.nc'])
"""
+ # Initialise a netCDF read object
+ netcdf = NetCDFRead(_implementation)
+
# Parse the field parameter
if extra is None:
extra = ()
@@ -302,19 +305,15 @@ def read(
filename = os.path.expanduser(os.path.expandvars(filename))
- if os.path.isdir(filename):
+ if netcdf.is_dir(filename):
raise IOError(f"Can't read directory {filename}")
- if not os.path.isfile(filename):
+ if not netcdf.is_file(filename):
raise IOError(f"Can't read non-existent file {filename}")
# ----------------------------------------------------------------
# Read the file into field/domain contructs
# ----------------------------------------------------------------
-
- # Initialise a netCDF read object
- netcdf = NetCDFRead(_implementation)
-
cdl = False
if netcdf.is_cdl_file(filename):
# Create a temporary netCDF file from input CDL
| NCAS-CMS/cfdm | 55572dac1b57fd06e97300577c8c8b1e0b6bcab1 | diff --git a/cfdm/test/test_read_write.py b/cfdm/test/test_read_write.py
index 069522b35..ec27b0207 100644
--- a/cfdm/test/test_read_write.py
+++ b/cfdm/test/test_read_write.py
@@ -983,6 +983,15 @@ class read_writeTest(unittest.TestCase):
self.assertEqual(len(h), 1)
self.assertTrue(f.equals(h[0]))
+ def test_read_url(self):
+ """Test reading urls."""
+ remote = "http://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc"
+ # Check that the file remote is indeed accesible
+ _ = netCDF4.Dataset(remote, "r")
+ # Check that cfdm can access it
+ f = cfdm.read(remote)
+ self.assertEqual(len(f), 1)
+
if __name__ == "__main__":
print("Run date:", datetime.datetime.now())
| Reading of `http://` remote files does not work
In cfdm `1.10.1.0` we get:
```python
>>> remote_file = 'http://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc'
>>> import netCDF4
>>> nc = netCDF4.Dataset(remote_file, 'r') # works
>>> import cfdm
>>> j = cfdm.read(remote_file) # doesn't work
<snip>
packages/cfdm/read_write/netcdf/netcdfread.py:810, in NetCDFRead.read(self, filename, extra, default_version, external, extra_read_vars, _scan_only, verbose, mask, warnings, warn_valid, domain)
807 raise IOError(f"Can't read directory {filename}")
809 if not os.path.isfile(filename):
--> 810 raise IOError(f"Can't read non-existent file {filename}")
812 g["filename"] = filename
814 # ------------------------------------------------------------
815 # Open the netCDF file to be read
816 # ------------------------------------------------------------
OSError: Can't read non-existent file http://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc
```
The problem is that cfdm is testing `remote_file` for being a file with `os.path.isfile`, which returns `False` for remote URLs.
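The fix in the patch above is, in outline, to treat recognised URL schemes as files before falling back to `os.path.isfile`:
```python
import os
from urllib.parse import urlparse

def is_file(filename):
    # Remote http(s) URLs are assumed to name readable netCDF datasets
    if urlparse(filename).scheme in ("http", "https"):
        return True
    return os.path.isfile(filename)
```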
PR to follow. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_read_write.py::read_writeTest::test_read_url"
] | [
"cfdm/test/test_read_write.py::read_writeTest::test_read_zero_length_file",
"cfdm/test/test_read_write.py::read_writeTest::test_read_mask",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename_expansion",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename",
"cfdm/test/test_read_write.py::read_writeTest::test_write_coordinates"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2023-06-19T13:59:13Z" | mit |
|
NCAS-CMS__cfdm-269 | diff --git a/Changelog.rst b/Changelog.rst
index e4e178769..5cf7d255c 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -1,3 +1,13 @@
+Version 1.10.1.2
+----------------
+
+**2023-08-31**
+
+* Fix bug that prevented "https://" netCDF files from being read
+ (https://github.com/NCAS-CMS/cfdm/issues/268)
+
+----
+
Version 1.10.1.1
----------------
diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py
index c3c3c8c75..e6a6295cd 100644
--- a/cfdm/read_write/netcdf/netcdfread.py
+++ b/cfdm/read_write/netcdf/netcdfread.py
@@ -481,8 +481,10 @@ class NetCDFRead(IORead):
def is_netcdf_file(cls, filename):
"""Return `True` if the file is a netCDF file.
- Note that the file type is determined by inspecting the file's
- contents and any file suffix is not not considered.
+ The file type is determined by inspecting the file's contents
+ and any file suffix is not not considered. However, file names
+ starting ``https://`` or ``http://`` are assumed, without
+ checking, to be netCDF files.
:Parameters:
@@ -501,7 +503,7 @@ class NetCDFRead(IORead):
"""
# Assume that URLs are in netCDF format
- if filename.startswith("http://"):
+ if filename.startswith("https://") or filename.startswith("http://"):
return True
# Read the magic number
| NCAS-CMS/cfdm | 142accf27fbbc052473b4eee47daf0e81c88df3a | diff --git a/cfdm/test/test_read_write.py b/cfdm/test/test_read_write.py
index ec27b0207..e60779cb7 100644
--- a/cfdm/test/test_read_write.py
+++ b/cfdm/test/test_read_write.py
@@ -985,12 +985,11 @@ class read_writeTest(unittest.TestCase):
def test_read_url(self):
"""Test reading urls."""
- remote = "http://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc"
- # Check that the file remote is indeed accesible
- _ = netCDF4.Dataset(remote, "r")
- # Check that cfdm can access it
- f = cfdm.read(remote)
- self.assertEqual(len(f), 1)
+ for scheme in ("http", "https"):
+ remote = f"{scheme}://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc"
+ # Check that cfdm can access it
+ f = cfdm.read(remote)
+ self.assertEqual(len(f), 1)
if __name__ == "__main__":
| Reading `https://` netCDF files does not work
At version 1.10.1.1, reading an `https://` file does not work, but reading an `http://` file does work:
```python
>>> import cfdm
>>> remote = "http://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc"
>>> cfdm.read(remote)
[<Field: air_temperature_anomaly(time(2082), latitude(36), longitude(72)) observations>]
>>> remote = "https://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc"
>>> cfdm.read(remote)
--------------------------------------------------------------------------
OSError Traceback (most recent call last)
Cell In[3], line 1
----> 1 f = cfdm.read(remote)
File ~/cfdm/cfdm/read_write/read.py:360, in read(filename, external, extra, verbose, warnings, warn_valid, mask, domain, _implementation)
355 raise IOError(
356 f"Can't determine format of file {filename} "
357 f"generated from CDL file {cdl_filename}"
358 )
359 else:
--> 360 raise IOError(f"Can't determine format of file {filename}")
362 # ----------------------------------------------------------------
363 # Return the field or domain constructs
364 # ----------------------------------------------------------------
365 return fields
OSError: Can't determine format of file https://psl.noaa.gov/thredds/dodsC/Datasets/cru/crutem5/Monthlies/air.mon.anom.nobs.nc
```
This is due to a simple bug in `cfdm.read_write.netcdf.netcdfread.is_netcdf_file`. PR to follow. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_read_write.py::read_writeTest::test_read_url"
] | [
"cfdm/test/test_read_write.py::read_writeTest::test_read_mask",
"cfdm/test/test_read_write.py::read_writeTest::test_read_zero_length_file",
"cfdm/test/test_read_write.py::read_writeTest::test_write_coordinates",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename_expansion"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2023-08-30T08:33:32Z" | mit |
|
NCAS-CMS__cfdm-288 | diff --git a/Changelog.rst b/Changelog.rst
index d5c425fe2..f028a514f 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -1,3 +1,13 @@
+Version 1.11.1.0
+----------------
+
+**2024-??-??**
+
+* New keyword parameter to `cfdm.Field.insert_dimension`:
+ ``constructs`` (https://github.com/NCAS-CMS/cfdm/issues/287)
+
+----
+
Version 1.11.0.0
----------------
diff --git a/cfdm/field.py b/cfdm/field.py
index 5b16ce8eb..90ba5a5f3 100644
--- a/cfdm/field.py
+++ b/cfdm/field.py
@@ -7,6 +7,7 @@ from . import (
Constructs,
Count,
Domain,
+ DomainAxis,
Index,
List,
core,
@@ -105,6 +106,7 @@ class Field(
instance._AuxiliaryCoordinate = AuxiliaryCoordinate
instance._Constructs = Constructs
instance._Domain = Domain
+ instance._DomainAxis = DomainAxis
instance._RaggedContiguousArray = RaggedContiguousArray
instance._RaggedIndexedArray = RaggedIndexedArray
instance._RaggedIndexedContiguousArray = RaggedIndexedContiguousArray
@@ -1032,8 +1034,6 @@ class Field(
return self._RaggedContiguousArray(
compressed_data,
shape=data.shape,
- # size=data.size,
- # ndim=data.ndim,
count_variable=count_variable,
)
@@ -1041,8 +1041,6 @@ class Field(
return self._RaggedIndexedArray(
compressed_data,
shape=data.shape,
- # size=data.size,
- # ndim=data.ndim,
index_variable=index_variable,
)
@@ -1052,8 +1050,6 @@ class Field(
return self._RaggedIndexedContiguousArray(
compressed_data,
shape=data.shape,
- # size=data.size,
- # ndim=data.ndim,
count_variable=count_variable,
index_variable=index_variable,
)
@@ -1173,6 +1169,37 @@ class Field(
y = Array_func(f, compressed_data, data=data, **kwargs)
data._set_CompressedArray(y, copy=False)
+ def _derive_count(flattened_data):
+ """Derive the DSG count for each feature.
+
+ :Parameters:
+
+ flattened_data: array_like
+ The 2-d flattened array from which to derive the
+ counts. The leading dimension is the number of
+ features.
+
+ :Returns:
+
+ `list`
+ The count for each feature.
+
+ """
+ count = []
+ masked = np.ma.masked
+ for d in flattened_data:
+ d = d.array
+ last = d.size
+ for i in d[::-1]:
+ if i is not masked:
+ break
+
+ last -= 1
+
+ count.append(last)
+
+ return count
+
f = _inplace_enabled_define_and_cleanup(self)
data = f.get_data(None)
@@ -1224,18 +1251,25 @@ class Field(
# --------------------------------------------------------
flattened_data = data.flatten(range(data.ndim - 1))
- count = []
- masked = np.ma.masked
- for d in flattened_data:
- d = d.array
- last = d.size
- for i in d[::-1]:
- if i is not masked:
- break
+ # Try to get the counts from an auxiliary coordinate
+ # construct that spans the same axes as the field data
+ count = None
+ data_axes = f.get_data_axes()
+ construct_axes = f.constructs.data_axes()
+ for key, c in (
+ f.auxiliary_coordinates().filter_by_data(todict=True).items()
+ ):
+ if construct_axes[key] != data_axes:
+ continue
- last -= 1
+ count = _derive_count(c.data.flatten(range(c.ndim - 1)))
+ break
- count.append(last)
+ if count is None:
+ # When no auxiliary coordinate constructs span the
+ # field data dimensions, get the counts from the field
+ # data.
+ count = _derive_count(flattened_data)
N = sum(count)
compressed_field_data = _empty_compressed_data(data, (N,))
@@ -2081,7 +2115,9 @@ class Field(
return tuple([indices[axis] for axis in self.get_data_axes()])
@_inplace_enabled(default=False)
- def insert_dimension(self, axis, position=0, inplace=False):
+ def insert_dimension(
+ self, axis, position=0, constructs=False, inplace=False
+ ):
"""Expand the shape of the data array.
Inserts a new size 1 axis, corresponding to an existing domain
@@ -2097,6 +2133,9 @@ class Field(
The identifier of the domain axis construct
corresponding to the inserted axis.
+ If *axis* is `None` then a new domain axis construct
+ will be created for the inserted dimension.
+
*Parameter example:*
``axis='domainaxis2'``
@@ -2112,6 +2151,13 @@ class Field(
*Parameter example:*
``position=-1``
+ constructs: `bool`
+ If True then also insert the new axis into all
+ metadata constructs that don't already include it. By
+ default, metadata constructs are not changed.
+
+ .. versionadded:: (cfdm) 1.11.1.0
+
{{inplace: `bool`, optional}}
:Returns:
@@ -2132,22 +2178,30 @@ class Field(
(19, 73, 1, 96)
>>> f.data.shape
(19, 73, 1, 96)
+ >>> f.insert_dimension(None, 1).data.shape
+ (19, 1, 73, 1, 96)
"""
f = _inplace_enabled_define_and_cleanup(self)
- domain_axis = f.domain_axes(todict=True).get(axis)
- if domain_axis is None:
- raise ValueError(f"Can't insert non-existent domain axis: {axis}")
-
- if domain_axis.get_size() != 1:
- raise ValueError(
- f"Can only insert axis of size 1. Axis {axis!r} has size "
- f"{domain_axis.get_size()}"
+ if axis is None:
+ axis = f.set_construct(self._DomainAxis(1))
+ else:
+ axis, domain_axis = f.domain_axis(
+ axis,
+ item=True,
+ default=ValueError("Can't identify a unique axis to insert"),
)
+ if domain_axis.get_size() != 1:
+ raise ValueError(
+ f"Can only insert axis of size 1. Axis {axis!r} has size "
+ f"{domain_axis.get_size()}"
+ )
+
data_axes = f.get_data_axes(default=None)
if data_axes is not None:
+ data_axes0 = data_axes[:]
if axis in data_axes:
raise ValueError(
f"Can't insert a duplicate data array axis: {axis!r}"
@@ -2159,9 +2213,44 @@ class Field(
# Expand the dims in the field's data array
super(Field, f).insert_dimension(position, inplace=True)
+ # Update the axes
if data_axes is not None:
f.set_data_axes(data_axes)
+ if constructs:
+ if data_axes is None:
+ data_axes0 = []
+ position = 0
+
+ for key, construct in f.constructs.filter_by_data(
+ todict=True
+ ).items():
+ data = construct.get_data(
+ None, _units=False, _fill_value=False
+ )
+ if data is None:
+ continue
+
+ construct_axes = list(f.get_data_axes(key))
+ if axis in construct_axes:
+ continue
+
+ # Find the position of the new axis
+ c_position = position
+ for a in data_axes0:
+ if a not in construct_axes:
+ c_position -= 1
+
+ if c_position < 0:
+ c_position = 0
+
+ # Expand the dims in the construct's data array
+ construct.insert_dimension(c_position, inplace=True)
+
+ # Update the construct axes
+ construct_axes.insert(c_position, axis)
+ f.set_data_axes(axes=construct_axes, key=key)
+
return f
def convert(self, *identity, full_domain=True, **filter_kwargs):
| NCAS-CMS/cfdm | 92f8fdd4b9cdcea27479b3467988f1b453946427 | diff --git a/cfdm/test/test_Field.py b/cfdm/test/test_Field.py
index 200dc441f..cc6ab7b22 100644
--- a/cfdm/test/test_Field.py
+++ b/cfdm/test/test_Field.py
@@ -539,8 +539,8 @@ class FieldTest(unittest.TestCase):
f.set_construct(cfdm.DomainAxis(0), key="")
self.assertTrue(f.has_construct(""))
- def test_Field_squeeze_transpose_insert_dimension(self):
- """Test squeeze, transpose and `insert_dimension` methods."""
+ def test_Field_squeeze_transpose(self):
+ """Test squeeze and transpose methods."""
f = self.f1
g = f.transpose()
@@ -555,6 +555,9 @@ class FieldTest(unittest.TestCase):
(g.get_data_axes(), f.get_data_axes()),
)
+ def test_Field_insert_dimension(self):
+ """Test cfdm.Field.insert_dimension method."""
+ f = self.f1
g = f.copy()
key = g.set_construct(cfdm.DomainAxis(1))
@@ -567,6 +570,10 @@ class FieldTest(unittest.TestCase):
self.assertEqual(h.data.ndim, f.data.ndim + 1)
self.assertEqual(h.get_data_axes()[:-1], f.get_data_axes())
+ self.assertEqual(g.cell_measure().ndim, 2)
+ h = g.insert_dimension(None, constructs=True)
+ self.assertEqual(h.cell_measure().ndim, 3)
+
def test_Field_compress_uncompress(self):
"""Test the compress and uncompress Field methods."""
contiguous = os.path.join(
| Allow discrete sampling geometries with 1-d data to be written as ragged arrays, and improve the compression process
Currently, a 1-d DSG cannot be compressed so that it is written out to a netCDF file as a ragged array. E.g.
```python
>>> print(dsg)
Field: mole_fraction_of_ozone_in_air (ncvar%O3_TECO)
----------------------------------------------------
Data : mole_fraction_of_ozone_in_air(ncdim%obs(11160)) ppb
Auxiliary coords: time(ncdim%obs(11160)) = [2017-07-03 11:15:07, ..., 2017-07-03 14:21:06] standard
: altitude(ncdim%obs(11160)) = [2577.927001953125, ..., 151.16905212402344] m
: air_pressure(ncdim%obs(11160)) = [751.6758422851562, ..., 1006.53076171875] hPa
: latitude(ncdim%obs(11160)) = [52.56147766113281, ..., 52.0729866027832] degree_north
: longitude(ncdim%obs(11160)) = [0.3171832859516144, ..., -0.6249311566352844] degree_east
: cf_role=trajectory_id(cf_role=trajectory_id(1)) = [STANCO]
```
This can be solved by making it possible to insert the `cf_role=trajectory_id` dimension into the data _and_ the appropriate metadata constructs, so it would look like this (note that the `cf_role=trajectory_id` construct remains unchanged):
```python
Field: mole_fraction_of_ozone_in_air (ncvar%O3_TECO)
----------------------------------------------------
Data : mole_fraction_of_ozone_in_air(cf_role=trajectory_id(1), ncdim%obs(11160)) ppb
Auxiliary coords: time(cf_role=trajectory_id(1), ncdim%obs(11160)) = [[2017-07-03 11:15:07, ..., 2017-07-03 14:21:06]] standard
: altitude(cf_role=trajectory_id(1), ncdim%obs(11160)) = [[2577.927001953125, ..., 151.16905212402344]] m
: air_pressure(cf_role=trajectory_id(1), ncdim%obs(11160)) = [[751.6758422851562, ..., 1006.53076171875]] hPa
: latitude(cf_role=trajectory_id(1), ncdim%obs(11160)) = [[52.56147766113281, ..., 52.0729866027832]] degree_north
: longitude(cf_role=trajectory_id(1), ncdim%obs(11160)) = [[0.3171832859516144, ..., -0.6249311566352844]] degree_east
: cf_role=trajectory_id(cf_role=trajectory_id(1)) = [STANCO]
```
This can be done by adding a `constructs` keyword to `cf.Field.insert_dimension` that works in the same way as the same keyword on `cf.Field.transpose`.
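A sketch of how the proposed keyword might be called (the signature follows the cfdm patch above; the axis identifier is illustrative):
```python
# Insert the size-1 trajectory axis into the field's data and, with
# constructs=True, into the metadata constructs that don't already span it.
# 'domainaxis1' stands for whichever key identifies that size-1 axis.
g = dsg.insert_dimension('domainaxis1', constructs=True)

# Alternatively, axis=None creates a brand new size-1 domain axis and inserts it:
h = dsg.insert_dimension(None, constructs=True)
```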
_Edit: To be clear, this is about allowing a manipulation that turns a 1-d DSG into a 2-d one!_
Whilst we're at it, the compression process in `cf.Field.compress` could be improved, to avoid the following situation: If the data contains trailing missing values at positions where there are non-missing coordinate values, then those non-missing coordinate values are currently lost.
PR to follow. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_Field.py::FieldTest::test_Field_insert_dimension"
] | [
"cfdm/test/test_Field.py::FieldTest::test_Field_CONSTRUCTS",
"cfdm/test/test_Field.py::FieldTest::test_Field_del_construct",
"cfdm/test/test_Field.py::FieldTest::test_Field_field_ancillary",
"cfdm/test/test_Field.py::FieldTest::test_Field_set_get_del_has_data",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_ancillary",
"cfdm/test/test_Field.py::FieldTest::test_Field_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field_bounds",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_axis",
"cfdm/test/test_Field.py::FieldTest::test_Field_convert",
"cfdm/test/test_Field.py::FieldTest::test_Field_construct_item",
"cfdm/test/test_Field.py::FieldTest::test_Field__repr__str__dump_construct_type",
"cfdm/test/test_Field.py::FieldTest::test_Field_cell_method",
"cfdm/test/test_Field.py::FieldTest::test_Field_data_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_squeeze_transpose",
"cfdm/test/test_Field.py::FieldTest::test_Field__init__",
"cfdm/test/test_Field.py::FieldTest::test_Field_coordinate_reference",
"cfdm/test/test_Field.py::FieldTest::test_Field_has_geometry",
"cfdm/test/test_Field.py::FieldTest::test_Field_indices",
"cfdm/test/test_Field.py::FieldTest::test_Field_dimension_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field___getitem__",
"cfdm/test/test_Field.py::FieldTest::test_Field_del_properties",
"cfdm/test/test_Field.py::FieldTest::test_Field_get_original_filenames",
"cfdm/test/test_Field.py::FieldTest::test_Field_domain_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_has_construct",
"cfdm/test/test_Field.py::FieldTest::test_Field_cell_measure",
"cfdm/test/test_Field.py::FieldTest::test_Field_creation_commands",
"cfdm/test/test_Field.py::FieldTest::test_Field_climatological_time_axes",
"cfdm/test/test_Field.py::FieldTest::test_Field_auxiliary_coordinate",
"cfdm/test/test_Field.py::FieldTest::test_Field_PROPERTIES",
"cfdm/test/test_Field.py::FieldTest::test_Field_apply_masking",
"cfdm/test/test_Field.py::FieldTest::test_Field_equals"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2024-02-20T17:54:10Z" | mit |
|
NCAS-CMS__cfdm-61 | diff --git a/Changelog.rst b/Changelog.rst
index 24dca59c7..61cb26149 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,9 @@ version 1.8.6
(https://github.com/NCAS-CMS/cfdm/issues/55)
* Implemented the reading and writing of netCDF4 group hierarchies for
CF-1.8 (https://github.com/NCAS-CMS/cfdm/issues/13)
+* Renamed to lower-case (but otherwise identical) names all functions which
+ get and set global constants: `cfdm.atol`, `cfdm.rtol`, `cfdm.log_level`.
+ The old names e.g. `cfdm.ATOL` remain functional as aliases.
* New method: `cfdm.Field.nc_variable_groups`
* New method: `cfdm.Field.nc_set_variable_groups`
* New method: `cfdm.Field.nc_clear_variable_groups`
@@ -23,6 +26,7 @@ version 1.8.6
* New method: `cfdm.DomainAxis.nc_clear_dimension_groups`
* New keyword parameter to `cfdm.write`: ``group``
* New dependency: ``netcdf_flattener>=1.1.0``
+* New function: `cfdm.configuration`
version 1.8.5
-------------
diff --git a/cfdm/__init__.py b/cfdm/__init__.py
index 4cf6a6d8e..667e2f58c 100644
--- a/cfdm/__init__.py
+++ b/cfdm/__init__.py
@@ -75,6 +75,7 @@ from .functions import (
RTOL,
abspath,
atol,
+ configuration,
environment,
log_level,
rtol,
diff --git a/cfdm/constants.py b/cfdm/constants.py
index ce5387caa..c19978128 100644
--- a/cfdm/constants.py
+++ b/cfdm/constants.py
@@ -6,15 +6,29 @@ from enum import Enum
import numpy
+"""
+A dictionary of useful constants.
+
+Whilst the dictionary may be modified directly, it is safer to
+retrieve and set the values with the dedicated get-and-set functions.
+
+:Keys:
+
+ ATOL : float
+ The value of absolute tolerance for testing numerically
+ tolerant equality.
+
+ RTOL : float
+ The value of relative tolerance for testing numerically
+ tolerant equality.
+
+ LOG_LEVEL : str
+ The minimal level of seriousness for which log messages are shown.
+ See `cf.log_level`.
+"""
CONSTANTS = {
- # The value of absolute tolerance for testing numerically tolerant
- # equality.
- 'RTOL': sys.float_info.epsilon,
- # The value of relative tolerance for testing numerically tolerant
- # equality.
'ATOL': sys.float_info.epsilon,
- # The minimal level of seriousness for which log messages are shown. See
- # functions.log_level().
+ 'RTOL': sys.float_info.epsilon,
'LOG_LEVEL': logging.getLevelName(logging.getLogger().level),
}
diff --git a/cfdm/functions.py b/cfdm/functions.py
index ce428a547..049cc9262 100644
--- a/cfdm/functions.py
+++ b/cfdm/functions.py
@@ -16,6 +16,128 @@ from . import (__version__,
from .constants import CONSTANTS, ValidLogLevels
+def configuration(atol=None, rtol=None, log_level=None):
+ '''View or set any number of constants in the project-wide configuration.
+
+ Global constants that are provided in a dictionary to view, and can be set
+ in any combination, are:
+
+ * `atol`
+ * `rtol`
+ * `log_level`
+
+ These are constants that apply throughout `cfdm`, except for specific
+ functions if overriden by keyword arguments provided to those.
+
+ Note that setting a constant using this function is equivalent to setting
+ it by means of a specific function of the same name, e.g. via `cfdm.atol`,
+ but in this case mutliple constants can be set at once.
+
+ .. versionadded:: 1.8.6
+
+ .. seealso:: `atol`, `rtol`, `log_level`
+
+ :Parameters:
+
+ atol: `float`, optional
+ The new value of absolute tolerance. The default is to not
+ change the current value.
+
+ rtol: `float`, optional
+ The new value of relative tolerance. The default is to not
+ change the current value.
+
+ log_level: `str` or `int`, optional
+ The new value of the minimal log severity level. This can
+ be specified either as a string equal (ignoring case) to
+ the named set of log levels or identifier 'DISABLE', or an
+ integer code corresponding to each of these, namely:
+
+ * ``'DISABLE'`` (``0``);
+ * ``'WARNING'`` (``1``);
+ * ``'INFO'`` (``2``);
+ * ``'DETAIL'`` (``3``);
+ * ``'DEBUG'`` (``-1``).
+
+ :Returns:
+
+ `dict`
+ The value of the project-wide constants prior to the change, or
+ the current value if no new value was specified.
+
+ **Examples:**
+
+ # View the full global configuration of constants:
+ >>> cfdm.configuration()
+ {'atol': 2.220446049250313e-16,
+ 'rtol': 2.220446049250313e-16,
+ 'log_level': 'WARNING'}
+ # See a change in the constants reflected in the return value:
+ >>> cfdm.log_level('DEBUG')
+ 'WARNING'
+ >>> cfdm.configuration()
+ {'atol': 2.220446049250313e-16,
+ 'rtol': 2.220446049250313e-16,
+ 'log_level': 'DEBUG'}
+
+ # Access specific values by standard Python dictionary key querying, e.g:
+ >>> cfdm.configuration()['atol']
+ 2.220446049250313e-16
+ # Note the equivalency:
+ >>> cfdm.configuration()['atol'] == cfdm.atol()
+ True
+
+ # Set multiple constants at once. Note this example is equivalent to
+ # running `cfdm.atol()` and `cfdm.log_level()` separately:
+ >>> cfdm.configuration(atol=5e-14, log_level='INFO')
+ {'atol': 2.220446049250313e-16,
+ 'rtol': 2.220446049250313e-16,
+ 'log_level': 'DEBUG'}
+ >>> cfdm.configuration()
+ {'atol': 5e-14, 'rtol': 2.220446049250313e-16, 'log_level': 'INFO'}
+
+ # Set just one constant, here equivalent to setting it via `cfdm.rtol()`:
+ >>> cfdm.configuration(rtol=1e-17)
+ {'atol': 5e-14, 'rtol': 2.220446049250313e-16, 'log_level': 'INFO'}
+ >>> cfdm.configuration()
+ {'atol': 5e-14, 'rtol': 1e-17, 'log_level': 'INFO'}
+ '''
+ return _configuration(
+ new_atol=atol, new_rtol=rtol, new_log_level=log_level)
+
+
+def _configuration(**kwargs):
+ '''Internal helper function to provide the logic for `cfdm.configuration`.
+
+ We delegate from the user-facing `cfdm.configuration` for two main reasons:
+
+ 1) to avoid a name clash there between the keyword arguments and the
+ functions which they each call (e.g. `atol` and `cfdm.atol`) which
+ would otherwise necessitate aliasing every such function name; and
+
+ 2) because the user-facing function must have the appropriate keywords
+ explicitly listed, but the very similar logic applied for each keyword
+ can be consolidated by iterating over the full dictionary of input kwargs.
+
+ '''
+ old = {name.lower(): val for name, val in CONSTANTS.items()}
+ # Filter out 'None' kwargs from configuration() defaults. Note that this
+ # does not filter out '0' or 'True' values, which is important as the user
+ # might be trying to set those, as opposed to None emerging as default.
+ kwargs = {name: val for name, val in kwargs.items() if val is not None}
+
+ # Note values are the functions not the keyword arguments of same name:
+ reset_mapping = {
+ 'new_atol': atol,
+ 'new_rtol': rtol,
+ 'new_log_level': log_level,
+ }
+ for setting_alias, new_value in kwargs.items(): # for all input kwargs...
+ reset_mapping[setting_alias](new_value) # ...run corresponding func
+
+ return old
+
+
def atol(*atol):
'''The tolerance on absolute differences when testing for numerically
tolerant equality.
| NCAS-CMS/cfdm | cfec0493b584abe64fdb6e0027a91bdaa5be1e4c | diff --git a/cfdm/test/test_functions.py b/cfdm/test/test_functions.py
index 0dac80f61..a744b0bc8 100644
--- a/cfdm/test/test_functions.py
+++ b/cfdm/test/test_functions.py
@@ -227,6 +227,99 @@ class FunctionsTest(unittest.TestCase):
filename = 'https://test_file.nc'
self.assertEqual(cfdm.abspath(filename), filename)
+ def test_configuration(self):
+ if self.test_only and inspect.stack()[0][3] not in self.test_only:
+ return
+
+ # Test getting of all config. and store original values to test on:
+ org = cfdm.configuration()
+ self.assertIsInstance(org, dict)
+ self.assertEqual(len(org), 3)
+ org_atol = org['atol']
+ self.assertIsInstance(org_atol, float)
+ org_rtol = org['rtol']
+ self.assertIsInstance(org_rtol, float)
+ org_ll = org['log_level'] # will be 'DISABLE' as disable for test
+ self.assertIsInstance(org_ll, str)
+
+ # Store some sensible values to reset items to for testing,
+ # ensure these are kept to be different to the defaults:
+ atol_rtol_reset_value = 7e-10
+ ll_reset_value = 'DETAIL'
+
+ # Test the setting of each lone item:
+ cfdm.configuration(atol=atol_rtol_reset_value)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], atol_rtol_reset_value)
+ self.assertEqual(post_set['rtol'], org_rtol)
+ self.assertEqual(post_set['log_level'], org_ll)
+ cfdm.configuration(atol=org_atol) # reset to org
+
+ cfdm.configuration(rtol=atol_rtol_reset_value)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], org_atol)
+ self.assertEqual(post_set['rtol'], atol_rtol_reset_value)
+ self.assertEqual(post_set['log_level'], org_ll)
+ # don't reset to org this time to test change persisting...
+
+ # Note setting of previous items persist, e.g. atol above
+ cfdm.configuration(log_level=ll_reset_value)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], org_atol)
+ self.assertEqual(
+ post_set['rtol'], atol_rtol_reset_value) # since changed it above
+ self.assertEqual(post_set['log_level'], ll_reset_value)
+
+ # Test the setting of more than one, but not all, items simultaneously:
+ new_atol_rtol_reset_value = 5e-18
+ new_ll_reset_value = 'DEBUG'
+ cfdm.configuration(
+ rtol=new_atol_rtol_reset_value, log_level=new_ll_reset_value)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], org_atol)
+ self.assertEqual(post_set['rtol'], new_atol_rtol_reset_value)
+ self.assertEqual(post_set['log_level'], new_ll_reset_value)
+
+ # Test setting all possible items simultaneously (to originals):
+ cfdm.configuration(
+ atol=org_atol, # same as current setting, testing on 'no change'
+ rtol=org_rtol,
+ log_level=org_ll
+ )
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], org_atol)
+ self.assertEqual(post_set['rtol'], org_rtol)
+ self.assertEqual(post_set['log_level'], org_ll)
+
+ # Test edge cases & invalid inputs...
+ # ... 1. User might set '0' or 'True' in some cases, which is
+ # somewhat a risk area for error as 0 is Falsy & True a Bool:
+ cfdm.configuration(rtol=0, atol=0.0, log_level=0)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], 0.0)
+ self.assertEqual(post_set['rtol'], 0.0)
+ self.assertEqual(post_set['log_level'], 'DISABLE') # DISABLE == 0
+ cfdm.configuration(log_level=True) # deprecated but valid value
+ # Deprecated True is converted to value 'WARNING' by log_level
+ self.assertEqual(cfdm.configuration()['log_level'], 'WARNING')
+
+ # 2. None as an input kwarg rather than as a default:
+ cfdm.configuration(atol=None, log_level=None, rtol=org_rtol)
+ post_set = cfdm.configuration()
+ self.assertEqual(post_set['atol'], 0.0) # 0.0 as set above
+ self.assertEqual(post_set['rtol'], org_rtol)
+ self.assertEqual(post_set['log_level'], 'WARNING') # as set above
+
+ # 3. Gracefully error with useful error messages with invalid inputs:
+ with self.assertRaises(ValueError):
+ cfdm.configuration(rtol='bad')
+ with self.assertRaises(ValueError):
+ cfdm.configuration(log_level=7)
+
+ # 4. Check invalid kwarg given logic processes **kwargs:
+ with self.assertRaises(TypeError):
+ cfdm.configuration(bad_kwarg=1e-15)
+
# def test_default_netCDF_fill_values(self):
# if self.test_only and inspect.stack()[0][3] not in self.test_only:
# return
| Consolidate functions that get & set global settings
Equivalent to NCAS-CMS/cf-python#70. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_functions.py::FunctionsTest::test_configuration"
] | [
"cfdm/test/test_functions.py::FunctionsTest::test_CF",
"cfdm/test/test_functions.py::FunctionsTest::test_abspath",
"cfdm/test/test_functions.py::FunctionsTest::test_atol_rtol",
"cfdm/test/test_functions.py::FunctionsTest::test_disable_logging",
"cfdm/test/test_functions.py::FunctionsTest::test_environment",
"cfdm/test/test_functions.py::FunctionsTest::test_log_level",
"cfdm/test/test_functions.py::FunctionsTest::test_reset_log_emergence_level"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-07-07T19:34:20Z" | mit |
|
NCAS-CMS__cfdm-86 | diff --git a/cfdm/core/cellmethod.py b/cfdm/core/cellmethod.py
index d2ce40fe4..3266d2d30 100644
--- a/cfdm/core/cellmethod.py
+++ b/cfdm/core/cellmethod.py
@@ -90,12 +90,12 @@ class CellMethod(abstract.Container):
if source:
try:
axes = source.get_axes(None)
- except AttributeErrror:
+ except AttributeError:
axes = None
try:
method = source.get_method(None)
- except AttributeErrror:
+ except AttributeError:
method = None
try:
diff --git a/cfdm/read_write/netcdf/netcdfwrite.py b/cfdm/read_write/netcdf/netcdfwrite.py
index 48d8ffe01..66790f30f 100644
--- a/cfdm/read_write/netcdf/netcdfwrite.py
+++ b/cfdm/read_write/netcdf/netcdfwrite.py
@@ -619,7 +619,8 @@ class NetCDFWrite(IOWrite):
g['dimensions'].add(ncdim)
- def _write_dimension_coordinate(self, f, key, coord, ncdim):
+ def _write_dimension_coordinate(self, f, key, coord, ncdim,
+ coordinates):
'''Write a coordinate variable and its bounds variable to the file.
This also writes a new netCDF dimension to the file and, if
@@ -639,6 +640,11 @@ class NetCDFWrite(IOWrite):
that the group structure may be different to the
corodinate variable, and the basename.
+ coordinates: `list`
+ This list may get updated in-place.
+
+ .. versionadded:: (cfdm) .8.7.0
+
:Returns:
`str`
@@ -740,6 +746,11 @@ class NetCDFWrite(IOWrite):
g['axis_to_ncdim'][axis] = seen[id(coord)]['ncdims'][0]
+ if g['coordinates'] and ncvar is not None:
+ # Add the dimension coordinate netCDF variable name to the
+ # 'coordinates' arttribute
+ coordinates.append(ncvar)
+
return ncvar
def _write_count_variable(self, f, count_variable, ncdim=None,
@@ -2939,8 +2950,9 @@ class NetCDFWrite(IOWrite):
#
g['part_ncdim'] = None
- # Initialize the list of the field's auxiliary/scalar
- # coordinates
+ # Initialize the list of the field's auxiliary and scalar
+ # coordinate variable, and possibly its coordinate variables,
+ # too.
coordinates = []
if g['output_version'] >= g['CF-1.8']:
@@ -3033,8 +3045,6 @@ class NetCDFWrite(IOWrite):
# if ncdim is not None:
# ncdim = self._netcdf_name(ncdim)
-# print ('\n\n F ncdim=', ncdim)
-
found_dimension_coordinate = False
for key, dim_coord in dimension_coordinates.items():
if (self.implementation.get_construct_data_axes(f, key)
@@ -3050,7 +3060,9 @@ class NetCDFWrite(IOWrite):
# the dimension coordinate to the file as a
# coordinate variable.
ncvar = self._write_dimension_coordinate(
- f, key, dim_coord, ncdim=ncdim)
+ f, key,
+ dim_coord, ncdim=ncdim,
+ coordinates=coordinates)
else:
# The data array does not span this axis (and
# therefore the dimension coordinate must have
@@ -3065,10 +3077,10 @@ class NetCDFWrite(IOWrite):
# this domain axis. Therefore write the
# dimension coordinate to the file as a
# coordinate variable.
- ncvar = self._write_dimension_coordinate(f,
- key,
- dim_coord,
- ncdim=ncdim)
+ ncvar = self._write_dimension_coordinate(
+ f, key,
+ dim_coord,
+ ncdim=ncdim, coordinates=coordinates)
# Expand the field's data array to include
# this domain axis
@@ -4047,7 +4059,7 @@ class NetCDFWrite(IOWrite):
endian='native', compress=0, fletcher32=False,
shuffle=True, scalar=True, string=True,
extra_write_vars=None, verbose=None, warn_valid=True,
- group=True):
+ group=True, coordinates=False):
'''Write fields to a netCDF file.
NetCDF dimension and variable names will be taken from variables'
@@ -4068,146 +4080,147 @@ class NetCDFWrite(IOWrite):
:Parameters:
- fields : (arbitrarily nested sequence of) `cfdm.Field`
+ fields : (sequence of) `cfdm.Field`
The field or fields to write to the file.
+ See `cfdm.write` for details.
+
filename : str
- The output CF-netCDF file. Various type of expansion are
- applied to the file names:
-
- ==================== ======================================
- Expansion Description
- ==================== ======================================
- Tilde An initial component of ``~`` or
- ``~user`` is replaced by that *user*'s
- home directory.
-
- Environment variable Substrings of the form ``$name`` or
- ``${name}`` are replaced by the value
- of environment variable *name*.
- ==================== ======================================
-
- Where more than one type of expansion is used in the same
- string, they are applied in the order given in the above
- table.
-
- Example: If the environment variable *MYSELF* has been set
- to the "david", then ``'~$MYSELF/out.nc'`` is equivalent to
- ``'~david/out.nc'``.
-
- fmt : str, optional
- The format of the output file. One of:
-
- ========================== =================================================
- *fmt* Description
- ========================== =================================================
- ``'NETCDF4'`` Output to a CF-netCDF4 format file
- ``'NETCDF4_CLASSIC'`` Output to a CF-netCDF4 classic format file
- ``'NETCDF3_CLASSIC'`` Output to a CF-netCDF3 classic format file
- ``'NETCDF3_64BIT'`` Output to a CF-netCDF3 64-bit offset format file
- ``'NETCDF3_64BIT_OFFSET'`` NetCDF3 64-bit offset format file
- ``'NETCDF3_64BIT'`` An alias for ``'NETCDF3_64BIT_OFFSET'``
- ``'NETCDF3_64BIT_DATA'`` NetCDF3 64-bit offset format file with extensions
- ========================== =================================================
-
- By default the *fmt* is ``'NETCDF4'``. Note that the
- netCDF3 formats may be slower than netCDF4 options.
+ The output CF-netCDF file.
+
+ See `cfdm.write` for details.
overwrite: bool, optional
If False then raise an exception if the output file
pre-exists. By default a pre-existing output file is over
written.
+ See `cfdm.write` for details.
+
verbose : bool, optional
- If True then print one-line summaries of each field written.
+ See `cfdm.write` for details.
+
+ file_descriptors: `dict`, optional
+ Create description of file contents netCDF global
+ attributes from the specified attributes and their values.
+
+ See `cfdm.write` for details.
+
+ global_attributes: (sequence of) `str`, optional
+ Create netCDF global attributes from the specified field
+ construct properties, rather than netCDF data variable
+ attributes.
+
+ See `cfdm.write` for details.
+
+ variable_attributes: (sequence of) `str`, optional
+ Create netCDF data variable attributes from the specified
+ field construct properties.
+
+ See `cfdm.write` for details.
+
+ external: `str`, optional
+ Write metadata constructs that have data and are marked as
+ external to the named external file. Ignored if there are
+ no such constructs.
+
+ See `cfdm.write` for details.
datatype : dict, optional
Specify data type conversions to be applied prior to writing
- data to disk. Arrays with data types which are not specified
- remain unchanged. By default, array data types are preserved
- with the exception of Booleans (``numpy.dtype(bool)``, which
- are converted to 32 bit integers.
+ data to disk.
- *Parameter example:*
- To convert 64 bit floats and integers to their 32 bit
- counterparts: ``dtype={numpy.dtype(float):
- numpy.dtype('float32'), numpy.dtype(int):
- numpy.dtype('int32')}``.
+ See `cfdm.write` for details.
Conventions: (sequence of) `str`, optional
- Specify conventions to be recorded by the netCDF global
- "Conventions" attribute. These conventions are in addition to
- version of CF being used e.g. ``'CF-1.7'``, which must not be
- specified. If the "Conventions" property is set on a field
- construct then it is ignored. Note that a convention name is
- not allowed to contain any commas.
+ Specify conventions to be recorded by the netCDF global
+ ``Conventions`` attribute.
+
+ See `cfdm.write` for details.
+
+ endian: `str`, optional
+ The endian-ness of the output file.
+
+ See `cfdm.write` for details.
- *Parameter example:*
- ``Conventions='UGRID-1.0'``
+ compress: `int`, optional
+ Regulate the speed and efficiency of compression.
- *Parameter example:*
- ``Conventions=['UGRID-1.0']``
+ See `cfdm.write` for details.
- *Parameter example:*
- ``Conventions=['CMIP-6.2', 'UGRID-1.0']``
+ least_significant_digit: `int`, optional
+ Truncate the input field construct data arrays, but not
+ the data arrays of metadata constructs.
- *Parameter example:*
- ``Conventions='CF-1.7'``
+ See `cfdm.write` for details.
- *Parameter example:*
- ``Conventions=['CF-1.7', 'CMIP-6.2']``
+ fletcher32: `bool`, optional
+ If True then the Fletcher-32 HDF5 checksum algorithm is
+ activated to detect compression errors. Ignored if
+ *compress* is ``0``.
+
+ See `cfdm.write` for details.
+
+ shuffle: `bool`, optional
+ If True (the default) then the HDF5 shuffle filter (which
+ de-interlaces a block of data before compression by
+ reordering the bytes by storing the first byte of all of a
+ variable's values in the chunk contiguously, followed by
+ all the second bytes, and so on) is turned off.
+
+ See `cfdm.write` for details.
string: `bool`, optional
- By default string-valued construct data are written as
- netCDF arrays of type string if the output file format is
- ``'NETCDF4'``, or of type char with an extra dimension
- denoting the maximum string length for any other output
- file format (see the *fmt* parameter). If *string* is False
- then string-valued construct data are written as netCDF
- arrays of type char with an extra dimension denoting the
- maximum string length, regardless of the selected output
- file format.
+ By default string-valued construct data are written as
+ netCDF arrays of type string if the output file format is
+ ``'NETCDF4'``, or of type char with an extra dimension
+ denoting the maximum string length for any other output
+ file format (see the *fmt* parameter). If *string* is False
+ then string-valued construct data are written as netCDF
+ arrays of type char with an extra dimension denoting the
+ maximum string length, regardless of the selected output
+ file format.
+
+ See `cfdm.write` for details.
+
+ .. versionadded:: (cfdm) 1.8.0
warn_valid: `bool`, optional
- If False then do not warn for when writing "out of range"
- data, as defined by the presence of ``valid_min``,
- ``valid_max`` or ``valid_range`` properties on field or
- metadata constructs that have data. By default a warning
- is printed if any such construct has any of these
- properties.
-
- *Parameter example:*
- If a field construct has ``valid_max`` property with
- value ``100`` and data with maximum value ``999``, then
- a warning will be printed if ``warn_valid=True``.
+ If False then do not print a warning when writing
+ "out-of-range" data, as indicated by the values, if
+ present, of any of the ``valid_min``, ``valid_max`` or
+ ``valid_range`` properties on field and metadata
+ constructs that have data.
+
+ See `cfdm.write` for details.
.. versionadded:: (cfdm) 1.8.3
group: `bool`, optional
- TODO
+ If False then create a "flat" netCDF file, i.e. one with
+ only the root group, regardless of any group structure
+ specified by the field constructs.
+
+ See `cfdm.write` for details.
.. versionadded:: (cfdm) 1.8.6
+ coordinates: `bool`, optional
+ If True then include CF-netCDF coordinate variable names
+ in the 'coordinates' attribute of output data
+ variables.
+
+ See `cfdm.write` for details.
+
+ .. versionadded:: (cfdm) 1.8.7.0
+
:Returns:
`None`
**Examples:**
- >>> f
- [<CF Field: air_pressure(30, 24)>,
- <CF Field: u_compnt_of_wind(19, 29, 24)>,
- <CF Field: v_compnt_of_wind(19, 29, 24)>,
- <CF Field: potential_temperature(19, 30, 24)>]
- >>> write(f, 'file')
-
- >>> type(f)
- <class 'cfdm.field.FieldList'>
- >>> cfdm.write([f, g], 'file.nc', verbose=3)
- [<CF Field: air_pressure(30, 24)>,
- <CF Field: u_compnt_of_wind(19, 29, 24)>,
- <CF Field: v_compnt_of_wind(19, 29, 24)>,
- <CF Field: potential_temperature(19, 30, 24)>]
+ See `cfdm.write` for examples.
'''
logger.info('Writing to {}'.format(fmt)) # pragma: no cover
@@ -4282,6 +4295,10 @@ class NetCDFWrite(IOWrite):
# valid_[min|max|range] attributes?
'warn_valid': bool(warn_valid),
'valid_properties': set(('valid_min', 'valid_max', 'valid_range')),
+
+ # Whether or not to name dimension corodinates in the
+ # 'coordinates' attribute
+ 'coordinates': bool(coordinates),
}
g = self.write_vars
diff --git a/cfdm/read_write/write.py b/cfdm/read_write/write.py
index 454b998ab..5ed92901a 100644
--- a/cfdm/read_write/write.py
+++ b/cfdm/read_write/write.py
@@ -12,7 +12,7 @@ def write(fields, filename, fmt='NETCDF4', overwrite=True,
datatype=None, least_significant_digit=None,
endian='native', compress=0, fletcher32=False, shuffle=True,
string=True, verbose=None, warn_valid=True, group=True,
- _implementation=_implementation):
+ coordinates=False, _implementation=_implementation):
'''Write field constructs to a netCDF file.
**File format**
@@ -357,6 +357,8 @@ def write(fields, filename, fmt='NETCDF4', overwrite=True,
maximum string length, regardless of the selected output
file format.
+ .. versionadded:: (cfdm) 1.8.0
+
verbose: `int` or `str` or `None`, optional
If an integer from ``-1`` to ``3``, or an equivalent string
equal ignoring case to one of:
@@ -413,6 +415,14 @@ def write(fields, filename, fmt='NETCDF4', overwrite=True,
.. versionadded:: (cfdm) 1.8.6
+ coordinates: `bool`, optional
+ If True then include CF-netCDF coordinate variable names
+ in the 'coordinates' attribute of output data
+ variables. By default only auxiliary and scalar coordinate
+ variables are included.
+
+ .. versionadded:: (cfdm) 1.8.7.0
+
_implementation: (subclass of) `CFDMImplementation`, optional
Define the CF data model implementation that defines field
and metadata constructs and their components.
@@ -449,4 +459,4 @@ def write(fields, filename, fmt='NETCDF4', overwrite=True,
shuffle=shuffle, fletcher32=fletcher32,
string=string, verbose=verbose,
warn_valid=warn_valid, group=group,
- extra_write_vars=None)
+ coordinates=coordinates, extra_write_vars=None)
| NCAS-CMS/cfdm | 57ca70e3a2ad24069207989fe4a6fce9b84a4424 | diff --git a/cfdm/test/test_DimensionCoordinate.py b/cfdm/test/test_DimensionCoordinate.py
index 36a6440e5..713a7c053 100644
--- a/cfdm/test/test_DimensionCoordinate.py
+++ b/cfdm/test/test_DimensionCoordinate.py
@@ -70,6 +70,7 @@ class DimensionCoordinateTest(unittest.TestCase):
with self.assertRaises(Exception):
y = x.set_data(cfdm.Data(1))
+ @unittest.skip("wait until 1.9.0.0 ...")
def test_DimensionCoordinate_climatology(self):
x = cfdm.DimensionCoordinate()
diff --git a/cfdm/test/test_read_write.py b/cfdm/test/test_read_write.py
index ebe11df36..fb5f35163 100644
--- a/cfdm/test/test_read_write.py
+++ b/cfdm/test/test_read_write.py
@@ -13,6 +13,7 @@ import cfdm
warnings = False
+# Set up temporary files
n_tmpfiles = 6
tmpfiles = [tempfile.mkstemp('_test_read_write.nc', dir=os.getcwd())[1]
for i in range(n_tmpfiles)]
@@ -435,6 +436,18 @@ class read_writeTest(unittest.TestCase):
self.assertFalse(f)
+ def test_write_coordinates(self):
+ if self.test_only and inspect.stack()[0][3] not in self.test_only:
+ return
+
+ f = cfdm.example_field(0)
+
+ cfdm.write(f, tmpfile, coordinates=True)
+ g = cfdm.read(tmpfile)
+
+ self.assertEqual(len(g), 1)
+ self.assertTrue(g[0].equals(f))
+
# --- End: class
| In cf.write, allow dimension coordinate constructs' netCDF names to be added to the "coordinates" attribute
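A minimal sketch of the requested usage, with the `coordinates` keyword as added by the patch above:
```python
import cfdm

f = cfdm.example_field(0)
# By default only auxiliary and scalar coordinate variable names appear in the
# "coordinates" attribute; coordinates=True also includes the dimension
# coordinate variable names.
cfdm.write(f, 'file.nc', coordinates=True)
```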
In `cf.write`, allow dimension coordinate constructs' netCDF names to be added to the `coordinates` netCDF attribute, if the user desires. Currently they are always omitted. Either way is CF-compliant. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"cfdm/test/test_read_write.py::read_writeTest::test_write_coordinates"
] | [
"cfdm/test/test_read_write.py::read_writeTest::test_read_mask",
"cfdm/test/test_read_write.py::read_writeTest::test_write_filename",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensionCoordinate__init__",
"cfdm/test/test_DimensionCoordinate.py::DimensionCoordinateTest::test_DimensionCoordinate_set_data"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-09-30T08:18:08Z" | mit |
|
NREL__hescore-hpxml-100 | diff --git a/hescorehpxml/__init__.py b/hescorehpxml/__init__.py
index 306fcba0..b178e9fc 100644
--- a/hescorehpxml/__init__.py
+++ b/hescorehpxml/__init__.py
@@ -236,7 +236,7 @@ class HPXMLtoHEScoreTranslator(object):
if glass_layers in ('double-pane', 'single-paned with storms', 'single-paned with low-e storms'):
if glass_layers == 'double-pane' and glass_type == 'low-e' and gas_fill == 'argon':
window_code = 'dpeaab'
- elif glass_type is not None and glass_type == 'reflective':
+ elif glass_type is not None and glass_type in ('reflective', 'low-e'):
# TODO: figure out if 'reflective' is close enough to 'solar-control' low-e
window_code = 'dseab'
elif glass_type is not None and glass_type.startswith('tinted'):
@@ -251,7 +251,7 @@ class HPXMLtoHEScoreTranslator(object):
else:
window_code = 'scna'
elif glass_layers in ('double-pane', 'single-paned with storms', 'single-paned with low-e storms'):
- if glass_type is not None and glass_type in ('reflective', 'tinted/reflective'):
+ if glass_type is not None and glass_type in ('reflective', 'tinted/reflective', 'low-e'):
window_code = 'dseaa'
elif glass_type is not None and glass_type == 'tinted':
window_code = 'dtaa'
| NREL/hescore-hpxml | b4c0dfd6a0e38b571a558b2127757c9c81a8640e | diff --git a/tests/tests.py b/tests/tests.py
index ccc81832..86a1f68b 100644
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -6,7 +6,7 @@ from builtins import object
import os
import unittest
import datetime as dt
-from lxml import etree
+from lxml import etree, objectify
from hescorehpxml import HPXMLtoHEScoreTranslator, TranslationError, InputOutOfBounds
import io
import json
@@ -61,6 +61,15 @@ class ComparatorBase(object):
def xpath(self, xpathexpr, *args, **kwargs):
return self.translator.xpath(self.translator.hpxmldoc, xpathexpr, *args, **kwargs)
+ @property
+ def E(self):
+ E = objectify.ElementMaker(
+ annotate=False,
+ namespace=self.translator.ns['h'],
+ nsmap=self.translator.ns
+ )
+ return E
+
class TestAPIHouses(unittest.TestCase, ComparatorBase):
def test_house1(self):
@@ -1997,6 +2006,40 @@ class TestHEScore2019Updates(unittest.TestCase, ComparatorBase):
roof_type = d['building']['zone']['zone_roof'][0]['roof_type']
self.assertEqual(roof_type, 'vented_attic')
+ def test_window_code_mappings_aluminum(self):
+ tr = self._load_xmlfile('hescore_min')
+
+ window2_frametype = self.xpath('//h:Window[h:SystemIdentifier/@id="window2"]/h:FrameType')
+ window2_frametype.clear()
+ window2_frametype.append(self.E.Aluminum())
+ window2_frametype.getparent().append(self.E.GlassType('low-e'))
+
+ window3_frametype = self.xpath('//h:Window[h:SystemIdentifier/@id="window3"]/h:FrameType')
+ window3_frametype.clear()
+ window3_frametype.append(self.E.Aluminum(self.E.ThermalBreak(True)))
+
+ window4_frametype = self.xpath('//h:Window[h:SystemIdentifier/@id="window4"]/h:FrameType')
+ window4_frametype.clear()
+ window4_frametype.append(self.E.Aluminum(self.E.ThermalBreak(True)))
+ window4_frametype.getparent().append(self.E.GlassType('low-e'))
+
+ d = tr.hpxml_to_hescore_dict()
+ walls = {}
+ for wall in d['building']['zone']['zone_wall']:
+ walls[wall['side']] = wall
+ self.assertEqual(
+ walls['left']['zone_window']['window_code'],
+ 'dseaa'
+ )
+ self.assertEqual(
+ walls['back']['zone_window']['window_code'],
+ 'dcab'
+ )
+ self.assertEqual(
+ walls['right']['zone_window']['window_code'],
+ 'dseab'
+ )
+
if __name__ == "__main__":
unittest.main()
| Window code mapping changes
- Aluminum, double pane, low-e is currently mapping to `dcaa`.
- Aluminum, thermal break is currently mapping to `dcab`
@lirainer What should these be mapping to? | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/tests.py::TestHEScore2019Updates::test_window_code_mappings_aluminum"
] | [
"tests/tests.py::TestAPIHouses::test_house1",
"tests/tests.py::TestAPIHouses::test_house1_v1_1",
"tests/tests.py::TestAPIHouses::test_house1_v2",
"tests/tests.py::TestAPIHouses::test_house1_v2_1",
"tests/tests.py::TestAPIHouses::test_house2",
"tests/tests.py::TestAPIHouses::test_house3",
"tests/tests.py::TestAPIHouses::test_house4",
"tests/tests.py::TestAPIHouses::test_house5",
"tests/tests.py::TestAPIHouses::test_house6",
"tests/tests.py::TestAPIHouses::test_house7",
"tests/tests.py::TestAPIHouses::test_house8",
"tests/tests.py::TestOtherHouses::test_air_source_heat_pump_has_no_ducts",
"tests/tests.py::TestOtherHouses::test_attic_knee_wall",
"tests/tests.py::TestOtherHouses::test_attic_roof_assoc",
"tests/tests.py::TestOtherHouses::test_bad_duct_location",
"tests/tests.py::TestOtherHouses::test_bldgid_not_found",
"tests/tests.py::TestOtherHouses::test_clg_sys_has_air_dist",
"tests/tests.py::TestOtherHouses::test_cooling_system_wrong_efficiency_type",
"tests/tests.py::TestOtherHouses::test_dist_sys_idref",
"tests/tests.py::TestOtherHouses::test_evap_cooling_system_type",
"tests/tests.py::TestOtherHouses::test_external_id_extension_passthru",
"tests/tests.py::TestOtherHouses::test_external_id_passthru",
"tests/tests.py::TestOtherHouses::test_extra_roof_sheathing_insulation",
"tests/tests.py::TestOtherHouses::test_extra_wall_sheathing_insulation",
"tests/tests.py::TestOtherHouses::test_floor_no_area",
"tests/tests.py::TestOtherHouses::test_foundation_walls_on_slab",
"tests/tests.py::TestOtherHouses::test_heating_system_no_efficiency",
"tests/tests.py::TestOtherHouses::test_heating_system_wrong_efficiency_type",
"tests/tests.py::TestOtherHouses::test_heatpump_no_cooling",
"tests/tests.py::TestOtherHouses::test_heatpump_no_heating",
"tests/tests.py::TestOtherHouses::test_hescore_min",
"tests/tests.py::TestOtherHouses::test_htg_sys_has_air_dist",
"tests/tests.py::TestOtherHouses::test_hvac_fractions_sum_to_one",
"tests/tests.py::TestOtherHouses::test_impossible_cooling_system_type",
"tests/tests.py::TestOtherHouses::test_impossible_heating_system_type",
"tests/tests.py::TestOtherHouses::test_impossible_triple_pane_window",
"tests/tests.py::TestOtherHouses::test_impossible_window",
"tests/tests.py::TestOtherHouses::test_indirect_dhw_error",
"tests/tests.py::TestOtherHouses::test_invalid_attic_type",
"tests/tests.py::TestOtherHouses::test_invalid_residential_faciliy_type",
"tests/tests.py::TestOtherHouses::test_invalid_roof_type",
"tests/tests.py::TestOtherHouses::test_invalid_surroundings",
"tests/tests.py::TestOtherHouses::test_log_wall_fail",
"tests/tests.py::TestOtherHouses::test_mentor_extension",
"tests/tests.py::TestOtherHouses::test_missing_attached_to_roof",
"tests/tests.py::TestOtherHouses::test_missing_cooling_system",
"tests/tests.py::TestOtherHouses::test_missing_cooling_weighting_factor",
"tests/tests.py::TestOtherHouses::test_missing_heating_system",
"tests/tests.py::TestOtherHouses::test_missing_heating_weighting_factor",
"tests/tests.py::TestOtherHouses::test_missing_residential_facility_type",
"tests/tests.py::TestOtherHouses::test_missing_roof_color",
"tests/tests.py::TestOtherHouses::test_missing_roof_type",
"tests/tests.py::TestOtherHouses::test_missing_siding",
"tests/tests.py::TestOtherHouses::test_missing_skylight_area",
"tests/tests.py::TestOtherHouses::test_missing_surroundings",
"tests/tests.py::TestOtherHouses::test_missing_water_heater",
"tests/tests.py::TestOtherHouses::test_missing_window_area",
"tests/tests.py::TestOtherHouses::test_missing_window_orientation",
"tests/tests.py::TestOtherHouses::test_only_duct_system_per_heating_sys",
"tests/tests.py::TestOtherHouses::test_ove_low_r",
"tests/tests.py::TestOtherHouses::test_preconstruction_event_type",
"tests/tests.py::TestOtherHouses::test_siding_cmu_fail",
"tests/tests.py::TestOtherHouses::test_siding_fail2",
"tests/tests.py::TestOtherHouses::test_tankless_coil_dhw_error",
"tests/tests.py::TestOtherHouses::test_too_many_duct_systems",
"tests/tests.py::TestOtherHouses::test_townhouse_walls",
"tests/tests.py::TestOtherHouses::test_townhouse_walls_all_same",
"tests/tests.py::TestOtherHouses::test_townhouse_walls_conflict",
"tests/tests.py::TestOtherHouses::test_townhouse_window_fail",
"tests/tests.py::TestOtherHouses::test_townhouse_window_wall_all_same_fail",
"tests/tests.py::TestOtherHouses::test_townhouse_windows_area_wrong",
"tests/tests.py::TestOtherHouses::test_wall_construction_ps_low_r",
"tests/tests.py::TestOtherHouses::test_wall_insulation_layer_missing_rvalue",
"tests/tests.py::TestOtherHouses::test_wall_same_area_same_side_different_construction",
"tests/tests.py::TestOtherHouses::test_window_area_sum_on_angled_front_door",
"tests/tests.py::TestOtherHouses::test_window_attached_to_wall",
"tests/tests.py::TestOtherHouses::test_wood_stove",
"tests/tests.py::TestOtherHouses::test_wood_stove_invalid_fuel_type",
"tests/tests.py::TestOtherHouses::test_zipcode_missing",
"tests/tests.py::TestInputOutOfBounds::test_assessment_date1",
"tests/tests.py::TestInputOutOfBounds::test_assessment_date2",
"tests/tests.py::TestInputOutOfBounds::test_conditioned_floor_area1",
"tests/tests.py::TestInputOutOfBounds::test_conditioned_floor_area2",
"tests/tests.py::TestInputOutOfBounds::test_cooling_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_cooling_year",
"tests/tests.py::TestInputOutOfBounds::test_dhw_heat_pump_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_dhw_storage_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_dhw_year",
"tests/tests.py::TestInputOutOfBounds::test_envelope_leakage",
"tests/tests.py::TestInputOutOfBounds::test_evap_cooler_missing_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_floor_to_ceiling_height1",
"tests/tests.py::TestInputOutOfBounds::test_floor_to_ceiling_height2",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_furnace",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_gchp",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_heat_pump",
"tests/tests.py::TestInputOutOfBounds::test_heating_year",
"tests/tests.py::TestInputOutOfBounds::test_num_floor_above_grade",
"tests/tests.py::TestInputOutOfBounds::test_skylight_area",
"tests/tests.py::TestInputOutOfBounds::test_skylight_u_value",
"tests/tests.py::TestInputOutOfBounds::test_window_area",
"tests/tests.py::TestInputOutOfBounds::test_window_u_value",
"tests/tests.py::TestInputOutOfBounds::test_year_built1",
"tests/tests.py::TestInputOutOfBounds::test_year_built2",
"tests/tests.py::TestHVACFractions::test_allow_5pct_diff",
"tests/tests.py::TestHVACFractions::test_boiler_roomac",
"tests/tests.py::TestHVACFractions::test_furnace_baseboard_centralac",
"tests/tests.py::TestHVACFractions::test_furnace_heat_pump",
"tests/tests.py::TestHVACFractions::test_wall_furnace_baseboard_centralac",
"tests/tests.py::TestPhotovoltaics::test_azimuth_orientation_missing",
"tests/tests.py::TestPhotovoltaics::test_capacity_missing",
"tests/tests.py::TestPhotovoltaics::test_collector_area",
"tests/tests.py::TestPhotovoltaics::test_orientation",
"tests/tests.py::TestPhotovoltaics::test_pv",
"tests/tests.py::TestPhotovoltaics::test_two_sys_avg",
"tests/tests.py::TestPhotovoltaics::test_two_sys_different_capacity_error",
"tests/tests.py::TestPhotovoltaics::test_years_missing",
"tests/tests.py::TesHPXMLVersion2Point3::test_floor_furnace",
"tests/tests.py::TesHPXMLVersion2Point3::test_medium_dark_roof_color",
"tests/tests.py::TesHPXMLVersion2Point3::test_roof_absorptance",
"tests/tests.py::TestHEScore2019Updates::test_bldg_about_comment",
"tests/tests.py::TestHEScore2019Updates::test_conditioned_attic",
"tests/tests.py::TestHEScore2019Updates::test_duct_location_validation",
"tests/tests.py::TestHEScore2019Updates::test_hvac_combinations",
"tests/tests.py::TestHEScore2019Updates::test_skylight_solar_screens_exteriorshading",
"tests/tests.py::TestHEScore2019Updates::test_skylight_solar_screens_treatments",
"tests/tests.py::TestHEScore2019Updates::test_tankless",
"tests/tests.py::TestHEScore2019Updates::test_tankless_energyfactorerror",
"tests/tests.py::TestHEScore2019Updates::test_uef_over_ef",
"tests/tests.py::TestHEScore2019Updates::test_uef_with_tankless",
"tests/tests.py::TestHEScore2019Updates::test_window_solar_screens"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2019-08-01T19:19:11Z" | bsd-2-clause |
|
NREL__hescore-hpxml-169 | diff --git a/docs/source/translation/building_address.rst b/docs/source/translation/building_address.rst
index 33449491..8fdf52c7 100644
--- a/docs/source/translation/building_address.rst
+++ b/docs/source/translation/building_address.rst
@@ -33,8 +33,10 @@ format for HEScore.
HPXML allows for two lines of address elements. If both are used, the lines will
be concatenated with a space between for submission to the HEScore
-``building_address.address`` field. All of the HPXML elements shown in the
-above code snippet are required with the exception of ``Address2``
+``building_address.address`` field. All of the HPXML elements shown in the above
+code snippet are required with the exception of ``Address2``. Additionally, if a
+zip plus 4 code is entered in HPXML, it will be trimmed to just the 5 digit zip
+code before being passed to HEScore.
.. _assessment-type-mapping:
diff --git a/hescorehpxml/base.py b/hescorehpxml/base.py
index bfb1f4d5..169a39dd 100644
--- a/hescorehpxml/base.py
+++ b/hescorehpxml/base.py
@@ -816,7 +816,8 @@ class HPXMLtoHEScoreTranslatorBase(object):
raise ElementNotFoundError(hpxmladdress, 'h:Address1/text() | h:Address2/text()', {})
bldgaddr['city'] = xpath(b, 'h:Site/h:Address/h:CityMunicipality/text()', raise_err=True)
bldgaddr['state'] = xpath(b, 'h:Site/h:Address/h:StateCode/text()', raise_err=True)
- bldgaddr['zip_code'] = xpath(b, 'h:Site/h:Address/h:ZipCode/text()', raise_err=True)
+ hpxml_zipcode = xpath(b, 'h:Site/h:Address/h:ZipCode/text()', raise_err=True)
+ bldgaddr['zip_code'] = re.match(r"([0-9]{5})(-[0-9]{4})?", hpxml_zipcode).group(1)
transaction_type = xpath(self.hpxmldoc, 'h:XMLTransactionHeaderInformation/h:Transaction/text()')
is_mentor = xpath(b, 'boolean(h:ProjectStatus/h:extension/h:HEScoreMentorAssessment)')
if is_mentor:
| NREL/hescore-hpxml | 974933e77b66154cc2302c553374006d7151a15c | diff --git a/tests/test_translation.py b/tests/test_translation.py
index 117daa04..ff02c0ff 100644
--- a/tests/test_translation.py
+++ b/tests/test_translation.py
@@ -3048,6 +3048,14 @@ class TestHEScore2021Updates(unittest.TestCase, ComparatorBase):
self.assertEqual(res3['building']['zone']['zone_roof'][1]['zone_skylight']['skylight_code'], 'dtab')
self.assertFalse(res3['building']['zone']['zone_roof'][1]['zone_skylight']['solar_screen'])
+ def test_zip_plus4(self):
+ tr = self._load_xmlfile('hescore_min_v3')
+ el = self.xpath('//h:ZipCode')
+ orig_zipcode = str(el.text)
+ el.text = el.text + '-1234'
+ res = tr.hpxml_to_hescore()
+ self.assertEqual(res['building_address']['zip_code'], orig_zipcode)
+
class TestHEScoreV3(unittest.TestCase, ComparatorBase):
| Handle ZIP+4 cleanly
HPXML allows for ZIP+4: https://hpxml.nrel.gov/datadictionary/3.0.0/Building/Site/Address/ZipCode
If an [HPXML file](https://github.com/NREL/hescore-hpxml/files/7171808/zip_plus_4.xml.txt) is submitted with a valid ZIP+4 entry, hpxml2hescore passes it through unchanged to the JSON output:
```
"building_address": {
"address": "1 API House",
"city": "Minden",
"state": "NE",
"zip_code": "68959-1234",
"assessment_type": "initial"
},
```
This results in the following error from submit_hpxml:
```
ZIP code 68959-1234 not found
```
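The patch earlier in this record trims the value to its leading five digits before it reaches the JSON. A minimal, self-contained sketch of that trimming, using the same regular expression as the patch (`trim_zipcode` is a hypothetical helper name, not part of the translator):

```python
import re

def trim_zipcode(zipcode: str) -> str:
    # Keep only the leading 5-digit ZIP from a ZIP or ZIP+4 string.
    # Assumes the value starts with five digits, as the HPXML schema requires.
    return re.match(r"([0-9]{5})(-[0-9]{4})?", zipcode).group(1)

assert trim_zipcode("68959-1234") == "68959"
assert trim_zipcode("68959") == "68959"
```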
**Expected behavior**
JSON output:
```
"building_address": {
"address": "1 API House",
"city": "Minden",
"state": "NE",
"zip_code": "68959",
"assessment_type": "initial"
},
```
And no error from submit_hpxml | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_translation.py::TestHEScore2021Updates::test_zip_plus4"
] | [
"tests/test_translation.py::TestAPIHouses::test_house1",
"tests/test_translation.py::TestAPIHouses::test_house1_v2",
"tests/test_translation.py::TestAPIHouses::test_house1_v2_1",
"tests/test_translation.py::TestAPIHouses::test_house2",
"tests/test_translation.py::TestAPIHouses::test_house3",
"tests/test_translation.py::TestAPIHouses::test_house4",
"tests/test_translation.py::TestAPIHouses::test_house5",
"tests/test_translation.py::TestAPIHouses::test_house6",
"tests/test_translation.py::TestAPIHouses::test_house7",
"tests/test_translation.py::TestAPIHouses::test_house8",
"tests/test_translation.py::TestCLI::test_cli_pass",
"tests/test_translation.py::TestOtherHouses::test_air_source_heat_pump_has_no_ducts",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall_zero_rvalue",
"tests/test_translation.py::TestOtherHouses::test_attic_roof_assoc",
"tests/test_translation.py::TestOtherHouses::test_bad_duct_location",
"tests/test_translation.py::TestOtherHouses::test_bldgid_not_found",
"tests/test_translation.py::TestOtherHouses::test_clg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_cooling_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_dist_sys_idref",
"tests/test_translation.py::TestOtherHouses::test_evap_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_external_id_extension_passthru",
"tests/test_translation.py::TestOtherHouses::test_external_id_passthru",
"tests/test_translation.py::TestOtherHouses::test_extra_roof_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_extra_wall_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_floor_no_area",
"tests/test_translation.py::TestOtherHouses::test_foundation_walls_on_slab",
"tests/test_translation.py::TestOtherHouses::test_frac_duct_area_missing",
"tests/test_translation.py::TestOtherHouses::test_gable_wall_ignore",
"tests/test_translation.py::TestOtherHouses::test_heating_system_no_efficiency",
"tests/test_translation.py::TestOtherHouses::test_heating_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_cooling",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_heating",
"tests/test_translation.py::TestOtherHouses::test_hescore_min",
"tests/test_translation.py::TestOtherHouses::test_htg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_hvac_fractions_sum_to_one",
"tests/test_translation.py::TestOtherHouses::test_impossible_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_heating_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_triple_pane_window",
"tests/test_translation.py::TestOtherHouses::test_impossible_window",
"tests/test_translation.py::TestOtherHouses::test_indirect_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_invalid_attic_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_residential_faciliy_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_roof_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_surroundings",
"tests/test_translation.py::TestOtherHouses::test_log_wall_fail",
"tests/test_translation.py::TestOtherHouses::test_mentor_extension",
"tests/test_translation.py::TestOtherHouses::test_missing_attached_to_roof",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_system",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_system",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_residential_facility_type",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_color",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_type",
"tests/test_translation.py::TestOtherHouses::test_missing_siding",
"tests/test_translation.py::TestOtherHouses::test_missing_skylight_area",
"tests/test_translation.py::TestOtherHouses::test_missing_surroundings",
"tests/test_translation.py::TestOtherHouses::test_missing_water_heater",
"tests/test_translation.py::TestOtherHouses::test_missing_window_area",
"tests/test_translation.py::TestOtherHouses::test_missing_window_orientation",
"tests/test_translation.py::TestOtherHouses::test_only_duct_system_per_heating_sys",
"tests/test_translation.py::TestOtherHouses::test_ove_low_r",
"tests/test_translation.py::TestOtherHouses::test_preconstruction_event_type",
"tests/test_translation.py::TestOtherHouses::test_siding_cmu_fail",
"tests/test_translation.py::TestOtherHouses::test_siding_fail2",
"tests/test_translation.py::TestOtherHouses::test_slab_missing",
"tests/test_translation.py::TestOtherHouses::test_tankless_coil_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_too_many_duct_systems",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_all_same",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_conflict",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_wall_all_same_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_windows_area_wrong",
"tests/test_translation.py::TestOtherHouses::test_wall_construction_ps_low_r",
"tests/test_translation.py::TestOtherHouses::test_wall_insulation_layer_missing_rvalue",
"tests/test_translation.py::TestOtherHouses::test_wall_same_area_same_side_different_construction",
"tests/test_translation.py::TestOtherHouses::test_window_area_sum_on_angled_front_door",
"tests/test_translation.py::TestOtherHouses::test_window_attached_to_wall",
"tests/test_translation.py::TestOtherHouses::test_window_only_attached_to_foundation_wall",
"tests/test_translation.py::TestOtherHouses::test_wood_stove",
"tests/test_translation.py::TestOtherHouses::test_wood_stove_invalid_fuel_type",
"tests/test_translation.py::TestOtherHouses::test_zipcode_missing",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date1",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date2",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area1",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area2",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_year",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_heat_pump_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_storage_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_year",
"tests/test_translation.py::TestInputOutOfBounds::test_envelope_leakage",
"tests/test_translation.py::TestInputOutOfBounds::test_evap_cooler_missing_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height1",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height2",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_furnace",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_gchp",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_heat_pump",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_year",
"tests/test_translation.py::TestInputOutOfBounds::test_num_floor_above_grade",
"tests/test_translation.py::TestInputOutOfBounds::test_number_bedrooms",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_area",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_window_area",
"tests/test_translation.py::TestInputOutOfBounds::test_window_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built1",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built2",
"tests/test_translation.py::TestHVACFractions::test_allow_5pct_diff",
"tests/test_translation.py::TestHVACFractions::test_boiler_roomac",
"tests/test_translation.py::TestHVACFractions::test_different_weighting_factors",
"tests/test_translation.py::TestHVACFractions::test_furnace_baseboard_centralac",
"tests/test_translation.py::TestHVACFractions::test_furnace_heat_pump",
"tests/test_translation.py::TestHVACFractions::test_wall_furnace_baseboard_centralac",
"tests/test_translation.py::TestPhotovoltaics::test_azimuth_orientation_missing",
"tests/test_translation.py::TestPhotovoltaics::test_capacity_missing",
"tests/test_translation.py::TestPhotovoltaics::test_collector_area",
"tests/test_translation.py::TestPhotovoltaics::test_orientation",
"tests/test_translation.py::TestPhotovoltaics::test_pv",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_avg",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_different_capacity_error",
"tests/test_translation.py::TestPhotovoltaics::test_years_missing",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_floor_furnace",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_medium_dark_roof_color",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_roof_absorptance",
"tests/test_translation.py::TestHEScore2019Updates::test_bldg_about_comment",
"tests/test_translation.py::TestHEScore2019Updates::test_conditioned_attic",
"tests/test_translation.py::TestHEScore2019Updates::test_duct_location_validation",
"tests/test_translation.py::TestHEScore2019Updates::test_ducted_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_ductless_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes_fail",
"tests/test_translation.py::TestHEScore2019Updates::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_exteriorshading",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_treatments",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless_energyfactorerror",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_over_ef",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_with_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_window_code_mappings_aluminum",
"tests/test_translation.py::TestHEScore2019Updates::test_window_solar_screens",
"tests/test_translation.py::TestHEScore2021Updates::test_skylight_assignment",
"tests/test_translation.py::TestHEScoreV3::test_attic_roof_unattached",
"tests/test_translation.py::TestHEScoreV3::test_attic_type",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_frame_floors",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_roofs",
"tests/test_translation.py::TestHEScoreV3::test_hescore_min_translation",
"tests/test_translation.py::TestHEScoreV3::test_house1_translation",
"tests/test_translation.py::TestHEScoreV3::test_house2_translation",
"tests/test_translation.py::TestHEScoreV3::test_house3_translation",
"tests/test_translation.py::TestHEScoreV3::test_house4_translation",
"tests/test_translation.py::TestHEScoreV3::test_house5_translation",
"tests/test_translation.py::TestHEScoreV3::test_house6_translation",
"tests/test_translation.py::TestHEScoreV3::test_house7_translation",
"tests/test_translation.py::TestHEScoreV3::test_house8_translation",
"tests/test_translation.py::TestHEScoreV3::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScoreV3::test_townhouse_walls_translation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_insulation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_location"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2021-11-18T21:54:55Z" | bsd-2-clause |
|
NREL__hescore-hpxml-179 | diff --git a/docs/source/translation/generation.rst b/docs/source/translation/generation.rst
index b4b2c751..039ac1a1 100644
--- a/docs/source/translation/generation.rst
+++ b/docs/source/translation/generation.rst
@@ -4,55 +4,75 @@ Generation
Solar Electric
**************
-HEScore allows for a single photovoltaic system to be included as of v2016.
-In HPXML, multiple ``PVSystem`` elements can be specified to represent the PV systems on the house.
-The translator combines multiple systems and generates the appropriate HEScore inputs as follows:
+HEScore allows for a single photovoltaic system to be included as of v2016. In
+HPXML, multiple ``PVSystem`` elements can be specified to represent the PV
+systems on the house. The translator combines multiple systems and generates the
+appropriate HEScore inputs as follows:
Capacity Known
==============
-If each ``PVSystem`` has a ``MaxPowerOutput``, this is true.
-If each ``PVSystem`` has a ``CollectorArea``, this is false.
-Preference is given to known capacity if both are available.
-Either a ``MaxPowerOutput`` must be specified for every ``PVSystem``
-or ``CollectorArea`` must be specified for every ``PVSystem``.
+If each ``PVSystem`` has a ``MaxPowerOutput``, this is true. If each
+``PVSystem`` has a ``NumberOfPanels`` or if each has ``CollectorArea``, this is
+false. Preference is given to known capacity if available. Either a
+``MaxPowerOutput`` must be specified for every ``PVSystem`` or ``CollectorArea``
+must be specified for every ``PVSystem``.
DC Capacity
===========
-If each ``PVSystem`` has a ``MaxPowerOutput``, the system capacity is known.
-The ``system_capacity`` in HEScore is calculated by summing all the ``MaxPowerOutput`` elements in HPXML.
+If each ``PVSystem`` has a ``MaxPowerOutput``, the system capacity is known. The
+``system_capacity`` in HEScore is calculated by summing all the
+``MaxPowerOutput`` elements in HPXML.
Number of Panels
================
-If ``MaxPowerOutput`` is missing from any ``PVSystem``,
-``CollectorArea`` is required on every PVSystem and the system capacity is not known.
-The number of panels is calculated by summing all the collector area, dividing by 17.6 sq.ft.,
-and rounding to the nearest whole number.
+If ``MaxPowerOutput`` is missing from any ``PVSystem``, the translator will
+check to see if every system has ``NumberOfPanels`` and calculate the total
+number of panels.
+
+If ``NumberOfPanels`` isn't available on every system, the translator will look
+for ``CollectorArea`` on every PVSystem. The number of panels is calculated by
+summing all the collector area, dividing by 17.6 sq.ft., and rounding to the
+nearest whole number.
+
+Weighted Averages
+=================
+
+The below quantities are calculated using weighted averages. The weights used
+are in priority order:
+
+- ``MaxPowerOutput``
+- ``NumberOfPanels``
+- ``CollectorArea``
+
+Which is the same data elements used to determine the PV sizing inputs above.
Year Installed
==============
-For each ``PVSystem`` the ``YearInverterManufactured`` and ``YearModulesManufactured`` element values are retrieved,
-and the greater of the two is assumed to be the year that system was installed.
-When there are multiple ``PVSystem`` elements, a capacity or area-weighted average of the assumed year installed
-is calculated and used.
+For each ``PVSystem`` the ``YearInverterManufactured`` and
+``YearModulesManufactured`` element values are retrieved, and the greater of the
+two is assumed to be the year that system was installed. When there are multiple
+``PVSystem`` elements, a weighted average is calculated and used.
Panel Orientation (Azimuth)
===========================
-For each ``PVSystem`` the ``ArrayAzimuth`` (degrees clockwise from north) is retrieved.
-If ``ArrayAzimuth`` is not available, ``ArrayOrientation`` (north, northwest, etc) is converted into an azimuth.
-A capacity or area-weighted average azimuth is calculated and converted into the nearest cardinal direction
-(north, northwest, etc) for submission into the ``array_azimuth`` HEScore input (which expects a direction,
-not a numeric azimuth).
+For each ``PVSystem`` the ``ArrayAzimuth`` (degrees clockwise from north) is
+retrieved. If ``ArrayAzimuth`` is not available, ``ArrayOrientation`` (north,
+northwest, etc) is converted into an azimuth. A weighted average azimuth is
+calculated and converted into the nearest cardinal direction (north, northwest,
+etc) for submission into the ``array_azimuth`` HEScore input (which expects a
+direction, not a numeric azimuth).
Panel Tilt
==========
-For each ``PVSystem`` the ``ArrayTilt`` (in degrees from horizontal) is retrieved.
-A capacity or area-weighted average tilt is calculated and submitted to the ``array_tilt`` HEScore input
-(which expects an enumeration, not a numeric tilt).
+
+For each ``PVSystem`` the ``ArrayTilt`` (in degrees from horizontal) is
+retrieved. A weighted average tilt is calculated and submitted to the
+``array_tilt`` HEScore input (which expects an enumeration, not a numeric tilt).
The tilt is mapped to HEScore as follows:
.. table:: Tilt mapping
diff --git a/hescorehpxml/base.py b/hescorehpxml/base.py
index a5f104d9..017db4c8 100644
--- a/hescorehpxml/base.py
+++ b/hescorehpxml/base.py
@@ -65,6 +65,10 @@ def round_to_nearest(x, vals, tails_tolerance=None):
return nearest
+def weighted_average(items, weights):
+ return sum(item * weight for item, weight in zip(items, weights)) / sum(weights)
+
+
class HPXMLtoHEScoreTranslatorBase(object):
SCHEMA_DIR = None
@@ -2235,31 +2239,28 @@ class HPXMLtoHEScoreTranslatorBase(object):
capacities = []
collector_areas = []
+ n_panels_per_system = []
years = []
azimuths = []
tilts = []
for pvsystem in pvsystems:
- max_power_output = self.xpath(pvsystem, 'h:MaxPowerOutput/text()')
- if max_power_output:
- capacities.append(float(max_power_output)) # W
- collector_areas.append(None)
- else:
- capacities.append(None)
- collector_area = self.xpath(pvsystem, 'h:CollectorArea/text()')
- if collector_area:
- collector_areas.append(float(collector_area))
- else:
- raise TranslationError('MaxPowerOutput or CollectorArea is required for every PVSystem.')
+ capacities.append(convert_to_type(float, self.xpath(pvsystem, 'h:MaxPowerOutput/text()')))
+ collector_areas.append(convert_to_type(float, self.xpath(pvsystem, 'h:CollectorArea/text()')))
+ n_panels_per_system.append(convert_to_type(int, self.xpath(pvsystem, 'h:NumberOfPanels/text()')))
+
+ if not (capacities[-1] or collector_areas[-1] or n_panels_per_system[-1]):
+ raise TranslationError(
+ 'MaxPowerOutput, NumberOfPanels, or CollectorArea is required for every PVSystem.'
+ )
- manufacture_years = list(map(
- int,
- self.xpath(
+ manufacture_years = [
+ int(x) for x in self.xpath(
pvsystem,
'h:YearInverterManufactured/text()|h:YearModulesManufactured/text()',
- aslist=True)
- )
- )
+ aslist=True
+ )
+ ]
if manufacture_years:
years.append(max(manufacture_years)) # Use the latest year of manufacture
else:
@@ -2283,31 +2284,25 @@ class HPXMLtoHEScoreTranslatorBase(object):
if None not in capacities:
solar_electric['capacity_known'] = True
- total_capacity = sum(capacities)
- solar_electric['system_capacity'] = total_capacity / 1000.
- solar_electric['year'] = int(
- old_div(sum([year * capacity for year, capacity in zip(years, capacities)]), total_capacity))
- wtavg_azimuth = old_div(sum(
- [az * capacity for az, capacity in zip(azimuths, capacities)]), total_capacity)
- wtavg_tilt = sum(t * capacity for t, capacity in zip(tilts, capacities)) / total_capacity
+ solar_electric['system_capacity'] = sum(capacities) / 1000.
+ weights = capacities
+ elif None not in n_panels_per_system:
+ solar_electric['capacity_known'] = False
+ solar_electric['num_panels'] = sum(n_panels_per_system)
+ weights = n_panels_per_system
elif None not in collector_areas:
solar_electric['capacity_known'] = False
- total_area = sum(collector_areas)
- solar_electric['num_panels'] = int(python2round(total_area / 17.6))
- solar_electric['year'] = int(sum([year * area for year, area in zip(years, collector_areas)]) / total_area)
- wtavg_azimuth = old_div(sum(
- [az * area for az, area in zip(azimuths, collector_areas)]
- ), total_area)
- wtavg_tilt = sum(t * area for t, area in zip(tilts, collector_areas)) / total_area
+ solar_electric['num_panels'] = int(round(sum(collector_areas) / 17.6))
+ weights = collector_areas
else:
raise TranslationError(
- 'Either a MaxPowerOutput must be specified for every PVSystem '
- 'or CollectorArea must be specified for every PVSystem.'
+ 'Either a MaxPowerOutput or NumberOfPanels or CollectorArea must be specified for every PVSystem.'
)
- nearest_azimuth = self.get_nearest_azimuth(azimuth=wtavg_azimuth)
+ solar_electric['year'] = round(weighted_average(years, weights))
+ nearest_azimuth = self.get_nearest_azimuth(azimuth=weighted_average(azimuths, weights))
solar_electric['array_azimuth'] = self.azimuth_to_hescore_orientation[nearest_azimuth]
- solar_electric['array_tilt'] = self.get_nearest_tilt(wtavg_tilt)
+ solar_electric['array_tilt'] = self.get_nearest_tilt(weighted_average(tilts, weights))
return generation
| NREL/hescore-hpxml | 72e5137bd0d00bd8c2640a7a18cfc9ab93c50906 | diff --git a/tests/test_translation.py b/tests/test_translation.py
index 4a5dd11a..cd222749 100644
--- a/tests/test_translation.py
+++ b/tests/test_translation.py
@@ -1642,6 +1642,7 @@ class TestPhotovoltaics(unittest.TestCase, ComparatorBase):
capacity=5,
inverter_year=2015,
module_year=2013,
+ n_panels=None,
collector_area=None):
addns = self.translator.addns
@@ -1668,6 +1669,8 @@ class TestPhotovoltaics(unittest.TestCase, ComparatorBase):
add_elem(pv_system, 'MaxPowerOutput', capacity * 1000)
if collector_area is not None:
add_elem(pv_system, 'CollectorArea', collector_area)
+ if n_panels is not None:
+ add_elem(pv_system, 'NumberOfPanels', n_panels)
if inverter_year is not None:
add_elem(pv_system, 'YearInverterManufactured', inverter_year)
if module_year is not None:
@@ -1690,9 +1693,22 @@ class TestPhotovoltaics(unittest.TestCase, ComparatorBase):
self._add_pv(capacity=None)
self.assertRaisesRegex(
TranslationError,
- r'MaxPowerOutput or CollectorArea is required',
+ r'MaxPowerOutput, NumberOfPanels, or CollectorArea is required',
tr.hpxml_to_hescore
- )
+ )
+
+ def test_n_panels(self):
+ tr = self._load_xmlfile('hescore_min_v3')
+ self._add_pv(
+ capacity=None,
+ n_panels=12,
+ collector_area=1
+ )
+ hesd = tr.hpxml_to_hescore()
+ pv = hesd['building']['systems']['generation']['solar_electric']
+ self.assertFalse(pv['capacity_known'])
+ self.assertNotIn('system_capacity', list(pv.keys()))
+ self.assertEqual(pv['num_panels'], 12)
def test_collector_area(self):
tr = self._load_xmlfile('hescore_min')
@@ -1700,7 +1716,7 @@ class TestPhotovoltaics(unittest.TestCase, ComparatorBase):
hesd = tr.hpxml_to_hescore()
pv = hesd['building']['systems']['generation']['solar_electric']
self.assertFalse(pv['capacity_known'])
- self.assertNotIn('capacity', list(pv.keys()))
+ self.assertNotIn('system_capacity', list(pv.keys()))
self.assertEqual(pv['num_panels'], 10)
def test_orientation(self):
@@ -1761,7 +1777,7 @@ class TestPhotovoltaics(unittest.TestCase, ComparatorBase):
module_year=2013)
self.assertRaisesRegex(
TranslationError,
- r'Either a MaxPowerOutput must be specified for every PVSystem or CollectorArea',
+ r'Either a MaxPowerOutput or NumberOfPanels or CollectorArea must be specified',
tr.hpxml_to_hescore
)
| Read NumberOfPanels from PV in HPXML v3
**Is your feature request related to a problem? Please describe.**
The `NumberOfPanels` element was added in HPXML v3 (probably to support HEScore). Currently we're either reading from `MaxPowerOutput` or estimating the number of panels from `CollectorArea` because that input wasn't available in HPXML v2. Now that we have it, we should read it.
**Describe the solution you'd like**
The following logic:
- If `MaxPowerOutput` is available, use that.
- Else if `NumberOfPanels` is available, use that.
- Else if `CollectorArea` is available, estimate number of panels from that as we do now.
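A minimal sketch of this priority order (the `pv_sizing` helper and its dict-based inputs are hypothetical, not the translator's real data structures; the 17.6 sq. ft. per panel and the W-to-kW conversion follow the patch above):

```python
def pv_sizing(pv_systems):
    # Each entry is assumed to be a dict with optional keys mirroring the HPXML
    # elements MaxPowerOutput (W), NumberOfPanels, and CollectorArea (sq. ft.).
    capacities = [s.get("max_power_output") for s in pv_systems]
    n_panels = [s.get("number_of_panels") for s in pv_systems]
    areas = [s.get("collector_area") for s in pv_systems]

    if pv_systems and None not in capacities:
        # Capacity known: report total DC capacity in kW.
        return {"capacity_known": True, "system_capacity": sum(capacities) / 1000.0}
    if pv_systems and None not in n_panels:
        return {"capacity_known": False, "num_panels": sum(n_panels)}
    if pv_systems and None not in areas:
        # Estimate the panel count at roughly 17.6 sq. ft. per panel.
        return {"capacity_known": False, "num_panels": int(round(sum(areas) / 17.6))}
    raise ValueError("Every PVSystem needs MaxPowerOutput, NumberOfPanels, or CollectorArea")

# pv_sizing([{"max_power_output": 3000.0}, {"max_power_output": 2000.0}])
# -> {'capacity_known': True, 'system_capacity': 5.0}
```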
cc @torstenglidden | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_translation.py::TestPhotovoltaics::test_capacity_missing",
"tests/test_translation.py::TestPhotovoltaics::test_n_panels",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_different_capacity_error"
] | [
"tests/test_translation.py::TestAPIHouses::test_house1",
"tests/test_translation.py::TestAPIHouses::test_house1_v2",
"tests/test_translation.py::TestAPIHouses::test_house1_v2_1",
"tests/test_translation.py::TestAPIHouses::test_house2",
"tests/test_translation.py::TestAPIHouses::test_house3",
"tests/test_translation.py::TestAPIHouses::test_house4",
"tests/test_translation.py::TestAPIHouses::test_house5",
"tests/test_translation.py::TestAPIHouses::test_house6",
"tests/test_translation.py::TestAPIHouses::test_house7",
"tests/test_translation.py::TestAPIHouses::test_house8",
"tests/test_translation.py::TestCLI::test_cli_pass",
"tests/test_translation.py::TestOtherHouses::test_air_source_heat_pump_has_no_ducts",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall_zero_rvalue",
"tests/test_translation.py::TestOtherHouses::test_attic_roof_assoc",
"tests/test_translation.py::TestOtherHouses::test_bad_duct_location",
"tests/test_translation.py::TestOtherHouses::test_bldgid_not_found",
"tests/test_translation.py::TestOtherHouses::test_clg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_cooling_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_dist_sys_idref",
"tests/test_translation.py::TestOtherHouses::test_evap_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_external_id_extension_passthru",
"tests/test_translation.py::TestOtherHouses::test_external_id_passthru",
"tests/test_translation.py::TestOtherHouses::test_extra_roof_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_extra_wall_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_floor_no_area",
"tests/test_translation.py::TestOtherHouses::test_foundation_walls_on_slab",
"tests/test_translation.py::TestOtherHouses::test_frac_duct_area_missing",
"tests/test_translation.py::TestOtherHouses::test_gable_wall_ignore",
"tests/test_translation.py::TestOtherHouses::test_heating_system_no_efficiency",
"tests/test_translation.py::TestOtherHouses::test_heating_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_cooling",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_heating",
"tests/test_translation.py::TestOtherHouses::test_hescore_min",
"tests/test_translation.py::TestOtherHouses::test_htg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_hvac_fractions_sum_to_one",
"tests/test_translation.py::TestOtherHouses::test_impossible_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_heating_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_triple_pane_window",
"tests/test_translation.py::TestOtherHouses::test_impossible_window",
"tests/test_translation.py::TestOtherHouses::test_indirect_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_invalid_attic_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_residential_faciliy_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_roof_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_surroundings",
"tests/test_translation.py::TestOtherHouses::test_log_wall_fail",
"tests/test_translation.py::TestOtherHouses::test_mentor_extension",
"tests/test_translation.py::TestOtherHouses::test_missing_attached_to_roof",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_system",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_system",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_residential_facility_type",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_color",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_type",
"tests/test_translation.py::TestOtherHouses::test_missing_siding",
"tests/test_translation.py::TestOtherHouses::test_missing_skylight_area",
"tests/test_translation.py::TestOtherHouses::test_missing_surroundings",
"tests/test_translation.py::TestOtherHouses::test_missing_water_heater",
"tests/test_translation.py::TestOtherHouses::test_missing_window_area",
"tests/test_translation.py::TestOtherHouses::test_missing_window_orientation",
"tests/test_translation.py::TestOtherHouses::test_only_duct_system_per_heating_sys",
"tests/test_translation.py::TestOtherHouses::test_ove_low_r",
"tests/test_translation.py::TestOtherHouses::test_preconstruction_event_type",
"tests/test_translation.py::TestOtherHouses::test_siding_cmu_fail",
"tests/test_translation.py::TestOtherHouses::test_siding_fail2",
"tests/test_translation.py::TestOtherHouses::test_slab_missing",
"tests/test_translation.py::TestOtherHouses::test_tankless_coil_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_too_many_duct_systems",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_all_same",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_conflict",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_wall_all_same_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_windows_area_wrong",
"tests/test_translation.py::TestOtherHouses::test_wall_construction_ps_low_r",
"tests/test_translation.py::TestOtherHouses::test_wall_insulation_layer_missing_rvalue",
"tests/test_translation.py::TestOtherHouses::test_wall_same_area_same_side_different_construction",
"tests/test_translation.py::TestOtherHouses::test_window_area_sum_on_angled_front_door",
"tests/test_translation.py::TestOtherHouses::test_window_attached_to_wall",
"tests/test_translation.py::TestOtherHouses::test_window_only_attached_to_foundation_wall",
"tests/test_translation.py::TestOtherHouses::test_wood_stove",
"tests/test_translation.py::TestOtherHouses::test_wood_stove_invalid_fuel_type",
"tests/test_translation.py::TestOtherHouses::test_zipcode_missing",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date1",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date2",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area1",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area2",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_year",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_heat_pump_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_storage_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_year",
"tests/test_translation.py::TestInputOutOfBounds::test_envelope_leakage",
"tests/test_translation.py::TestInputOutOfBounds::test_evap_cooler_missing_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height1",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height2",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_furnace",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_gchp",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_heat_pump",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_year",
"tests/test_translation.py::TestInputOutOfBounds::test_num_floor_above_grade",
"tests/test_translation.py::TestInputOutOfBounds::test_number_bedrooms",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_area",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_window_area",
"tests/test_translation.py::TestInputOutOfBounds::test_window_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built1",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built2",
"tests/test_translation.py::TestHVACFractions::test_allow_5pct_diff",
"tests/test_translation.py::TestHVACFractions::test_boiler_roomac",
"tests/test_translation.py::TestHVACFractions::test_different_weighting_factors",
"tests/test_translation.py::TestHVACFractions::test_furnace_baseboard_centralac",
"tests/test_translation.py::TestHVACFractions::test_furnace_heat_pump",
"tests/test_translation.py::TestHVACFractions::test_wall_furnace_baseboard_centralac",
"tests/test_translation.py::TestPhotovoltaics::test_azimuth_orientation_missing",
"tests/test_translation.py::TestPhotovoltaics::test_collector_area",
"tests/test_translation.py::TestPhotovoltaics::test_orientation",
"tests/test_translation.py::TestPhotovoltaics::test_pv",
"tests/test_translation.py::TestPhotovoltaics::test_tilt_missing",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_avg",
"tests/test_translation.py::TestPhotovoltaics::test_years_missing",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_floor_furnace",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_medium_dark_roof_color",
"tests/test_translation.py::TesHPXMLVersion2Point3::test_roof_absorptance",
"tests/test_translation.py::TestHEScore2019Updates::test_bldg_about_comment",
"tests/test_translation.py::TestHEScore2019Updates::test_conditioned_attic",
"tests/test_translation.py::TestHEScore2019Updates::test_duct_location_validation",
"tests/test_translation.py::TestHEScore2019Updates::test_ducted_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_ductless_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes_fail",
"tests/test_translation.py::TestHEScore2019Updates::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_exteriorshading",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_treatments",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless_energyfactorerror",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_over_ef",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_with_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_window_code_mappings_aluminum",
"tests/test_translation.py::TestHEScore2019Updates::test_window_solar_screens",
"tests/test_translation.py::TestHEScore2021Updates::test_skylight_assignment",
"tests/test_translation.py::TestHEScore2021Updates::test_zip_plus4",
"tests/test_translation.py::TestHEScoreV3::test_attic_roof_unattached",
"tests/test_translation.py::TestHEScoreV3::test_attic_type",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_frame_floors",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_roofs",
"tests/test_translation.py::TestHEScoreV3::test_hescore_min_translation",
"tests/test_translation.py::TestHEScoreV3::test_house1_translation",
"tests/test_translation.py::TestHEScoreV3::test_house2_translation",
"tests/test_translation.py::TestHEScoreV3::test_house3_translation",
"tests/test_translation.py::TestHEScoreV3::test_house4_translation",
"tests/test_translation.py::TestHEScoreV3::test_house5_translation",
"tests/test_translation.py::TestHEScoreV3::test_house6_translation",
"tests/test_translation.py::TestHEScoreV3::test_house7_translation",
"tests/test_translation.py::TestHEScoreV3::test_house8_translation",
"tests/test_translation.py::TestHEScoreV3::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScoreV3::test_townhouse_walls_translation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_insulation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_location"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-12-09T17:20:37Z" | bsd-2-clause |
|
NREL__hescore-hpxml-201 | diff --git a/examples/house7.xml b/examples/house7.xml
index b3301351..4de32c3e 100644
--- a/examples/house7.xml
+++ b/examples/house7.xml
@@ -76,7 +76,12 @@
<Slab>
<SystemIdentifier id="slab1"/>
<Area>900</Area>
- <!-- omitting insulation to see if it comes out at zero -->
+ <PerimeterInsulation>
+ <SystemIdentifier id='slab1perimeterins'/>
+ <Layer>
+ <NominalRValue>0.0</NominalRValue>
+ </Layer>
+ </PerimeterInsulation>
</Slab>
</Foundation>
</Foundations>
diff --git a/examples/house7_v3.xml b/examples/house7_v3.xml
index 8ebe06fe..a6483cdf 100644
--- a/examples/house7_v3.xml
+++ b/examples/house7_v3.xml
@@ -89,7 +89,12 @@
<Slab>
<SystemIdentifier id="slab1"/>
<Area>900</Area>
- <!-- omitting insulation to see if it comes out at zero -->
+ <PerimeterInsulation>
+ <SystemIdentifier id='slab1perimeterins'/>
+ <Layer>
+ <NominalRValue>0.0</NominalRValue>
+ </Layer>
+ </PerimeterInsulation>
</Slab>
</Slabs>
<Windows>
diff --git a/hescorehpxml/base.py b/hescorehpxml/base.py
index 83c3306a..55ef7dd0 100644
--- a/hescorehpxml/base.py
+++ b/hescorehpxml/base.py
@@ -1698,8 +1698,7 @@ class HPXMLtoHEScoreTranslatorBase(object):
else:
raise TranslationError(
'If there is more than one FoundationWall, an Area is required for each.')
- fwall_assembly_rvalue = self.get_foundation_wall_assembly_rvalue(fwall, fwall)
- if fwall_assembly_rvalue is not None: # TODO: Allow for AssemblyEffectiveRValue
+ if not self.every_surface_layer_has_nominal_rvalue(fwall):
raise TranslationError(
f'Every foundation wall insulation layer needs a NominalRValue, fwall_id = {fwallid}')
else:
@@ -1723,8 +1722,7 @@ class HPXMLtoHEScoreTranslatorBase(object):
else:
raise TranslationError(
'If there is more than one Slab, an ExposedPerimeter is required for each.')
- slab_assembly_rvalue = self.get_slab_assembly_rvalue(slab, slab)
- if slab_assembly_rvalue is not None: # TODO: Allow for AssemblyEffectiveRValue
+ if not self.every_surface_layer_has_nominal_rvalue(slab):
raise TranslationError(
f"Every slab insulation layer needs a NominalRValue, slab_id = {slabid}")
else:
@@ -2519,6 +2517,23 @@ class HPXMLtoHEScoreTranslatorBase(object):
return generation
+ def every_surface_layer_has_nominal_rvalue(self, surf_el):
+ # This variable will be true only if every wall layer has a NominalRValue
+ if surf_el.tag.endswith('Slab'):
+ surf_ins_layers = self.xpath(surf_el, 'h:PerimeterInsulation/h:Layer', aslist=True)
+ elif surf_el.tag.endswith('FoundationWall'):
+ surf_ins_layers = self.xpath(surf_el, 'h:Insulation/h:Layer', aslist=True)
+ every_layer_has_nominal_rvalue = True
+ if surf_ins_layers:
+ for layer in surf_ins_layers:
+ if self.xpath(layer, 'h:NominalRValue') is None:
+ every_layer_has_nominal_rvalue = False
+ break
+ else:
+ every_layer_has_nominal_rvalue = False
+
+ return every_layer_has_nominal_rvalue
+
def validate_hescore_inputs(self, hescore_inputs):
def do_bounds_check(fieldname, value, minincl, maxincl):
| NREL/hescore-hpxml | f595831260d52a162d9f97efe2786d52383d04ca | diff --git a/tests/test_translation.py b/tests/test_translation.py
index 6a04e211..7bd9bfa1 100644
--- a/tests/test_translation.py
+++ b/tests/test_translation.py
@@ -449,6 +449,25 @@ class TestOtherHouses(unittest.TestCase, ComparatorBase):
tr.hpxml_to_hescore
)
+ def test_missing_nominal_rvalue(self):
+ tr = self._load_xmlfile('house7')
+ slab_perim_ins_nominal_rvalue = self.xpath('//h:Slab/h:PerimeterInsulation/h:Layer/h:NominalRValue')
+ slab_perim_ins_nominal_rvalue.getparent().remove(slab_perim_ins_nominal_rvalue)
+ self.assertRaisesRegex(
+ TranslationError,
+ 'Every slab insulation layer needs a NominalRValue, slab_id = slab1',
+ tr.hpxml_to_hescore
+ )
+
+ tr = self._load_xmlfile('house9')
+ fwall_ins_nominal_rvalue = self.xpath('//h:FoundationWall/h:Insulation/h:Layer[2]/h:NominalRValue')
+ fwall_ins_nominal_rvalue.getparent().remove(fwall_ins_nominal_rvalue)
+ self.assertRaisesRegex(
+ TranslationError,
+ 'Every foundation wall insulation layer needs a NominalRValue, fwall_id = Surface_13',
+ tr.hpxml_to_hescore
+ )
+
def test_missing_window_area(self):
tr = self._load_xmlfile('hescore_min')
el = self.xpath('//h:Window[1]/h:Area')
@@ -1330,6 +1349,23 @@ class TestOtherHouses(unittest.TestCase, ComparatorBase):
tr.hpxml_to_hescore
)
+ # ignore assembly effective R-value when both assembly effective R-value and nominal R-value present
+ tr = self._load_xmlfile('house9')
+ E = self.element_maker()
+ fwall_ins = self.xpath('//h:FoundationWall[1]/h:Insulation')
+ sysid = fwall_ins.find(tr.addns('h:SystemIdentifier'))
+ sysid.addnext(E.AssemblyEffectiveRValue('6.0'))
+ hesinp = tr.hpxml_to_hescore()
+ self.assertEqual(hesinp['building']['zone']['zone_floor'][0]['foundation_insulation_level'], 0)
+
+ tr = self._load_xmlfile('house3')
+ E = self.element_maker()
+ slab_perim_ins = self.xpath('//h:Slab/h:PerimeterInsulation')
+ sysid = slab_perim_ins.find(tr.addns('h:SystemIdentifier'))
+ sysid.addnext(E.AssemblyEffectiveRValue('6.0'))
+ hesinp = tr.hpxml_to_hescore()
+ self.assertEqual(hesinp['building']['zone']['zone_floor'][0]['foundation_insulation_level'], 5)
+
def test_duct_leakage_to_outside(self):
tr = self._load_xmlfile('house1')
E = self.element_maker()
| Foundation wall insulation errors when AssemblyRValue is present
**Describe the bug**
If a foundation wall has _both_ `AssemblyRValue` and `Layer/NominalRValue` elements, it throws an error saying that a `NominalRValue` is required for all `Layer`s.
**Error Message**
```
ERROR:TranslationError:Every foundation wall insulation layer needs a NominalRValue, fwall_id = ImpFoundationWallCrawl
```
**To Reproduce**
```xml
<FoundationWall>
<SystemIdentifier id="ImpFoundationWallCrawl" sameas="BaseFoundationWallCrawl" />
<Length>169.6985563</Length>
<Height>3</Height>
<Area>509.0956688</Area>
<BelowGradeDepth>0.5</BelowGradeDepth>
<AdjacentTo>ambient</AdjacentTo>
<Insulation>
<SystemIdentifier id="ImpConditionedCrawlInsulation1" sameas="BaseConditionedCrawlInsulation1" />
<InsulationGrade>1</InsulationGrade>
<AssemblyEffectiveRValue>12.8</AssemblyEffectiveRValue>
<MisalignedInsulation>false</MisalignedInsulation>
<Layer>
<InstallationType>continuous</InstallationType>
<InsulationMaterial>
<Rigid>polyisocyanurate</Rigid>
</InsulationMaterial>
<NominalRValue>10.8</NominalRValue>
<Thickness>1.5</Thickness>
</Layer>
</Insulation>
</FoundationWall>
```
**Expected behavior**
To not throw an error.
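The patch earlier in this record addresses this by checking each insulation `Layer` for a `NominalRValue` and ignoring any `AssemblyEffectiveRValue` on the parent `Insulation` element. A minimal sketch of that check, assuming `fwall` is an lxml/ElementTree element for the `<FoundationWall>` node and omitting namespace handling for brevity:

```python
def every_layer_has_nominal_rvalue(fwall):
    layers = fwall.findall("Insulation/Layer")
    # An AssemblyEffectiveRValue sibling is simply ignored; what matters is that
    # every Layer carries its own NominalRValue. No layers at all also fails.
    return bool(layers) and all(
        layer.find("NominalRValue") is not None for layer in layers
    )
```

In the patch, the `TranslationError` is raised only when a check like this returns `False`.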
**Environment (please complete the following information):**
- OS: Mac, Linux
- Python Version: 3.9
**Additional context**
cc @gamalielL @torstenglidden
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_translation.py::TestOtherHouses::test_assembly_rvalues",
"tests/test_translation.py::TestOtherHouses::test_missing_nominal_rvalue"
] | [
"tests/test_translation.py::TestAPIHouses::test_assembly_rvalue",
"tests/test_translation.py::TestAPIHouses::test_house1",
"tests/test_translation.py::TestAPIHouses::test_house1_v2",
"tests/test_translation.py::TestAPIHouses::test_house1_v2_1",
"tests/test_translation.py::TestAPIHouses::test_house2",
"tests/test_translation.py::TestAPIHouses::test_house3",
"tests/test_translation.py::TestAPIHouses::test_house4",
"tests/test_translation.py::TestAPIHouses::test_house5",
"tests/test_translation.py::TestAPIHouses::test_house6",
"tests/test_translation.py::TestAPIHouses::test_house7",
"tests/test_translation.py::TestAPIHouses::test_house8",
"tests/test_translation.py::TestAPIHouses::test_house9",
"tests/test_translation.py::TestCLI::test_cli_pass",
"tests/test_translation.py::TestOtherHouses::test_air_source_heat_pump_has_no_ducts",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall",
"tests/test_translation.py::TestOtherHouses::test_attic_knee_wall_zero_rvalue",
"tests/test_translation.py::TestOtherHouses::test_attic_roof_assoc",
"tests/test_translation.py::TestOtherHouses::test_bad_duct_location",
"tests/test_translation.py::TestOtherHouses::test_bldgid_not_found",
"tests/test_translation.py::TestOtherHouses::test_clg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_cooling_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_dist_sys_idref",
"tests/test_translation.py::TestOtherHouses::test_duct_leakage_to_outside",
"tests/test_translation.py::TestOtherHouses::test_ducts_insulation",
"tests/test_translation.py::TestOtherHouses::test_evap_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_external_id_extension_passthru",
"tests/test_translation.py::TestOtherHouses::test_external_id_passthru",
"tests/test_translation.py::TestOtherHouses::test_extra_roof_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_extra_wall_sheathing_insulation",
"tests/test_translation.py::TestOtherHouses::test_floor_no_area",
"tests/test_translation.py::TestOtherHouses::test_foundation_walls_on_slab",
"tests/test_translation.py::TestOtherHouses::test_frac_duct_area_missing",
"tests/test_translation.py::TestOtherHouses::test_gable_wall_ignore",
"tests/test_translation.py::TestOtherHouses::test_heating_system_no_efficiency",
"tests/test_translation.py::TestOtherHouses::test_heating_system_wrong_efficiency_type",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_cooling",
"tests/test_translation.py::TestOtherHouses::test_heatpump_no_heating",
"tests/test_translation.py::TestOtherHouses::test_hescore_min",
"tests/test_translation.py::TestOtherHouses::test_htg_sys_has_air_dist",
"tests/test_translation.py::TestOtherHouses::test_hvac_fractions_sum_to_one",
"tests/test_translation.py::TestOtherHouses::test_impossible_cooling_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_heating_system_type",
"tests/test_translation.py::TestOtherHouses::test_impossible_triple_pane_window",
"tests/test_translation.py::TestOtherHouses::test_impossible_window",
"tests/test_translation.py::TestOtherHouses::test_indirect_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_invalid_attic_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_infiltration_unit_of_measure",
"tests/test_translation.py::TestOtherHouses::test_invalid_residential_faciliy_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_roof_type",
"tests/test_translation.py::TestOtherHouses::test_invalid_surroundings",
"tests/test_translation.py::TestOtherHouses::test_log_wall_fail",
"tests/test_translation.py::TestOtherHouses::test_mentor_extension",
"tests/test_translation.py::TestOtherHouses::test_missing_attached_to_roof",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_system",
"tests/test_translation.py::TestOtherHouses::test_missing_cooling_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_system",
"tests/test_translation.py::TestOtherHouses::test_missing_heating_weighting_factor",
"tests/test_translation.py::TestOtherHouses::test_missing_infiltration",
"tests/test_translation.py::TestOtherHouses::test_missing_residential_facility_type",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_color",
"tests/test_translation.py::TestOtherHouses::test_missing_roof_type",
"tests/test_translation.py::TestOtherHouses::test_missing_siding",
"tests/test_translation.py::TestOtherHouses::test_missing_skylight_area",
"tests/test_translation.py::TestOtherHouses::test_missing_surroundings",
"tests/test_translation.py::TestOtherHouses::test_missing_water_heater",
"tests/test_translation.py::TestOtherHouses::test_missing_window_area",
"tests/test_translation.py::TestOtherHouses::test_missing_window_orientation",
"tests/test_translation.py::TestOtherHouses::test_only_duct_system_per_heating_sys",
"tests/test_translation.py::TestOtherHouses::test_ove_low_r",
"tests/test_translation.py::TestOtherHouses::test_preconstruction_event_type",
"tests/test_translation.py::TestOtherHouses::test_radiant_barrier",
"tests/test_translation.py::TestOtherHouses::test_siding_cmu_fail",
"tests/test_translation.py::TestOtherHouses::test_siding_fail2",
"tests/test_translation.py::TestOtherHouses::test_slab_missing",
"tests/test_translation.py::TestOtherHouses::test_tankless_coil_dhw_error",
"tests/test_translation.py::TestOtherHouses::test_too_many_duct_systems",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_all_same",
"tests/test_translation.py::TestOtherHouses::test_townhouse_walls_conflict",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_window_wall_all_same_fail",
"tests/test_translation.py::TestOtherHouses::test_townhouse_windows_area_wrong",
"tests/test_translation.py::TestOtherHouses::test_wall_construction_ps_low_r",
"tests/test_translation.py::TestOtherHouses::test_wall_insulation_layer_missing_rvalue",
"tests/test_translation.py::TestOtherHouses::test_wall_same_area_same_side_different_construction",
"tests/test_translation.py::TestOtherHouses::test_window_area_sum_on_angled_front_door",
"tests/test_translation.py::TestOtherHouses::test_window_attached_to_wall",
"tests/test_translation.py::TestOtherHouses::test_window_only_attached_to_foundation_wall",
"tests/test_translation.py::TestOtherHouses::test_wood_stove",
"tests/test_translation.py::TestOtherHouses::test_wood_stove_invalid_fuel_type",
"tests/test_translation.py::TestOtherHouses::test_zipcode_missing",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date1",
"tests/test_translation.py::TestInputOutOfBounds::test_assessment_date2",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area1",
"tests/test_translation.py::TestInputOutOfBounds::test_conditioned_floor_area2",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_system_not_requiring_ducts",
"tests/test_translation.py::TestInputOutOfBounds::test_cooling_year",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_heat_pump_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_storage_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_dhw_year",
"tests/test_translation.py::TestInputOutOfBounds::test_envelope_leakage",
"tests/test_translation.py::TestInputOutOfBounds::test_evap_cooler_missing_efficiency",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height1",
"tests/test_translation.py::TestInputOutOfBounds::test_floor_to_ceiling_height2",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_furnace",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_gchp",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_efficiency_heat_pump",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_system_not_requiring_ducts",
"tests/test_translation.py::TestInputOutOfBounds::test_heating_year",
"tests/test_translation.py::TestInputOutOfBounds::test_num_floor_above_grade",
"tests/test_translation.py::TestInputOutOfBounds::test_number_bedrooms",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_area",
"tests/test_translation.py::TestInputOutOfBounds::test_skylight_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_window_area",
"tests/test_translation.py::TestInputOutOfBounds::test_window_u_value",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built1",
"tests/test_translation.py::TestInputOutOfBounds::test_year_built2",
"tests/test_translation.py::TestHVACFractions::test_allow_5pct_diff",
"tests/test_translation.py::TestHVACFractions::test_boiler_roomac",
"tests/test_translation.py::TestHVACFractions::test_different_weighting_factors",
"tests/test_translation.py::TestHVACFractions::test_furnace_baseboard_centralac",
"tests/test_translation.py::TestHVACFractions::test_furnace_heat_pump",
"tests/test_translation.py::TestHVACFractions::test_wall_furnace_baseboard_centralac",
"tests/test_translation.py::TestPhotovoltaics::test_azimuth_orientation_missing",
"tests/test_translation.py::TestPhotovoltaics::test_capacity_missing",
"tests/test_translation.py::TestPhotovoltaics::test_collector_area",
"tests/test_translation.py::TestPhotovoltaics::test_n_panels",
"tests/test_translation.py::TestPhotovoltaics::test_orientation",
"tests/test_translation.py::TestPhotovoltaics::test_pv",
"tests/test_translation.py::TestPhotovoltaics::test_tilt_missing",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_avg",
"tests/test_translation.py::TestPhotovoltaics::test_two_sys_different_capacity_error",
"tests/test_translation.py::TestPhotovoltaics::test_years_missing",
"tests/test_translation.py::TestDuctLocations::test_exterior_wall",
"tests/test_translation.py::TestDuctLocations::test_outside",
"tests/test_translation.py::TestDuctLocations::test_outside_v2",
"tests/test_translation.py::TestDuctLocations::test_roof_deck",
"tests/test_translation.py::TestDuctLocations::test_under_slab",
"tests/test_translation.py::TestHPXMLVersion2Point3::test_floor_furnace",
"tests/test_translation.py::TestHPXMLVersion2Point3::test_medium_dark_roof_color",
"tests/test_translation.py::TestHPXMLVersion2Point3::test_roof_absorptance",
"tests/test_translation.py::TestHEScore2019Updates::test_bldg_about_comment",
"tests/test_translation.py::TestHEScore2019Updates::test_conditioned_attic",
"tests/test_translation.py::TestHEScore2019Updates::test_duct_location_validation",
"tests/test_translation.py::TestHEScore2019Updates::test_ducted_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_ductless_hvac_combinations",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes",
"tests/test_translation.py::TestHEScore2019Updates::test_hpwes_fail",
"tests/test_translation.py::TestHEScore2019Updates::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_exteriorshading",
"tests/test_translation.py::TestHEScore2019Updates::test_skylight_solar_screens_treatments",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_tankless_energyfactorerror",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_over_ef",
"tests/test_translation.py::TestHEScore2019Updates::test_uef_with_tankless",
"tests/test_translation.py::TestHEScore2019Updates::test_window_code_mappings_aluminum",
"tests/test_translation.py::TestHEScore2019Updates::test_window_solar_screens",
"tests/test_translation.py::TestHEScore2021Updates::test_hpxmlv2_garage_duct_location",
"tests/test_translation.py::TestHEScore2021Updates::test_skylight_assignment",
"tests/test_translation.py::TestHEScore2021Updates::test_xps_negative",
"tests/test_translation.py::TestHEScore2021Updates::test_zip_plus4",
"tests/test_translation.py::TestHEScoreV3::test_air_sealed_enclosure",
"tests/test_translation.py::TestHEScoreV3::test_attic_roof_unattached",
"tests/test_translation.py::TestHEScoreV3::test_attic_type",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_frame_floors",
"tests/test_translation.py::TestHEScoreV3::test_attic_with_multiple_roofs",
"tests/test_translation.py::TestHEScoreV3::test_hescore_min_translation",
"tests/test_translation.py::TestHEScoreV3::test_hescore_min_v3",
"tests/test_translation.py::TestHEScoreV3::test_house1_translation",
"tests/test_translation.py::TestHEScoreV3::test_house2_translation",
"tests/test_translation.py::TestHEScoreV3::test_house3_translation",
"tests/test_translation.py::TestHEScoreV3::test_house4_translation",
"tests/test_translation.py::TestHEScoreV3::test_house5_translation",
"tests/test_translation.py::TestHEScoreV3::test_house6_translation",
"tests/test_translation.py::TestHEScoreV3::test_house7_translation",
"tests/test_translation.py::TestHEScoreV3::test_house8_translation",
"tests/test_translation.py::TestHEScoreV3::test_house9",
"tests/test_translation.py::TestHEScoreV3::test_mini_split_cooling_only",
"tests/test_translation.py::TestHEScoreV3::test_townhouse_walls_translation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_insulation",
"tests/test_translation.py::TestHEScoreV3::test_v3_duct_location"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2022-05-31T19:38:42Z" | bsd-2-clause |
|
NREL__hescore-hpxml-98 | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 0a4f5cf6..483e6512 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -11,7 +11,7 @@ jobs:
pip install --user virtualenv
python -m virtualenv env
source env/bin/activate
- pip install --progress-bar off .[dev]
+ pip install --progress-bar off .[test]
- run: &unittests
name: Run Unittests
command: |
diff --git a/docs/source/translation/zone_roof.rst b/docs/source/translation/zone_roof.rst
index b4f1e7df..4e1ed757 100644
--- a/docs/source/translation/zone_roof.rst
+++ b/docs/source/translation/zone_roof.rst
@@ -30,16 +30,19 @@ type according to the following mapping.
unvented attic vented_attic
vented attic vented_attic
venting unknown attic vented_attic
- other *not translated*
+ other *see note below*
===================== ================
-.. warning::
+.. note::
- There is no way to get a HEScore ``cond_attic``.
+ Currently, there's no existing HPXML element capturing a conditioned attic.
+ The only way to model a HEScore ``cond_attic`` is to specify HPXML Attic Type
+ to be ``other`` with an extra element ``Attic/extension/Conditioned`` to be
+ ``true``.
+
+ Otherwise, HPXML Attic Type ``other`` will not be translated and will
+ result in a translation error.
-.. note::
-
- Items that are *not translated* will result in a translation error.
HEScore can accept up to two attic/roof constructions. If there are more than
two specified in HPXML, the properties of the ``Attic`` elements with
diff --git a/hescorehpxml/__init__.py b/hescorehpxml/__init__.py
index 056928d6..306fcba0 100644
--- a/hescorehpxml/__init__.py
+++ b/hescorehpxml/__init__.py
@@ -923,9 +923,13 @@ class HPXMLtoHEScoreTranslator(object):
hpxml_attic_type = xpath(attic, 'h:AtticType/text()')
atticd['rooftype'] = rooftypemap[hpxml_attic_type]
if atticd['rooftype'] is None:
- raise TranslationError(
- 'Attic {}: Cannot translate HPXML AtticType {} to HEScore rooftype.'.format(atticid,
- hpxml_attic_type))
+ attc_is_cond = xpath(attic, 'h:extension/h:Conditioned/text()')
+ if attc_is_cond == 'true':
+ atticd['rooftype'] = 'cond_attic'
+ else:
+ raise TranslationError(
+ 'Attic {}: Cannot translate HPXML AtticType {} to HEScore rooftype.'.format(atticid,
+ hpxml_attic_type))
# Roof color
solar_absorptance = convert_to_type(float, xpath(roof, 'h:SolarAbsorptance/text()'))
diff --git a/setup.py b/setup.py
index f3143adf..5629d1aa 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ with open(path.join(here, 'README.md'), encoding='utf-8') as f:
setup(
name='hescore-hpxml',
- version='5.0.0',
+ version='5.0.2',
description='HPXML Translator for the HEScore API',
long_description=long_description,
long_description_content_type='text/markdown',
@@ -35,7 +35,10 @@ setup(
],
keywords='home energy score hescore doe nrel',
packages=['hescorehpxml'],
- install_requires=['lxml'],
+ install_requires=[
+ 'lxml',
+ 'future',
+ ],
extras_require={
'dev': [
'flake8',
@@ -43,7 +46,13 @@ setup(
'sphinx',
'sphinx_rtd_theme',
'sphinx-autobuild',
- 'future'
+ ],
+ 'test': [
+ 'flake8',
+ 'coverage',
+ 'sphinx',
+ 'sphinx_rtd_theme',
+ 'sphinx-autobuild',
]
},
include_package_data=True,
| NREL/hescore-hpxml | cb48cf3a4bdcdbc0951186dd59b0e5e64c2c3d67 | diff --git a/tests/tests.py b/tests/tests.py
index 13fff2ac..ccc81832 100644
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -1970,6 +1970,33 @@ class TestHEScore2019Updates(unittest.TestCase, ComparatorBase):
self.assertEqual(system['type'], 'tankless')
self.assertAlmostEqual(system['energy_factor'], 0.7)
+ def test_conditioned_attic(self):
+ tr = self._load_xmlfile('house4')
+ attic = self.xpath('//h:Attic[h:SystemIdentifier/@id="attic1"]')
+ attic_type = self.xpath('//h:Attic[h:SystemIdentifier/@id="attic1"]/h:AtticType')
+ attic_type.text = 'other'
+ self.assertRaisesRegexp(
+ TranslationError,
+ r'Attic attic1: Cannot translate HPXML AtticType other to HEScore rooftype.',
+ tr.hpxml_to_hescore_dict
+ )
+ is_attic_cond = etree.SubElement(etree.SubElement(attic, tr.addns('h:extension')), tr.addns('h:Conditioned'))
+ is_attic_cond.text = 'true'
+ d = tr.hpxml_to_hescore_dict()
+ roof_type = d['building']['zone']['zone_roof'][0]['roof_type']
+ self.assertEqual(roof_type, 'cond_attic')
+ is_attic_cond.text = 'false'
+ self.assertRaisesRegexp(
+ TranslationError,
+ r'Attic \w+: Cannot translate HPXML AtticType other to HEScore rooftype.',
+ tr.hpxml_to_hescore_dict
+ )
+ attic_type.text = 'vented attic'
+ is_attic_cond.text = 'true'
+ d = tr.hpxml_to_hescore_dict()
+ roof_type = d['building']['zone']['zone_roof'][0]['roof_type']
+ self.assertEqual(roof_type, 'vented_attic')
+
if __name__ == "__main__":
unittest.main()
| Workaround for conditioned attics in HPXML 2.3
There is [no way to translate a conditioned attic](https://hescore-hpxml.readthedocs.io/en/latest/translation/zone_roof.html#id1) from HPXML 2.3. It [will be possible](https://hpxml.nrel.gov/datadictionary/3/Building/BuildingDetails/Enclosure/Attics/Attic/AtticType/Attic/Conditioned) when we move to HPXML 3. We should include an extension to allow people to get that in v2.3.
Let's do this as follows: To get a conditioned attic in HEScore, you set `Attic/AtticType` to "other" and then `Attic/extension/Conditioned` to "true". If it meets those two criteria, then translate it as `cond_attic` in HEScore. If not, act as it normally does (i.e. "other" will cause the translation to fail).
To Do:
- [ ] Update translation
- [ ] Write test of new capability
- [ ] Update docs to reflect change in behavior | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/tests.py::TestHEScore2019Updates::test_conditioned_attic"
] | [
"tests/tests.py::TestAPIHouses::test_house1",
"tests/tests.py::TestAPIHouses::test_house1_v1_1",
"tests/tests.py::TestAPIHouses::test_house1_v2",
"tests/tests.py::TestAPIHouses::test_house1_v2_1",
"tests/tests.py::TestAPIHouses::test_house2",
"tests/tests.py::TestAPIHouses::test_house3",
"tests/tests.py::TestAPIHouses::test_house4",
"tests/tests.py::TestAPIHouses::test_house5",
"tests/tests.py::TestAPIHouses::test_house6",
"tests/tests.py::TestAPIHouses::test_house7",
"tests/tests.py::TestAPIHouses::test_house8",
"tests/tests.py::TestOtherHouses::test_air_source_heat_pump_has_no_ducts",
"tests/tests.py::TestOtherHouses::test_attic_knee_wall",
"tests/tests.py::TestOtherHouses::test_attic_roof_assoc",
"tests/tests.py::TestOtherHouses::test_bad_duct_location",
"tests/tests.py::TestOtherHouses::test_bldgid_not_found",
"tests/tests.py::TestOtherHouses::test_clg_sys_has_air_dist",
"tests/tests.py::TestOtherHouses::test_cooling_system_wrong_efficiency_type",
"tests/tests.py::TestOtherHouses::test_dist_sys_idref",
"tests/tests.py::TestOtherHouses::test_evap_cooling_system_type",
"tests/tests.py::TestOtherHouses::test_external_id_extension_passthru",
"tests/tests.py::TestOtherHouses::test_external_id_passthru",
"tests/tests.py::TestOtherHouses::test_extra_roof_sheathing_insulation",
"tests/tests.py::TestOtherHouses::test_extra_wall_sheathing_insulation",
"tests/tests.py::TestOtherHouses::test_floor_no_area",
"tests/tests.py::TestOtherHouses::test_foundation_walls_on_slab",
"tests/tests.py::TestOtherHouses::test_heating_system_no_efficiency",
"tests/tests.py::TestOtherHouses::test_heating_system_wrong_efficiency_type",
"tests/tests.py::TestOtherHouses::test_heatpump_no_cooling",
"tests/tests.py::TestOtherHouses::test_heatpump_no_heating",
"tests/tests.py::TestOtherHouses::test_hescore_min",
"tests/tests.py::TestOtherHouses::test_htg_sys_has_air_dist",
"tests/tests.py::TestOtherHouses::test_hvac_fractions_sum_to_one",
"tests/tests.py::TestOtherHouses::test_impossible_cooling_system_type",
"tests/tests.py::TestOtherHouses::test_impossible_heating_system_type",
"tests/tests.py::TestOtherHouses::test_impossible_triple_pane_window",
"tests/tests.py::TestOtherHouses::test_impossible_window",
"tests/tests.py::TestOtherHouses::test_indirect_dhw_error",
"tests/tests.py::TestOtherHouses::test_invalid_attic_type",
"tests/tests.py::TestOtherHouses::test_invalid_residential_faciliy_type",
"tests/tests.py::TestOtherHouses::test_invalid_roof_type",
"tests/tests.py::TestOtherHouses::test_invalid_surroundings",
"tests/tests.py::TestOtherHouses::test_log_wall_fail",
"tests/tests.py::TestOtherHouses::test_mentor_extension",
"tests/tests.py::TestOtherHouses::test_missing_attached_to_roof",
"tests/tests.py::TestOtherHouses::test_missing_cooling_system",
"tests/tests.py::TestOtherHouses::test_missing_cooling_weighting_factor",
"tests/tests.py::TestOtherHouses::test_missing_heating_system",
"tests/tests.py::TestOtherHouses::test_missing_heating_weighting_factor",
"tests/tests.py::TestOtherHouses::test_missing_residential_facility_type",
"tests/tests.py::TestOtherHouses::test_missing_roof_color",
"tests/tests.py::TestOtherHouses::test_missing_roof_type",
"tests/tests.py::TestOtherHouses::test_missing_siding",
"tests/tests.py::TestOtherHouses::test_missing_skylight_area",
"tests/tests.py::TestOtherHouses::test_missing_surroundings",
"tests/tests.py::TestOtherHouses::test_missing_water_heater",
"tests/tests.py::TestOtherHouses::test_missing_window_area",
"tests/tests.py::TestOtherHouses::test_missing_window_orientation",
"tests/tests.py::TestOtherHouses::test_only_duct_system_per_heating_sys",
"tests/tests.py::TestOtherHouses::test_ove_low_r",
"tests/tests.py::TestOtherHouses::test_preconstruction_event_type",
"tests/tests.py::TestOtherHouses::test_siding_cmu_fail",
"tests/tests.py::TestOtherHouses::test_siding_fail2",
"tests/tests.py::TestOtherHouses::test_tankless_coil_dhw_error",
"tests/tests.py::TestOtherHouses::test_too_many_duct_systems",
"tests/tests.py::TestOtherHouses::test_townhouse_walls",
"tests/tests.py::TestOtherHouses::test_townhouse_walls_all_same",
"tests/tests.py::TestOtherHouses::test_townhouse_walls_conflict",
"tests/tests.py::TestOtherHouses::test_townhouse_window_fail",
"tests/tests.py::TestOtherHouses::test_townhouse_window_wall_all_same_fail",
"tests/tests.py::TestOtherHouses::test_townhouse_windows_area_wrong",
"tests/tests.py::TestOtherHouses::test_wall_construction_ps_low_r",
"tests/tests.py::TestOtherHouses::test_wall_insulation_layer_missing_rvalue",
"tests/tests.py::TestOtherHouses::test_wall_same_area_same_side_different_construction",
"tests/tests.py::TestOtherHouses::test_window_area_sum_on_angled_front_door",
"tests/tests.py::TestOtherHouses::test_window_attached_to_wall",
"tests/tests.py::TestOtherHouses::test_wood_stove",
"tests/tests.py::TestOtherHouses::test_wood_stove_invalid_fuel_type",
"tests/tests.py::TestOtherHouses::test_zipcode_missing",
"tests/tests.py::TestInputOutOfBounds::test_assessment_date1",
"tests/tests.py::TestInputOutOfBounds::test_assessment_date2",
"tests/tests.py::TestInputOutOfBounds::test_conditioned_floor_area1",
"tests/tests.py::TestInputOutOfBounds::test_conditioned_floor_area2",
"tests/tests.py::TestInputOutOfBounds::test_cooling_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_cooling_year",
"tests/tests.py::TestInputOutOfBounds::test_dhw_heat_pump_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_dhw_storage_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_dhw_year",
"tests/tests.py::TestInputOutOfBounds::test_envelope_leakage",
"tests/tests.py::TestInputOutOfBounds::test_evap_cooler_missing_efficiency",
"tests/tests.py::TestInputOutOfBounds::test_floor_to_ceiling_height1",
"tests/tests.py::TestInputOutOfBounds::test_floor_to_ceiling_height2",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_furnace",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_gchp",
"tests/tests.py::TestInputOutOfBounds::test_heating_efficiency_heat_pump",
"tests/tests.py::TestInputOutOfBounds::test_heating_year",
"tests/tests.py::TestInputOutOfBounds::test_num_floor_above_grade",
"tests/tests.py::TestInputOutOfBounds::test_skylight_area",
"tests/tests.py::TestInputOutOfBounds::test_skylight_u_value",
"tests/tests.py::TestInputOutOfBounds::test_window_area",
"tests/tests.py::TestInputOutOfBounds::test_window_u_value",
"tests/tests.py::TestInputOutOfBounds::test_year_built1",
"tests/tests.py::TestInputOutOfBounds::test_year_built2",
"tests/tests.py::TestHVACFractions::test_allow_5pct_diff",
"tests/tests.py::TestHVACFractions::test_boiler_roomac",
"tests/tests.py::TestHVACFractions::test_furnace_baseboard_centralac",
"tests/tests.py::TestHVACFractions::test_furnace_heat_pump",
"tests/tests.py::TestHVACFractions::test_wall_furnace_baseboard_centralac",
"tests/tests.py::TestPhotovoltaics::test_azimuth_orientation_missing",
"tests/tests.py::TestPhotovoltaics::test_capacity_missing",
"tests/tests.py::TestPhotovoltaics::test_collector_area",
"tests/tests.py::TestPhotovoltaics::test_orientation",
"tests/tests.py::TestPhotovoltaics::test_pv",
"tests/tests.py::TestPhotovoltaics::test_two_sys_avg",
"tests/tests.py::TestPhotovoltaics::test_two_sys_different_capacity_error",
"tests/tests.py::TestPhotovoltaics::test_years_missing",
"tests/tests.py::TesHPXMLVersion2Point3::test_floor_furnace",
"tests/tests.py::TesHPXMLVersion2Point3::test_medium_dark_roof_color",
"tests/tests.py::TesHPXMLVersion2Point3::test_roof_absorptance",
"tests/tests.py::TestHEScore2019Updates::test_bldg_about_comment",
"tests/tests.py::TestHEScore2019Updates::test_duct_location_validation",
"tests/tests.py::TestHEScore2019Updates::test_hvac_combinations",
"tests/tests.py::TestHEScore2019Updates::test_skylight_solar_screens_exteriorshading",
"tests/tests.py::TestHEScore2019Updates::test_skylight_solar_screens_treatments",
"tests/tests.py::TestHEScore2019Updates::test_tankless",
"tests/tests.py::TestHEScore2019Updates::test_tankless_energyfactorerror",
"tests/tests.py::TestHEScore2019Updates::test_uef_over_ef",
"tests/tests.py::TestHEScore2019Updates::test_uef_with_tankless",
"tests/tests.py::TestHEScore2019Updates::test_window_solar_screens"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2019-07-24T19:57:16Z" | bsd-2-clause |
|
NVIDIA__NVFlare-56 | diff --git a/nvflare/apis/analytix.py b/nvflare/apis/analytix.py
index b99c10f3..26a6423b 100644
--- a/nvflare/apis/analytix.py
+++ b/nvflare/apis/analytix.py
@@ -17,8 +17,8 @@ from typing import Optional
from nvflare.apis.dxo import DXO, DataKind
-DATA_TYPE_KEY = "analytics_data_type"
-KWARGS_KEY = "analytics_kwargs"
+_DATA_TYPE_KEY = "analytics_data_type"
+_KWARGS_KEY = "analytics_kwargs"
class AnalyticsDataType(Enum):
@@ -29,7 +29,7 @@ class AnalyticsDataType(Enum):
class AnalyticsData:
- def __init__(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict]):
+ def __init__(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict] = None):
"""This class defines AnalyticsData format.
It is a wrapper to provide from / to DXO conversion.
@@ -39,6 +39,18 @@ class AnalyticsData:
data_type (AnalyticDataType): type of the analytic data.
kwargs (optional, dict): additional arguments to be passed.
"""
+ if not isinstance(tag, str):
+ raise TypeError(f"expect tag to be an instance of str, but got {type(tag)}.")
+ if not isinstance(data_type, AnalyticsDataType):
+ raise TypeError(f"expect data_type to be an instance of AnalyticsDataType, but got {type(data_type)}.")
+ if kwargs and not isinstance(kwargs, dict):
+ raise TypeError(f"expect kwargs to be an instance of dict, but got {type(kwargs)}.")
+ if data_type == AnalyticsDataType.SCALAR and not isinstance(value, float):
+ raise TypeError(f"expect value to be an instance of float, but got {type(value)}")
+ elif data_type == AnalyticsDataType.SCALARS and not isinstance(value, dict):
+ raise TypeError(f"expect value to be an instance of dict, but got {type(value)}")
+ elif data_type == AnalyticsDataType.TEXT and not isinstance(value, str):
+ raise TypeError(f"expect value to be an instance of str, but got {type(value)}")
self.tag = tag
self.value = value
self.data_type = data_type
@@ -47,8 +59,8 @@ class AnalyticsData:
def to_dxo(self):
"""Converts the AnalyticsData to DXO object."""
dxo = DXO(data_kind=DataKind.ANALYTIC, data={self.tag: self.value})
- dxo.set_meta_prop(DATA_TYPE_KEY, self.data_type)
- dxo.set_meta_prop(KWARGS_KEY, self.kwargs)
+ dxo.set_meta_prop(_DATA_TYPE_KEY, self.data_type)
+ dxo.set_meta_prop(_KWARGS_KEY, self.kwargs)
return dxo
@classmethod
@@ -59,16 +71,14 @@ class AnalyticsData:
dxo (DXO): The DXO object to convert.
"""
if not isinstance(dxo, DXO):
- raise TypeError(f"dxo is not of type DXO, instead it has type {type(dxo)}.")
+ raise TypeError(f"expect dxo to be an instance of DXO, but got {type(dxo)}.")
if len(dxo.data) != 1:
raise ValueError("dxo does not have the correct format for AnalyticsData.")
tag, value = list(dxo.data.items())[0]
- data_type = dxo.get_meta_prop(DATA_TYPE_KEY)
- kwargs = dxo.get_meta_prop(KWARGS_KEY)
- if not isinstance(data_type, AnalyticsDataType):
- raise ValueError(f"data_type {data_type} is not supported.")
+ data_type = dxo.get_meta_prop(_DATA_TYPE_KEY)
+ kwargs = dxo.get_meta_prop(_KWARGS_KEY)
return cls(tag, value, data_type, kwargs)
diff --git a/nvflare/app_common/widgets/streaming.py b/nvflare/app_common/widgets/streaming.py
index 588f381e..dcd82aaa 100644
--- a/nvflare/app_common/widgets/streaming.py
+++ b/nvflare/app_common/widgets/streaming.py
@@ -45,11 +45,11 @@ def send_analytic_dxo(comp: FLComponent, dxo: DXO, fl_ctx: FLContext, event_type
event_type (str): Event type.
"""
if not isinstance(comp, FLComponent):
- raise TypeError("expect comp to be FLComponent, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect comp to be an instance of FLComponent, but got {type(comp)}")
if not isinstance(dxo, DXO):
- raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect dxo to be an instance of DXO, but got {type(dxo)}")
if not isinstance(fl_ctx, FLContext):
- raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
+ raise TypeError(f"expect fl_ctx to be an instance of FLContext, but got {type(fl_ctx)}")
fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=dxo.to_shareable(), private=True, sticky=False)
comp.fire_event(event_type=event_type, fl_ctx=fl_ctx)
@@ -117,7 +117,7 @@ class AnalyticsSender(Widget):
"""Sends analytics data.
This class implements some common methods follows signatures from PyTorch SummaryWriter and Python logger.
- It provides a convenient way for LearnerService to use.
+ It provides a convenient way for Learner to use.
"""
super().__init__()
self.engine = None
@@ -126,50 +126,66 @@ class AnalyticsSender(Widget):
if event_type == EventType.START_RUN:
self.engine = fl_ctx.get_engine()
- def _add(self, tag: str, value, data_type: AnalyticsDataType, kwargs: Optional[dict] = None):
+ def _add(
+ self,
+ tag: str,
+ value,
+ data_type: AnalyticsDataType,
+ global_step: Optional[int] = None,
+ kwargs: Optional[dict] = None,
+ ):
+ kwargs = kwargs if kwargs else {}
+ if global_step:
+ if not isinstance(global_step, int):
+ raise TypeError(f"Expect global step to be an instance of int, but got {type(global_step)}")
+ kwargs["global_step"] = global_step
dxo = _write(tag=tag, value=value, data_type=data_type, kwargs=kwargs)
with self.engine.new_context() as fl_ctx:
send_analytic_dxo(self, dxo=dxo, fl_ctx=fl_ctx)
- def add_scalar(self, tag: str, scalar: float, **kwargs):
+ def add_scalar(self, tag: str, scalar: float, global_step: Optional[int] = None, **kwargs):
"""Sends a scalar.
Args:
tag (str): Data identifier.
scalar (float): Value to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, kwargs=kwargs)
+ self._add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, global_step=global_step, kwargs=kwargs)
- def add_scalars(self, tag: str, scalars: dict, **kwargs):
+ def add_scalars(self, tag: str, scalars: dict, global_step: Optional[int] = None, **kwargs):
"""Sends scalars.
Args:
tag (str): The parent name for the tags.
scalars (dict): Key-value pair storing the tag and corresponding values.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, kwargs=kwargs)
+ self._add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, global_step=global_step, kwargs=kwargs)
- def add_text(self, tag: str, text: str, **kwargs):
+ def add_text(self, tag: str, text: str, global_step: Optional[int] = None, **kwargs):
"""Sends a text.
Args:
tag (str): Data identifier.
text (str): String to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=text, data_type=AnalyticsDataType.TEXT, kwargs=kwargs)
+ self._add(tag=tag, value=text, data_type=AnalyticsDataType.TEXT, global_step=global_step, kwargs=kwargs)
- def add_image(self, tag: str, image, **kwargs):
+ def add_image(self, tag: str, image, global_step: Optional[int] = None, **kwargs):
"""Sends an image.
Args:
tag (str): Data identifier.
image: Image to send.
+ global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
- self._add(tag=tag, value=image, data_type=AnalyticsDataType.IMAGE, kwargs=kwargs)
+ self._add(tag=tag, value=image, data_type=AnalyticsDataType.IMAGE, global_step=global_step, kwargs=kwargs)
def _log(self, tag: LogMessageTag, msg: str, event_type: str, *args, **kwargs):
"""Logs a message.
@@ -210,6 +226,18 @@ class AnalyticsSender(Widget):
"""Logs a message with tag LogMessageTag.CRITICAL."""
self._log(tag=LogMessageTag.CRITICAL, msg=msg, event_type=_LOG_CRITICAL_EVENT_TYPE, args=args, kwargs=kwargs)
+ def flush(self):
+ """Flushes out the message.
+
+ This is doing nothing, it is defined for mimic the PyTorch SummaryWriter behavior.
+ """
+ pass
+
+ def close(self):
+ """Close resources."""
+ if self.engine:
+ self.engine = None
+
class AnalyticsReceiver(Widget, ABC):
def __init__(self, events: Optional[List[str]] = None):
@@ -223,6 +251,7 @@ class AnalyticsReceiver(Widget, ABC):
events = [_ANALYTIC_EVENT_TYPE, f"fed.{_ANALYTIC_EVENT_TYPE}"]
self.events = events
self._save_lock = Lock()
+ self._end = False
@abstractmethod
def initialize(self, fl_ctx: FLContext):
@@ -250,32 +279,34 @@ class AnalyticsReceiver(Widget, ABC):
Args:
fl_ctx (FLContext): fl context.
-
"""
pass
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
- elif event_type in self.events:
+ elif event_type in self.events and not self._end:
data = fl_ctx.get_prop(FLContextKey.EVENT_DATA, None)
if data is None:
- self.log_error(fl_ctx, "Missing event data.")
+ self.log_error(fl_ctx, "Missing event data.", fire_event=False)
return
if not isinstance(data, Shareable):
- self.log_error(fl_ctx, f"Expect shareable but get {type(data)}")
+ self.log_error(
+ fl_ctx, f"Expect data to be an instance of shareable but get {type(data)}", fire_event=False
+ )
return
- record_origin = fl_ctx.get_identity_name()
# if fed event use peer name to save
if fl_ctx.get_prop(FLContextKey.EVENT_SCOPE) == EventScope.FEDERATION:
- peer_name = data.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
- record_origin = peer_name
+ record_origin = data.get_peer_prop(ReservedKey.IDENTITY_NAME, None)
+ else:
+ record_origin = fl_ctx.get_identity_name()
if record_origin is None:
- self.log_error(fl_ctx, "record_origin can't be None.")
+ self.log_error(fl_ctx, "record_origin can't be None.", fire_event=False)
return
with self._save_lock:
self.save(shareable=data, fl_ctx=fl_ctx, record_origin=record_origin)
elif event_type == EventType.END_RUN:
+ self._end = True
self.finalize(fl_ctx)
| NVIDIA/NVFlare | 94dfcbbd33db8746297d39c06cfa0955592fd255 | diff --git a/test/test_analytix.py b/test/test_analytix.py
new file mode 100644
index 00000000..1b61d292
--- /dev/null
+++ b/test/test_analytix.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from nvflare.apis.analytix import _DATA_TYPE_KEY, _KWARGS_KEY, AnalyticsData, AnalyticsDataType
+from nvflare.apis.dxo import DXO, DataKind
+
+FROM_DXO_TEST_CASES = [
+ ("hello", 3.0, AnalyticsDataType.SCALAR),
+ ("world", "text", AnalyticsDataType.TEXT),
+ ("dict", {"key": 1.0}, AnalyticsDataType.SCALARS),
+]
+
+TO_DXO_TEST_CASES = [
+ AnalyticsData(tag="hello", value=3.0, data_type=AnalyticsDataType.SCALAR),
+ AnalyticsData(tag="world", value="text", data_type=AnalyticsDataType.TEXT),
+ AnalyticsData(tag="dict", value={"key": 1.0}, data_type=AnalyticsDataType.SCALARS),
+]
+
+FROM_DXO_INVALID_TEST_CASES = [
+ (dict(), TypeError, f"expect dxo to be an instance of DXO, but got {type(dict())}."),
+ (
+ DXO(data_kind=DataKind.WEIGHTS, data={"w": 1.0}),
+ TypeError,
+ f"expect data_type to be an instance of AnalyticsDataType, but got {type(None)}.",
+ ),
+]
+
+INVALID_TEST_CASES = [
+ (
+ dict(),
+ 1.0,
+ AnalyticsDataType.SCALAR,
+ None,
+ TypeError,
+ f"expect tag to be an instance of str, but got {type(dict())}.",
+ ),
+ (
+ "tag",
+ 1.0,
+ "scalar",
+ None,
+ TypeError,
+ f"expect data_type to be an instance of AnalyticsDataType, but got {type('')}.",
+ ),
+ (
+ "tag",
+ 1.0,
+ AnalyticsDataType.SCALAR,
+ [1],
+ TypeError,
+ f"expect kwargs to be an instance of dict, but got {type(list())}.",
+ ),
+]
+
+
+class TestAnalytix:
+ @pytest.mark.parametrize("tag,value,data_type,kwargs,expected_error,expected_msg", INVALID_TEST_CASES)
+ def test_invalid(self, tag, value, data_type, kwargs, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ _ = AnalyticsData(tag=tag, value=value, data_type=data_type, kwargs=kwargs)
+
+ @pytest.mark.parametrize("tag,value,data_type", FROM_DXO_TEST_CASES)
+ def test_from_dxo(self, tag, value, data_type):
+ dxo = DXO(data_kind=DataKind.ANALYTIC, data={tag: value})
+ dxo.set_meta_prop(_DATA_TYPE_KEY, data_type)
+ result = AnalyticsData.from_dxo(dxo)
+ assert result.tag == tag
+ assert result.value == value
+
+ @pytest.mark.parametrize("data", TO_DXO_TEST_CASES)
+ def test_to_dxo(self, data: AnalyticsData):
+ result = data.to_dxo()
+ assert result.data_kind == DataKind.ANALYTIC
+ assert result.data == {data.tag: data.value}
+ assert result.get_meta_prop(_DATA_TYPE_KEY) == data.data_type
+ assert result.get_meta_prop(_KWARGS_KEY) == data.kwargs
+
+ @pytest.mark.parametrize("dxo,expected_error,expected_msg", FROM_DXO_INVALID_TEST_CASES)
+ def test_from_dxo_invalid(self, dxo, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ _ = AnalyticsData.from_dxo(dxo)
diff --git a/test/test_streaming.py b/test/test_streaming.py
new file mode 100644
index 00000000..b1a105c1
--- /dev/null
+++ b/test/test_streaming.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from nvflare.apis.dxo import DXO, DataKind
+from nvflare.apis.fl_component import FLComponent
+from nvflare.apis.fl_context import FLContext
+from nvflare.app_common.widgets.streaming import send_analytic_dxo, write_scalar, write_scalars, write_text, write_image
+
+INVALID_TEST_CASES = [
+ (list(), dict(), FLContext(), TypeError, f"expect comp to be an instance of FLComponent, but got {type(list())}"),
+ (FLComponent(), dict(), FLContext(), TypeError, f"expect dxo to be an instance of DXO, but got {type(dict())}"),
+ (
+ FLComponent(),
+ DXO(data={"k": "v"}, data_kind=DataKind.ANALYTIC),
+ list(),
+ TypeError,
+ f"expect fl_ctx to be an instance of FLContext, but got {type(list())}",
+ ),
+]
+
+INVALID_WRITE_TEST_CASES = [
+ (write_scalar, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_scalar, "tag", list(), TypeError, f"expect value to be an instance of float, but got {type(list())}"),
+ (write_scalars, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_scalars, "tag", 1.0, TypeError, f"expect value to be an instance of dict, but got {type(1.0)}"),
+ (write_text, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+ (write_text, "tag", 1.0, TypeError, f"expect value to be an instance of str, but got {type(1.0)}"),
+ (write_image, list(), 1.0, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
+]
+
+
+class TestStreaming:
+ @pytest.mark.parametrize("comp,dxo,fl_ctx,expected_error,expected_msg", INVALID_TEST_CASES)
+ def test_invalid_send_analytic_dxo(self, comp, dxo, fl_ctx, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ send_analytic_dxo(comp=comp, dxo=dxo, fl_ctx=fl_ctx)
+
+ @pytest.mark.parametrize("func,tag,value,expected_error,expected_msg", INVALID_WRITE_TEST_CASES)
+ def test_invalid_write_func(self, func, tag, value, expected_error, expected_msg):
+ with pytest.raises(expected_error, match=expected_msg):
+ func(tag, value)
| Errors in streaming.py
@yanchengnv noticed some issues in nvflare/app_common/widgets/streaming.py:
- Line 47 to Line 52, the checking of the args and error messages are wrong.
- All these write_xxx() methods should check the tag and data args and make sure they are what we expect (str, dict, …)
- Line 257, in the call self.log_xxx(), we should set send_event=False; otherwise it may cause recursive events
- Since fed events are handled by a separate thread, there is a potential race condition where a fed event could be fired after the END_RUN event. In the Receiver code, we need to make sure to discard other events after END_RUN (and hence finalize) is done. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_analytix.py::TestAnalytix::test_invalid[tag0-1.0-AnalyticsDataType.SCALAR-None-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_invalid[tag-1.0-scalar-None-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_invalid[tag-1.0-AnalyticsDataType.SCALAR-kwargs2-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_from_dxo[hello-3.0-AnalyticsDataType.SCALAR]",
"test/test_analytix.py::TestAnalytix::test_from_dxo[world-text-AnalyticsDataType.TEXT]",
"test/test_analytix.py::TestAnalytix::test_from_dxo[dict-value2-AnalyticsDataType.SCALARS]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data0]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data1]",
"test/test_analytix.py::TestAnalytix::test_to_dxo[data2]",
"test/test_analytix.py::TestAnalytix::test_from_dxo_invalid[dxo0-TypeError-expect",
"test/test_analytix.py::TestAnalytix::test_from_dxo_invalid[dxo1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp0-dxo0-fl_ctx0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp1-dxo1-fl_ctx1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_send_analytic_dxo[comp2-dxo2-fl_ctx2-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalar-tag0-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalar-tag-value1-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalars-tag2-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_scalars-tag-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_text-tag4-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_text-tag-1.0-TypeError-expect",
"test/test_streaming.py::TestStreaming::test_invalid_write_func[write_image-tag6-1.0-TypeError-expect"
] | [] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2021-12-08T22:42:30Z" | apache-2.0 |
|
NVIDIA__hpc-container-maker-221 | diff --git a/hpccm/building_blocks/netcdf.py b/hpccm/building_blocks/netcdf.py
index c18e907..3b074e9 100644
--- a/hpccm/building_blocks/netcdf.py
+++ b/hpccm/building_blocks/netcdf.py
@@ -122,7 +122,9 @@ class netcdf(bb_base, hpccm.templates.ConfigureMake, hpccm.templates.envvars,
self.configure_opts = kwargs.get('configure_opts', [])
- self.__baseurl = 'ftp://ftp.unidata.ucar.edu/pub/netcdf'
+ self.__c_baseurl = 'https://github.com/Unidata/netcdf-c/archive'
+ self.__cxx_baseurl = 'https://github.com/Unidata/netcdf-cxx4/archive'
+ self.__fortran_baseurl = 'https://github.com/Unidata/netcdf-fortran/archive'
self.__check = kwargs.get('check', False)
self.__cxx = kwargs.get('cxx', True)
self.__fortran = kwargs.get('fortran', True)
@@ -209,13 +211,15 @@ class netcdf(bb_base, hpccm.templates.ConfigureMake, hpccm.templates.envvars,
if not toolchain.LDFLAGS:
toolchain.LDFLAGS = '-L{}/lib'.format(self.__hdf5_dir)
- # Version 4.7.0 changed the package name
- if LooseVersion(self.__version) >= LooseVersion('4.7.0'):
+ # Version 4.3.1 changed the package name
+ if LooseVersion(self.__version) >= LooseVersion('4.3.1'):
pkgname = 'netcdf-c'
+ tarball = 'v{0}.tar.gz'.format(self.__version)
else:
pkgname = 'netcdf'
- tarball = '{0}-{1}.tar.gz'.format(pkgname, self.__version)
- url = '{0}/{1}'.format(self.__baseurl, tarball)
+ tarball = '{0}-{1}.tar.gz'.format(pkgname, self.__version)
+
+ url = '{0}/{1}'.format(self.__c_baseurl, tarball)
# Download source from web
self.__commands.append(self.download_step(url=url,
@@ -257,16 +261,23 @@ class netcdf(bb_base, hpccm.templates.ConfigureMake, hpccm.templates.envvars,
# without impacting the original.
toolchain = _copy(self.__toolchain)
- # Need to tell it where to find NetCDF
+ # Need to tell it where to find NetCDF and HDF5
if not toolchain.CPPFLAGS:
- toolchain.CPPFLAGS = '-I{}/include'.format(self.prefix)
+ toolchain.CPPFLAGS = '-I{0}/include -I{1}/include'.format(self.prefix, self.__hdf5_dir)
if not toolchain.LDFLAGS:
toolchain.LDFLAGS = '-L{}/lib'.format(self.prefix)
if not toolchain.LD_LIBRARY_PATH:
toolchain.LD_LIBRARY_PATH = '{}/lib:$LD_LIBRARY_PATH'.format(self.prefix)
- tarball = '{0}-{1}.tar.gz'.format(pkg, version)
- url = '{0}/{1}'.format(self.__baseurl, tarball)
+ if pkg == 'netcdf-cxx4':
+ baseurl = self.__cxx_baseurl
+ elif pkg == 'netcdf-fortran':
+ baseurl = self.__fortran_baseurl
+ else:
+ raise RuntimeError('unrecognized package name: "{}"'.format(pkg))
+
+ tarball = 'v{0}.tar.gz'.format(version)
+ url = '{0}/{1}'.format(baseurl, tarball)
# Download source from web
self.__commands.append(self.download_step(url=url,
| NVIDIA/hpc-container-maker | 278af2acfae3296ebcdf6b6ab796d54a1ad2f343 | diff --git a/test/test_netcdf.py b/test/test_netcdf.py
index d944a1d..926d484 100644
--- a/test/test_netcdf.py
+++ b/test/test_netcdf.py
@@ -49,24 +49,24 @@ RUN apt-get update -y && \
wget \
zlib1g-dev && \
rm -rf /var/lib/apt/lists/*
-RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-c-4.7.0.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-c-4.7.0.tar.gz -C /var/tmp -z && \
+RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.7.0.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.7.0.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-c-4.7.0 && CPPFLAGS=-I/usr/local/hdf5/include LDFLAGS=-L/usr/local/hdf5/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-c-4.7.0.tar.gz /var/tmp/netcdf-c-4.7.0 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-cxx4-4.3.0.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-cxx4-4.3.0.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.7.0.tar.gz /var/tmp/netcdf-c-4.7.0 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.0.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.0.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-cxx4-4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-fortran-4.4.5.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-fortran-4.4.5.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-fortran-4.4.5 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.4.5.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.4.5.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-fortran-4.4.5 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-fortran-4.4.5.tar.gz /var/tmp/netcdf-fortran-4.4.5
+ rm -rf /var/tmp/v4.4.5.tar.gz /var/tmp/netcdf-fortran-4.4.5
ENV LD_LIBRARY_PATH=/usr/local/netcdf/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH''')
@@ -87,24 +87,24 @@ RUN yum install -y \
wget \
zlib-devel && \
rm -rf /var/cache/yum/*
-RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-c-4.7.0.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-c-4.7.0.tar.gz -C /var/tmp -z && \
+RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.7.0.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.7.0.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-c-4.7.0 && CPPFLAGS=-I/usr/local/hdf5/include LDFLAGS=-L/usr/local/hdf5/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-c-4.7.0.tar.gz /var/tmp/netcdf-c-4.7.0 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-cxx4-4.3.0.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-cxx4-4.3.0.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.7.0.tar.gz /var/tmp/netcdf-c-4.7.0 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.0.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.0.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-cxx4-4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-fortran-4.4.5.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-fortran-4.4.5.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-fortran-4.4.5 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.4.5.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.4.5.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-fortran-4.4.5 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-fortran-4.4.5.tar.gz /var/tmp/netcdf-fortran-4.4.5
+ rm -rf /var/tmp/v4.4.5.tar.gz /var/tmp/netcdf-fortran-4.4.5
ENV LD_LIBRARY_PATH=/usr/local/netcdf/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH''')
@@ -127,25 +127,25 @@ RUN apt-get update -y && \
wget \
zlib1g-dev && \
rm -rf /var/lib/apt/lists/*
-RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.6.1.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-4.6.1.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-4.6.1 && CPPFLAGS=-I/usr/local/hdf5/include LDFLAGS=-L/usr/local/hdf5/lib ./configure --prefix=/usr/local/netcdf && \
+RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.6.1.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.6.1.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-c-4.6.1 && CPPFLAGS=-I/usr/local/hdf5/include LDFLAGS=-L/usr/local/hdf5/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/netcdf/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
- rm -rf /var/tmp/netcdf-4.6.1.tar.gz /var/tmp/netcdf-4.6.1 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-cxx4-4.3.0.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-cxx4-4.3.0.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.6.1.tar.gz /var/tmp/netcdf-c-4.6.1 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.0.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.0.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-cxx4-4.3.0 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-cxx4-4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
- mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-fortran-4.4.4.tar.gz && \
- mkdir -p /var/tmp && tar -x -f /var/tmp/netcdf-fortran-4.4.4.tar.gz -C /var/tmp -z && \
- cd /var/tmp/netcdf-fortran-4.4.4 && CPPFLAGS=-I/usr/local/netcdf/include LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
+ rm -rf /var/tmp/v4.3.0.tar.gz /var/tmp/netcdf-cxx4-4.3.0 && \
+ mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.4.4.tar.gz && \
+ mkdir -p /var/tmp && tar -x -f /var/tmp/v4.4.4.tar.gz -C /var/tmp -z && \
+ cd /var/tmp/netcdf-fortran-4.4.4 && CPPFLAGS='-I/usr/local/netcdf/include -I/usr/local/hdf5/include' LD_LIBRARY_PATH='/usr/local/netcdf/lib:$LD_LIBRARY_PATH' LDFLAGS=-L/usr/local/netcdf/lib ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
- rm -rf /var/tmp/netcdf-fortran-4.4.4.tar.gz /var/tmp/netcdf-fortran-4.4.4
+ rm -rf /var/tmp/v4.4.4.tar.gz /var/tmp/netcdf-fortran-4.4.4
ENV PATH=/usr/local/netcdf/bin:$PATH''')
@ubuntu
| NetCDF issues
Hi guys, I'm having trouble installing NetCDF with HPCCM. The issue is that the default version of the netcdf-c library is now 4.7.3, and the later releases are not available via FTP anymore; see ftp://ftp.unidata.ucar.edu/pub/netcdf/
Specifying explicit version='4.7.3' works fine.
A different solution would be to change the FTP address to the one from the GitHub releases page, where the version archive is still maintained: https://github.com/Unidata/netcdf-c/releases
Exactly the same versioning issue applies to netcdf-fortran and netcdf-cxx.
Cheers! | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_netcdf.py::Test_netcdf::test_defaults_centos",
"test/test_netcdf.py::Test_netcdf::test_defaults_ubuntu",
"test/test_netcdf.py::Test_netcdf::test_ldconfig"
] | [
"test/test_netcdf.py::Test_netcdf::test_runtime"
] | {
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
} | "2020-01-21T15:09:55Z" | apache-2.0 |
|
NVIDIA__hpc-container-maker-311 | diff --git a/docs/building_blocks.md b/docs/building_blocks.md
index e50a59c..8a15602 100644
--- a/docs/building_blocks.md
+++ b/docs/building_blocks.md
@@ -2163,6 +2163,9 @@ include the LLVM compilers when necessary. The default is True.
- __extra_tools__: Boolean flag to specify whether to also install
`clang-format` and `clang-tidy`. The default is False.
+- __openmp__: Boolean flag to specify whether to also install OpenMP
+support. The default is True.
+
- __toolset__: Boolean flag to specify whether to also install the
full LLVM toolset. The default is False.
diff --git a/hpccm/building_blocks/llvm.py b/hpccm/building_blocks/llvm.py
index 5f1d143..2ad8b86 100644
--- a/hpccm/building_blocks/llvm.py
+++ b/hpccm/building_blocks/llvm.py
@@ -51,6 +51,9 @@ class llvm(bb_base, hpccm.templates.envvars):
extra_tools: Boolean flag to specify whether to also install
`clang-format` and `clang-tidy`. The default is False.
+ openmp: Boolean flag to specify whether to also install OpenMP
+ support. The default is True.
+
toolset: Boolean flag to specify whether to also install the
full LLVM toolset. The default is False.
@@ -97,6 +100,7 @@ class llvm(bb_base, hpccm.templates.envvars):
self.__compiler_debs = [] # Filled in below
self.__compiler_rpms = [] # Filled in below
self.__extra_tools = kwargs.get('extra_tools', False)
+ self.__openmp = kwargs.get('openmp', True)
self.__ospackages = kwargs.get('ospackages', []) # Filled in below
self.__runtime_debs = [] # Filled in below
self.__runtime_ospackages = [] # Filled in below
@@ -129,21 +133,26 @@ class llvm(bb_base, hpccm.templates.envvars):
if self.__version:
if LooseVersion(self.__version) <= LooseVersion('6.0'):
+ self.__compiler_debs = ['clang-{}'.format(self.__version)]
+ self.__runtime_debs = [
+ 'libclang1-{}'.format(self.__version)]
+
# Versioned OpenMP libraries do not exist for
# older versions
- self.__compiler_debs = [
- 'clang-{}'.format(self.__version),
- 'libomp-dev']
- self.__runtime_debs = [
- 'libclang1-{}'.format(self.__version),
- 'libomp5']
+ if self.__openmp:
+ self.__compiler_debs.append('libomp-dev')
+ self.__runtime_debs.append('libomp5')
+
else:
- self.__compiler_debs = [
- 'clang-{}'.format(self.__version),
- 'libomp-{}-dev'.format(self.__version)]
+ self.__compiler_debs = ['clang-{}'.format(self.__version)]
self.__runtime_debs = [
- 'libclang1-{}'.format(self.__version),
- 'libomp5-{}'.format(self.__version)]
+ 'libclang1-{}'.format(self.__version)]
+
+ if self.__openmp:
+ self.__compiler_debs.append(
+ 'libomp-{}-dev'.format(self.__version))
+ self.__runtime_debs.append(
+ 'libomp5-{}'.format(self.__version))
if self.__upstream:
# Upstream packages from apt.llvm.org
@@ -196,8 +205,12 @@ class llvm(bb_base, hpccm.templates.envvars):
else:
# Distro default
- self.__compiler_debs = ['clang', 'libomp-dev']
- self.__runtime_debs = ['libclang1', 'libomp5']
+ self.__compiler_debs = ['clang']
+ self.__runtime_debs = ['libclang1']
+
+ if self.__openmp:
+ self.__compiler_debs.append('libomp-dev')
+ self.__runtime_debs.append('libomp5')
if self.__toolset or self.__extra_tools:
self.__compiler_debs.extend(['clang-format', 'clang-tidy'])
@@ -224,10 +237,14 @@ class llvm(bb_base, hpccm.templates.envvars):
if self.__version:
if hpccm.config.g_linux_version >= StrictVersion('8.0'):
# Multiple versions are not available for CentOS 8
- self.__compiler_rpms = ['clang', 'llvm-libs', 'libomp']
- self.__runtime_rpms = ['llvm-libs', 'libomp']
+ self.__compiler_rpms = ['clang', 'llvm-libs']
+ self.__runtime_rpms = ['llvm-libs']
compiler_version = '8'
+ if self.__openmp:
+ self.__compiler_rpms.append('libomp')
+ self.__runtime_rpms.append('libomp')
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('clang-tools-extra')
@@ -236,14 +253,18 @@ class llvm(bb_base, hpccm.templates.envvars):
else:
# CentOS 7
self.__compiler_rpms = [
- 'llvm-toolset-{}-clang'.format(self.__version),
- 'llvm-toolset-{}-libomp-devel'.format(self.__version)]
+ 'llvm-toolset-{}-clang'.format(self.__version)]
self.__runtime_rpms = [
'llvm-toolset-{}-runtime'.format(self.__version),
- 'llvm-toolset-{}-libomp'.format(self.__version),
'llvm-toolset-{}-compiler-rt'.format(self.__version)]
compiler_version = '4.8.2'
+ if self.__openmp:
+ self.__compiler_rpms.append(
+ 'llvm-toolset-{}-libomp-devel'.format(self.__version))
+ self.__runtime_rpms.append(
+ 'llvm-toolset-{}-libomp'.format(self.__version))
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('llvm-toolset-{}-clang-tools-extra'.format(self.__version))
@@ -258,9 +279,12 @@ class llvm(bb_base, hpccm.templates.envvars):
self.__compiler_rpms = ['clang']
if hpccm.config.g_linux_version >= StrictVersion('8.0'):
# CentOS 8
- self.__runtime_rpms = ['llvm-libs', 'libomp']
+ self.__runtime_rpms = ['llvm-libs']
compiler_version = '8'
+ if self.__openmp:
+ self.__runtime_rpms.append('libomp')
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('clang-tools-extra')
| NVIDIA/hpc-container-maker | f065b74de7fd09bebdde1be55b1e6c53dc629c68 | diff --git a/test/test_llvm.py b/test/test_llvm.py
index 693be94..f8a246b 100644
--- a/test/test_llvm.py
+++ b/test/test_llvm.py
@@ -169,6 +169,19 @@ ENV COMPILER_PATH=/usr/lib/gcc/aarch64-redhat-linux/8:$COMPILER_PATH \
with self.assertRaises(RuntimeError):
llvm()
+ @x86_64
+ @ubuntu
+ @docker
+ def test_openmp_ubuntu(self):
+ """openmp disabled"""
+ l = llvm(openmp=False)
+ self.assertEqual(str(l),
+r'''# LLVM compiler
+RUN apt-get update -y && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ clang && \
+ rm -rf /var/lib/apt/lists/*''')
+
@x86_64
@ubuntu
@docker
| LLVM package: make OpenMP installation optional
For our CI we install different versions of Clang/LLVM in parallel in a container. Unfortunately there is a dependency between `libomp5-7` and `libomp5-8`:
```bash
Step 20/28 : RUN apt-get update -y && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends clang-8 libomp-8-dev && rm -rf /var/lib/apt/lists/*
---> Running in c82cf74f02b3
Get:2 http://archive.ubuntu.com/ubuntu bionic InRelease [242 kB]
Get:1 https://apt.llvm.org/bionic llvm-toolchain-bionic InRelease [4232 B]
Get:3 https://apt.llvm.org/bionic llvm-toolchain-bionic-10 InRelease [4232 B]
Get:4 https://apt.llvm.org/bionic llvm-toolchain-bionic-11 InRelease [4232 B]
Get:5 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]
Get:6 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]
Get:7 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]
Get:8 https://apt.llvm.org/bionic llvm-toolchain-bionic/main Sources [2193 B]
Get:9 https://apt.llvm.org/bionic llvm-toolchain-bionic/main amd64 Packages [10.6 kB]
Get:10 http://archive.ubuntu.com/ubuntu bionic/restricted amd64 Packages [13.5 kB]
Get:11 http://archive.ubuntu.com/ubuntu bionic/main amd64 Packages [1344 kB]
Get:12 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [11.3 MB]
Get:13 https://apt.llvm.org/bionic llvm-toolchain-bionic-10/main Sources [1665 B]
Get:14 https://apt.llvm.org/bionic llvm-toolchain-bionic-10/main amd64 Packages [8762 B]
Get:15 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [186 kB]
Get:16 https://apt.llvm.org/bionic llvm-toolchain-bionic-11/main Sources [1666 B]
Get:17 https://apt.llvm.org/bionic llvm-toolchain-bionic-11/main amd64 Packages [8738 B]
Get:18 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [44.6 kB]
Get:19 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [220 kB]
Get:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2110 kB]
Get:21 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2095 kB]
Get:22 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [11.3 kB]
Get:23 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [11.4 kB]
Get:24 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1332 kB]
Get:25 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [193 kB]
Get:26 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [1693 kB]
Get:27 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [14.6 kB]
Fetched 21.1 MB in 2s (8756 kB/s)
Reading package lists...
Reading package lists...
Building dependency tree...
Reading state information...
The following additional packages will be installed:
libclang-common-8-dev libclang1-8 libllvm8 libomp5-8
Suggested packages:
clang-8-doc libomp-8-doc
Recommended packages:
llvm-8-dev python
The following NEW packages will be installed:
clang-8 libclang-common-8-dev libclang1-8 libllvm8 libomp-8-dev libomp5-8
0 upgraded, 6 newly installed, 0 to remove and 18 not upgraded.
Need to get 31.9 MB of archives.
After this operation, 173 MB of additional disk space will be used.
Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libllvm8 amd64 1:8-3~ubuntu18.04.2 [13.6 MB]
Get:2 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libclang-common-8-dev amd64 1:8-3~ubuntu18.04.2 [3802 kB]
Get:3 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libclang1-8 amd64 1:8-3~ubuntu18.04.2 [6225 kB]
Get:4 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 clang-8 amd64 1:8-3~ubuntu18.04.2 [7940 kB]
Get:5 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libomp5-8 amd64 1:8-3~ubuntu18.04.2 [299 kB]
Get:6 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 libomp-8-dev amd64 1:8-3~ubuntu18.04.2 [56.2 kB]
debconf: delaying package configuration, since apt-utils is not installed
Fetched 31.9 MB in 1s (35.2 MB/s)
Selecting previously unselected package libllvm8:amd64.
(Reading database ... 14420 files and directories currently installed.)
Preparing to unpack .../0-libllvm8_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking libllvm8:amd64 (1:8-3~ubuntu18.04.2) ...
Selecting previously unselected package libclang-common-8-dev.
Preparing to unpack .../1-libclang-common-8-dev_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking libclang-common-8-dev (1:8-3~ubuntu18.04.2) ...
Selecting previously unselected package libclang1-8.
Preparing to unpack .../2-libclang1-8_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking libclang1-8 (1:8-3~ubuntu18.04.2) ...
Selecting previously unselected package clang-8.
Preparing to unpack .../3-clang-8_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking clang-8 (1:8-3~ubuntu18.04.2) ...
Selecting previously unselected package libomp5-8:amd64.
Preparing to unpack .../4-libomp5-8_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking libomp5-8:amd64 (1:8-3~ubuntu18.04.2) ...
dpkg: error processing archive /tmp/apt-dpkg-install-04VwC3/4-libomp5-8_1%3a8-3~ubuntu18.04.2_amd64.deb (--unpack):
trying to overwrite '/usr/lib/x86_64-linux-gnu/libomp.so.5', which is also in package libomp5-7:amd64 1:7-3~ubuntu0.18.04.1
Selecting previously unselected package libomp-8-dev.
Preparing to unpack .../5-libomp-8-dev_1%3a8-3~ubuntu18.04.2_amd64.deb ...
Unpacking libomp-8-dev (1:8-3~ubuntu18.04.2) ...
Errors were encountered while processing:
/tmp/apt-dpkg-install-04VwC3/4-libomp5-8_1%3a8-3~ubuntu18.04.2_amd64.deb
E: Sub-process /usr/bin/dpkg returned an error code (1)
The command '/bin/sh -c apt-get update -y && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends clang-8 libomp-8-dev && rm -rf /var/lib/apt/lists/*' returned a non-zero code: 100
```
For us it is not necessary to install different OpenMP versions in parallel. Could you please make the OpenMP installation optional? A sketch of what we have in mind follows.
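Roughly the desired usage, as a sketch (`openmp` is the flag exercised by the new `test_openmp_ubuntu` case above; versions are illustrative):
```python
# recipe fragment for the hpccm CLI; several Clang versions side by side,
# without the conflicting libomp packages
Stage0 += llvm(version='7', openmp=False)
Stage0 += llvm(version='8', openmp=False)
```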
Cheers,
Simeon | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_llvm.py::Test_llvm::test_openmp_ubuntu"
] | [
"test/test_llvm.py::Test_llvm::test_aarch64_centos",
"test/test_llvm.py::Test_llvm::test_aarch64_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_centos",
"test/test_llvm.py::Test_llvm::test_defaults_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_ubuntu",
"test/test_llvm.py::Test_llvm::test_extra_tools_centos8",
"test/test_llvm.py::Test_llvm::test_extra_tools_ubuntu",
"test/test_llvm.py::Test_llvm::test_ppc64le_centos",
"test/test_llvm.py::Test_llvm::test_runtime",
"test/test_llvm.py::Test_llvm::test_toolchain",
"test/test_llvm.py::Test_llvm::test_toolset8_ubuntu",
"test/test_llvm.py::Test_llvm::test_toolset_centos7",
"test/test_llvm.py::Test_llvm::test_toolset_centos8",
"test/test_llvm.py::Test_llvm::test_toolset_ubuntu18",
"test/test_llvm.py::Test_llvm::test_upstream_aarch64",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu16",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu18",
"test/test_llvm.py::Test_llvm::test_version_centos",
"test/test_llvm.py::Test_llvm::test_version_ubuntu"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-02T14:31:19Z" | apache-2.0 |
|
NVIDIA__hpc-container-maker-312 | diff --git a/docs/building_blocks.md b/docs/building_blocks.md
index e50a59c..00a7abf 100644
--- a/docs/building_blocks.md
+++ b/docs/building_blocks.md
@@ -595,6 +595,10 @@ empty list.
- __python2__: Boolean flag to specify that the Python 2 version of
Anaconda should be installed. The default is False.
+- __python_subversion__: The Python version to install. This value is
+ignored if the Conda version is less than 4.8. The default is
+`py38` if using Python 3, and `py27` if using Python 2.
+
- __version__: The version of Anaconda to download. The default value
is `4.8.3`.
@@ -2163,6 +2167,9 @@ include the LLVM compilers when necessary. The default is True.
- __extra_tools__: Boolean flag to specify whether to also install
`clang-format` and `clang-tidy`. The default is False.
+- __openmp__: Boolean flag to specify whether to also install OpenMP
+support. The default is True.
+
- __toolset__: Boolean flag to specify whether to also install the
full LLVM toolset. The default is False.
diff --git a/hpccm/building_blocks/conda.py b/hpccm/building_blocks/conda.py
index a8f4ba4..20d3beb 100644
--- a/hpccm/building_blocks/conda.py
+++ b/hpccm/building_blocks/conda.py
@@ -64,6 +64,10 @@ class conda(bb_base, hpccm.templates.rm, hpccm.templates.wget):
python2: Boolean flag to specify that the Python 2 version of
Anaconda should be installed. The default is False.
+ python_subversion: The Python version to install. This value is
+ ignored if the Conda version is less than 4.8. The default is
+ `py38` if using Python 3, and `py27` if using Python 2.
+
version: The version of Anaconda to download. The default value
is `4.8.3`.
@@ -104,7 +108,8 @@ class conda(bb_base, hpccm.templates.rm, hpccm.templates.wget):
self.__prefix = kwargs.get('prefix', '/usr/local/anaconda')
self.__python2 = kwargs.get('python2', False)
self.__python_version = '2' if self.__python2 else '3'
- self.__python_subversion = 'py27' if self.__python2 else 'py38'
+ self.__python_subversion = kwargs.get(
+ 'python_subversion', 'py27' if self.__python2 else 'py38')
self.__version = kwargs.get('version', '4.8.3')
self.__commands = [] # Filled in by __setup()
diff --git a/hpccm/building_blocks/llvm.py b/hpccm/building_blocks/llvm.py
index 5f1d143..2ad8b86 100644
--- a/hpccm/building_blocks/llvm.py
+++ b/hpccm/building_blocks/llvm.py
@@ -51,6 +51,9 @@ class llvm(bb_base, hpccm.templates.envvars):
extra_tools: Boolean flag to specify whether to also install
`clang-format` and `clang-tidy`. The default is False.
+ openmp: Boolean flag to specify whether to also install OpenMP
+ support. The default is True.
+
toolset: Boolean flag to specify whether to also install the
full LLVM toolset. The default is False.
@@ -97,6 +100,7 @@ class llvm(bb_base, hpccm.templates.envvars):
self.__compiler_debs = [] # Filled in below
self.__compiler_rpms = [] # Filled in below
self.__extra_tools = kwargs.get('extra_tools', False)
+ self.__openmp = kwargs.get('openmp', True)
self.__ospackages = kwargs.get('ospackages', []) # Filled in below
self.__runtime_debs = [] # Filled in below
self.__runtime_ospackages = [] # Filled in below
@@ -129,21 +133,26 @@ class llvm(bb_base, hpccm.templates.envvars):
if self.__version:
if LooseVersion(self.__version) <= LooseVersion('6.0'):
+ self.__compiler_debs = ['clang-{}'.format(self.__version)]
+ self.__runtime_debs = [
+ 'libclang1-{}'.format(self.__version)]
+
# Versioned OpenMP libraries do not exist for
# older versions
- self.__compiler_debs = [
- 'clang-{}'.format(self.__version),
- 'libomp-dev']
- self.__runtime_debs = [
- 'libclang1-{}'.format(self.__version),
- 'libomp5']
+ if self.__openmp:
+ self.__compiler_debs.append('libomp-dev')
+ self.__runtime_debs.append('libomp5')
+
else:
- self.__compiler_debs = [
- 'clang-{}'.format(self.__version),
- 'libomp-{}-dev'.format(self.__version)]
+ self.__compiler_debs = ['clang-{}'.format(self.__version)]
self.__runtime_debs = [
- 'libclang1-{}'.format(self.__version),
- 'libomp5-{}'.format(self.__version)]
+ 'libclang1-{}'.format(self.__version)]
+
+ if self.__openmp:
+ self.__compiler_debs.append(
+ 'libomp-{}-dev'.format(self.__version))
+ self.__runtime_debs.append(
+ 'libomp5-{}'.format(self.__version))
if self.__upstream:
# Upstream packages from apt.llvm.org
@@ -196,8 +205,12 @@ class llvm(bb_base, hpccm.templates.envvars):
else:
# Distro default
- self.__compiler_debs = ['clang', 'libomp-dev']
- self.__runtime_debs = ['libclang1', 'libomp5']
+ self.__compiler_debs = ['clang']
+ self.__runtime_debs = ['libclang1']
+
+ if self.__openmp:
+ self.__compiler_debs.append('libomp-dev')
+ self.__runtime_debs.append('libomp5')
if self.__toolset or self.__extra_tools:
self.__compiler_debs.extend(['clang-format', 'clang-tidy'])
@@ -224,10 +237,14 @@ class llvm(bb_base, hpccm.templates.envvars):
if self.__version:
if hpccm.config.g_linux_version >= StrictVersion('8.0'):
# Multiple versions are not available for CentOS 8
- self.__compiler_rpms = ['clang', 'llvm-libs', 'libomp']
- self.__runtime_rpms = ['llvm-libs', 'libomp']
+ self.__compiler_rpms = ['clang', 'llvm-libs']
+ self.__runtime_rpms = ['llvm-libs']
compiler_version = '8'
+ if self.__openmp:
+ self.__compiler_rpms.append('libomp')
+ self.__runtime_rpms.append('libomp')
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('clang-tools-extra')
@@ -236,14 +253,18 @@ class llvm(bb_base, hpccm.templates.envvars):
else:
# CentOS 7
self.__compiler_rpms = [
- 'llvm-toolset-{}-clang'.format(self.__version),
- 'llvm-toolset-{}-libomp-devel'.format(self.__version)]
+ 'llvm-toolset-{}-clang'.format(self.__version)]
self.__runtime_rpms = [
'llvm-toolset-{}-runtime'.format(self.__version),
- 'llvm-toolset-{}-libomp'.format(self.__version),
'llvm-toolset-{}-compiler-rt'.format(self.__version)]
compiler_version = '4.8.2'
+ if self.__openmp:
+ self.__compiler_rpms.append(
+ 'llvm-toolset-{}-libomp-devel'.format(self.__version))
+ self.__runtime_rpms.append(
+ 'llvm-toolset-{}-libomp'.format(self.__version))
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('llvm-toolset-{}-clang-tools-extra'.format(self.__version))
@@ -258,9 +279,12 @@ class llvm(bb_base, hpccm.templates.envvars):
self.__compiler_rpms = ['clang']
if hpccm.config.g_linux_version >= StrictVersion('8.0'):
# CentOS 8
- self.__runtime_rpms = ['llvm-libs', 'libomp']
+ self.__runtime_rpms = ['llvm-libs']
compiler_version = '8'
+ if self.__openmp:
+ self.__runtime_rpms.append('libomp')
+
if self.__toolset or self.__extra_tools:
self.__compiler_rpms.append('clang-tools-extra')
| NVIDIA/hpc-container-maker | f065b74de7fd09bebdde1be55b1e6c53dc629c68 | diff --git a/test/test_conda.py b/test/test_conda.py
index d86bdec..4a24071 100644
--- a/test/test_conda.py
+++ b/test/test_conda.py
@@ -166,6 +166,26 @@ RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://r
/usr/local/anaconda/bin/conda clean -afy && \
rm -rf /var/tmp/Miniconda2-4.7.12-Linux-x86_64.sh''')
+ @x86_64
+ @ubuntu
+ @docker
+ def test_python_subversion(self):
+ """python subversion"""
+ c = conda(eula=True, python_subversion='py37', version='4.8.3')
+ self.assertEqual(str(c),
+r'''# Anaconda
+RUN apt-get update -y && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ ca-certificates \
+ wget && \
+ rm -rf /var/lib/apt/lists/*
+RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.3-Linux-x86_64.sh && \
+ bash /var/tmp/Miniconda3-py37_4.8.3-Linux-x86_64.sh -b -p /usr/local/anaconda && \
+ /usr/local/anaconda/bin/conda init && \
+ ln -s /usr/local/anaconda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
+ /usr/local/anaconda/bin/conda clean -afy && \
+ rm -rf /var/tmp/Miniconda3-py37_4.8.3-Linux-x86_64.sh''')
+
@x86_64
@ubuntu
@docker
diff --git a/test/test_llvm.py b/test/test_llvm.py
index 693be94..f8a246b 100644
--- a/test/test_llvm.py
+++ b/test/test_llvm.py
@@ -169,6 +169,19 @@ ENV COMPILER_PATH=/usr/lib/gcc/aarch64-redhat-linux/8:$COMPILER_PATH \
with self.assertRaises(RuntimeError):
llvm()
+ @x86_64
+ @ubuntu
+ @docker
+ def test_openmp_ubuntu(self):
+ """openmp disabled"""
+ l = llvm(openmp=False)
+ self.assertEqual(str(l),
+r'''# LLVM compiler
+RUN apt-get update -y && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ clang && \
+ rm -rf /var/lib/apt/lists/*''')
+
@x86_64
@ubuntu
@docker
| conda: allow changing python subversion
Hi,
Our workflow needs, for the moment, a Python 3.7 build of conda (nothing I can change on our side). Until the previous release I was able to retrieve it by giving 'py37_4.8.3' as the version argument, but now the version argument is strictly checked for its format, and py38 (or py27) is forced.
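A sketch of the usage we are after (the `python_subversion` keyword mirrors what the new test above exercises):
```python
# recipe fragment for the hpccm CLI; selects the Miniconda3-py37_4.8.3 installer
# instead of the forced py38 one
Stage0 += conda(eula=True, python_subversion='py37', version='4.8.3')
```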
Could the __python_version be modified by the user as well ? That would be useful for us. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_conda.py::Test_conda::test_python_subversion",
"test/test_llvm.py::Test_llvm::test_openmp_ubuntu"
] | [
"test/test_conda.py::Test_conda::test_channels",
"test/test_conda.py::Test_conda::test_defaults_centos",
"test/test_conda.py::Test_conda::test_defaults_ubuntu",
"test/test_conda.py::Test_conda::test_environment",
"test/test_conda.py::Test_conda::test_ppc64le",
"test/test_conda.py::Test_conda::test_python2",
"test/test_conda.py::Test_conda::test_runtime",
"test/test_llvm.py::Test_llvm::test_aarch64_centos",
"test/test_llvm.py::Test_llvm::test_aarch64_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_centos",
"test/test_llvm.py::Test_llvm::test_defaults_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_ubuntu",
"test/test_llvm.py::Test_llvm::test_extra_tools_centos8",
"test/test_llvm.py::Test_llvm::test_extra_tools_ubuntu",
"test/test_llvm.py::Test_llvm::test_ppc64le_centos",
"test/test_llvm.py::Test_llvm::test_runtime",
"test/test_llvm.py::Test_llvm::test_toolchain",
"test/test_llvm.py::Test_llvm::test_toolset8_ubuntu",
"test/test_llvm.py::Test_llvm::test_toolset_centos7",
"test/test_llvm.py::Test_llvm::test_toolset_centos8",
"test/test_llvm.py::Test_llvm::test_toolset_ubuntu18",
"test/test_llvm.py::Test_llvm::test_upstream_aarch64",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu16",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu18",
"test/test_llvm.py::Test_llvm::test_version_centos",
"test/test_llvm.py::Test_llvm::test_version_ubuntu"
] | {
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
} | "2020-10-02T19:26:38Z" | apache-2.0 |
|
NVIDIA__hpc-container-maker-368 | diff --git a/docs/building_blocks.md b/docs/building_blocks.md
index 8c492de..aac8f1c 100644
--- a/docs/building_blocks.md
+++ b/docs/building_blocks.md
@@ -3623,6 +3623,11 @@ packages are extracted they are deleted. This parameter is ignored
if `download` is False. If empty, then the downloaded packages are
not extracted. The default value is an empty string.
+- __force_add_repo__: Boolean flag to specify whether adding a
+repository should be considered successful no matter the actual
+result. This parameter is only valid for yum repositories. The
+default value is False.
+
- __ospackages__: A list of packages to install. The list is used for
both Ubuntu and RHEL-based Linux distributions, therefore only
packages with the consistent names across Linux distributions
@@ -4657,6 +4662,10 @@ packages are extracted they are deleted. This parameter is ignored
if `download` is False. If empty, then the downloaded packages are
not extracted. The default value is an empty string.
+- __force_add_repo__: Boolean flag to specify whether adding a
+repository should be considered successful no matter the actual
+result. The default value is False.
+
- __keys__: A list of GPG keys to import. The default is an empty list.
- __ospackages__: A list of packages to install. The default is an
diff --git a/hpccm/building_blocks/nsight_compute.py b/hpccm/building_blocks/nsight_compute.py
index 5bff9e7..33265a1 100644
--- a/hpccm/building_blocks/nsight_compute.py
+++ b/hpccm/building_blocks/nsight_compute.py
@@ -172,6 +172,8 @@ class nsight_compute(bb_base):
self += packages(
apt_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
apt_repositories=['deb https://developer.download.nvidia.com/devtools/repos/{0}/{1}/ /'.format(self.__distro_label, self.__arch_label)],
+ # https://github.com/NVIDIA/hpc-container-maker/issues/367
+ force_add_repo=True,
ospackages=['nsight-compute-{}'.format(self.__version)],
yum_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
yum_repositories=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}'.format(self.__distro_label, self.__arch_label)])
diff --git a/hpccm/building_blocks/nsight_systems.py b/hpccm/building_blocks/nsight_systems.py
index 24e1761..4a89e99 100644
--- a/hpccm/building_blocks/nsight_systems.py
+++ b/hpccm/building_blocks/nsight_systems.py
@@ -87,6 +87,8 @@ class nsight_systems(bb_base):
self += packages(
apt_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
apt_repositories=['deb https://developer.download.nvidia.com/devtools/repos/{0}/{1}/ /'.format(self.__distro_label, self.__arch_label)],
+ # https://github.com/NVIDIA/hpc-container-maker/issues/367
+ force_add_repo=True,
ospackages=[package],
yum_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
yum_repositories=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}'.format(self.__distro_label, self.__arch_label)])
diff --git a/hpccm/building_blocks/packages.py b/hpccm/building_blocks/packages.py
index c524b35..272fd16 100644
--- a/hpccm/building_blocks/packages.py
+++ b/hpccm/building_blocks/packages.py
@@ -74,6 +74,11 @@ class packages(bb_base):
if `download` is False. If empty, then the downloaded packages are
not extracted. The default value is an empty string.
+ force_add_repo: Boolean flag to specify whether adding a
+ repository should be considered successful no matter the actual
+ result. This parameter is only valid for yum repositories. The
+ default value is False.
+
ospackages: A list of packages to install. The list is used for
both Ubuntu and RHEL-based Linux distributions, therefore only
packages with the consistent names across Linux distributions
@@ -140,6 +145,7 @@ class packages(bb_base):
self.__extra_opts = kwargs.get('extra_opts', [])
self.__extract = kwargs.get('extract', None)
self.__epel = kwargs.get('epel', False)
+ self.__force_add_repo = kwargs.get('force_add_repo', False)
self.__ospackages = kwargs.get('ospackages', [])
self.__powertools = kwargs.get('powertools', False)
self.__release_stream = kwargs.get('release_stream', False)
@@ -180,6 +186,7 @@ class packages(bb_base):
extra_opts=self.__extra_opts,
extract=self.__extract,
epel=self.__epel,
+ force_add_repo=self.__force_add_repo,
keys=self.__yum_keys,
ospackages=ospackages,
powertools=self.__powertools,
diff --git a/hpccm/building_blocks/yum.py b/hpccm/building_blocks/yum.py
index 3d465cc..26fd687 100644
--- a/hpccm/building_blocks/yum.py
+++ b/hpccm/building_blocks/yum.py
@@ -58,6 +58,10 @@ class yum(bb_base):
if `download` is False. If empty, then the downloaded packages are
not extracted. The default value is an empty string.
+ force_add_repo: Boolean flag to specify whether adding a
+ repository should be considered successful no matter the actual
+ result. The default value is False.
+
keys: A list of GPG keys to import. The default is an empty list.
ospackages: A list of packages to install. The default is an
@@ -104,6 +108,7 @@ class yum(bb_base):
self.__epel = kwargs.get('epel', False)
self.__extra_opts = kwargs.get('extra_opts', [])
self.__extract = kwargs.get('extract', None)
+ self.__force_add_repo = kwargs.get('force_add_repo', False)
self.__keys = kwargs.get('keys', [])
self.__opts = ['-y']
self.ospackages = kwargs.get('ospackages', [])
@@ -166,8 +171,12 @@ class yum(bb_base):
self.__commands.append('yum install -y yum-utils')
for repo in self.__repositories:
- self.__commands.append(
- 'yum-config-manager --add-repo {}'.format(repo))
+ if self.__force_add_repo:
+ self.__commands.append(
+ '(yum-config-manager --add-repo {} || true)'.format(repo))
+ else:
+ self.__commands.append(
+ 'yum-config-manager --add-repo {}'.format(repo))
if self.__epel:
# This needs to be a discrete, preliminary step so that
| NVIDIA/hpc-container-maker | 887196021837ddf8c6b88031d3134ede54bea876 | diff --git a/test/test_nsight_compute.py b/test/test_nsight_compute.py
index 013329a..8dc5e23 100644
--- a/test/test_nsight_compute.py
+++ b/test/test_nsight_compute.py
@@ -63,7 +63,7 @@ RUN wget -qO - https://developer.download.nvidia.com/devtools/repos/ubuntu1604/a
r'''# NVIDIA Nsight Compute 2020.2.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64/nvidia.pub && \
yum install -y dnf-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64 && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64 || true) && \
yum install -y \
nsight-compute-2020.2.1 && \
rm -rf /var/cache/yum/*''')
@@ -122,7 +122,7 @@ RUN wget -qO - https://developer.download.nvidia.com/devtools/repos/ubuntu1804/p
r'''# NVIDIA Nsight Compute 2020.2.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le/nvidia.pub && \
yum install -y yum-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le || true) && \
yum install -y \
nsight-compute-2020.2.1 && \
rm -rf /var/cache/yum/*''')
@@ -137,7 +137,7 @@ RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/ppc6
r'''# NVIDIA Nsight Compute 2020.2.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/arm64/nvidia.pub && \
yum install -y yum-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/arm64 && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/arm64 || true) && \
yum install -y \
nsight-compute-2020.2.1 && \
rm -rf /var/cache/yum/*''')
diff --git a/test/test_nsight_systems.py b/test/test_nsight_systems.py
index bd17daa..2d3d128 100644
--- a/test/test_nsight_systems.py
+++ b/test/test_nsight_systems.py
@@ -63,7 +63,7 @@ RUN wget -qO - https://developer.download.nvidia.com/devtools/repos/ubuntu1604/a
r'''# NVIDIA Nsight Systems 2021.1.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64/nvidia.pub && \
yum install -y dnf-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64 && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel8/x86_64 || true) && \
yum install -y \
nsight-systems-cli-2021.1.1 && \
rm -rf /var/cache/yum/*''')
@@ -144,7 +144,7 @@ RUN wget -qO - https://developer.download.nvidia.com/devtools/repos/ubuntu1804/p
r'''# NVIDIA Nsight Systems 2020.1.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le/nvidia.pub && \
yum install -y yum-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/ppc64le || true) && \
yum install -y \
nsight-systems-cli-2020.1.1 && \
rm -rf /var/cache/yum/*''')
@@ -159,7 +159,7 @@ RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/ppc6
r'''# NVIDIA Nsight Systems 2020.2.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/arm64/nvidia.pub && \
yum install -y yum-utils && \
- yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/arm64 && \
+ (yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/arm64 || true) && \
yum install -y \
nsight-systems-cli-2020.2.1 && \
rm -rf /var/cache/yum/*''')
| yum errors with same repo twice
When installing both nsys (Nsight Systems) and Nsight Compute in a CentOS container, the same devtools repository gets configured twice. A recipe along the lines of the sketch below is enough to trigger it.
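(The building-block arguments and versions in the sketch are illustrative.)
```python
# recipe.py -- render with: hpccm --recipe recipe.py --format docker
Stage0 += baseimage(image='centos:7')
Stage0 += nsight_systems(version='2021.1.1')   # adds the devtools yum repo
Stage0 += nsight_compute(version='2020.2.1')   # adds the same repo again
```
HPCCM then issues the following commands: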
```
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64/nvidia.pub && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64 && \
yum install -y \
nsight-systems-cli-2021.1.1 && \
rm -rf /var/cache/yum/*
# NVIDIA Nsight Compute 2020.2.1
RUN rpm --import https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64/nvidia.pub && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64 && \
yum install -y \
nsight-compute-2020.2.1 && \
rm -rf /var/cache/yum/*
```
This errors out because yum-config-manager does not allow the same repo to be added twice:
```
adding repo from: https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64
Cannot add repo from https://developer.download.nvidia.com/devtools/repos/rhel7/x86_64 as is a duplicate of an existing repo
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_nsight_compute.py::Test_nsight_compute::test_aarch64_centos",
"test/test_nsight_compute.py::Test_nsight_compute::test_basic_centos8",
"test/test_nsight_compute.py::Test_nsight_compute::test_ppc64le_centos",
"test/test_nsight_systems.py::Test_nsight_systems::test_aarch64_centos",
"test/test_nsight_systems.py::Test_nsight_systems::test_basic_centos8",
"test/test_nsight_systems.py::Test_nsight_systems::test_ppc64le_centos"
] | [
"test/test_nsight_compute.py::Test_nsight_compute::test_basic_ubuntu",
"test/test_nsight_compute.py::Test_nsight_compute::test_basic_ubuntu_url",
"test/test_nsight_compute.py::Test_nsight_compute::test_ppc64le_ubuntu18",
"test/test_nsight_compute.py::Test_nsight_compute::test_runfile",
"test/test_nsight_compute.py::Test_nsight_compute::test_version",
"test/test_nsight_systems.py::Test_nsight_systems::test_basic_ubuntu",
"test/test_nsight_systems.py::Test_nsight_systems::test_cli",
"test/test_nsight_systems.py::Test_nsight_systems::test_ppc64le_ubuntu18",
"test/test_nsight_systems.py::Test_nsight_systems::test_version"
] | {
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2021-04-29T19:32:20Z" | apache-2.0 |
|
NVIDIA__hpc-container-maker-472 | diff --git a/hpccm/building_blocks/llvm.py b/hpccm/building_blocks/llvm.py
index f0d68fc..5897a99 100644
--- a/hpccm/building_blocks/llvm.py
+++ b/hpccm/building_blocks/llvm.py
@@ -104,7 +104,7 @@ class llvm(bb_base, hpccm.templates.envvars):
self.__runtime_rpms = [] # Filled in below
self.__toolset = kwargs.get('toolset', False)
# Current LLVM trunk version
- self.__trunk_version = kwargs.get('_trunk_version', '17')
+ self.__trunk_version = kwargs.get('_trunk_version', '18')
self.__upstream = kwargs.get('upstream', False)
self.__version = kwargs.get('version', None)
| NVIDIA/hpc-container-maker | 4f5ef11bab5c4803e170ba8ab6c6a93855e5a9f4 | diff --git a/test/test_llvm.py b/test/test_llvm.py
index c6c0f2e..f1527a6 100644
--- a/test/test_llvm.py
+++ b/test/test_llvm.py
@@ -361,15 +361,15 @@ RUN wget -qO - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
echo "deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- clang-17 \
- clang-format-17 \
- clang-tidy-17 \
- libomp-17-dev && \
+ clang-18 \
+ clang-format-18 \
+ clang-tidy-18 \
+ libomp-18-dev && \
rm -rf /var/lib/apt/lists/*
-RUN update-alternatives --install /usr/bin/clang clang $(which clang-17) 30 && \
- update-alternatives --install /usr/bin/clang++ clang++ $(which clang++-17) 30 && \
- update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-17) 30 && \
- update-alternatives --install /usr/bin/clang-tidy clang-tidy $(which clang-tidy-17) 30''')
+RUN update-alternatives --install /usr/bin/clang clang $(which clang-18) 30 && \
+ update-alternatives --install /usr/bin/clang++ clang++ $(which clang++-18) 30 && \
+ update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-18) 30 && \
+ update-alternatives --install /usr/bin/clang-tidy clang-tidy $(which clang-tidy-18) 30''')
@x86_64
@ubuntu20
@@ -391,15 +391,15 @@ RUN wget -qO - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- clang-17 \
- clang-format-17 \
- clang-tidy-17 \
- libomp-17-dev && \
+ clang-18 \
+ clang-format-18 \
+ clang-tidy-18 \
+ libomp-18-dev && \
rm -rf /var/lib/apt/lists/*
-RUN update-alternatives --install /usr/bin/clang clang $(which clang-17) 30 && \
- update-alternatives --install /usr/bin/clang++ clang++ $(which clang++-17) 30 && \
- update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-17) 30 && \
- update-alternatives --install /usr/bin/clang-tidy clang-tidy $(which clang-tidy-17) 30''')
+RUN update-alternatives --install /usr/bin/clang clang $(which clang-18) 30 && \
+ update-alternatives --install /usr/bin/clang++ clang++ $(which clang++-18) 30 && \
+ update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-18) 30 && \
+ update-alternatives --install /usr/bin/clang-tidy clang-tidy $(which clang-tidy-18) 30''')
@aarch64
@ubuntu
| LLVM trunk version out-of-date
Since Jul 26th 2023, LLVM trunk is version 18.
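For context, a sketch of the kind of recipe affected (arguments are illustrative; the updated tests above expect the clang-18 packages from apt.llvm.org):
```python
Stage0 += baseimage(image='ubuntu:20.04')
# with the bumped trunk constant this should resolve to clang-18, clang-format-18,
# clang-tidy-18 and libomp-18-dev from the apt.llvm.org focal repository
Stage0 += llvm(upstream=True, extra_tools=True)
```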
`self.__trunk_version` in `hpccm/building_blocks/llvm.py` should be bumped to allow installing Clang 17 or 18. | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu18",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu20"
] | [
"test/test_llvm.py::Test_llvm::test_aarch64_centos",
"test/test_llvm.py::Test_llvm::test_aarch64_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_centos",
"test/test_llvm.py::Test_llvm::test_defaults_centos8",
"test/test_llvm.py::Test_llvm::test_defaults_ubuntu",
"test/test_llvm.py::Test_llvm::test_extra_tools_centos8",
"test/test_llvm.py::Test_llvm::test_extra_tools_ubuntu",
"test/test_llvm.py::Test_llvm::test_openmp_ubuntu",
"test/test_llvm.py::Test_llvm::test_ppc64le_centos",
"test/test_llvm.py::Test_llvm::test_runtime",
"test/test_llvm.py::Test_llvm::test_toolchain",
"test/test_llvm.py::Test_llvm::test_toolchain_zen2",
"test/test_llvm.py::Test_llvm::test_toolset8_ubuntu",
"test/test_llvm.py::Test_llvm::test_toolset_centos7",
"test/test_llvm.py::Test_llvm::test_toolset_centos8",
"test/test_llvm.py::Test_llvm::test_toolset_ubuntu18",
"test/test_llvm.py::Test_llvm::test_upstream_aarch64",
"test/test_llvm.py::Test_llvm::test_upstream_ubuntu16",
"test/test_llvm.py::Test_llvm::test_version_centos",
"test/test_llvm.py::Test_llvm::test_version_ubuntu"
] | {
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
} | "2023-10-11T13:44:23Z" | apache-2.0 |
|
Netflix-Skunkworks__policyuniverse-99 | diff --git a/policyuniverse/statement.py b/policyuniverse/statement.py
index a571b1d..4ad4d4d 100644
--- a/policyuniverse/statement.py
+++ b/policyuniverse/statement.py
@@ -28,6 +28,11 @@ from policyuniverse.common import ensure_array, is_array
import re
from collections import namedtuple
+try:
+ from collections.abc import Mapping
+except ImportError:
+ # Python 2.7 compatibility
+ from collections import Mapping
PrincipalTuple = namedtuple("Principal", "category value")
ConditionTuple = namedtuple("Condition", "category value")
@@ -115,7 +120,7 @@ class Statement(object):
# It is possible not to define a principal, AWS ignores these statements.
return principals
- if isinstance(principal, dict):
+ if isinstance(principal, Mapping):
if "AWS" in principal:
self._add_or_extend(principal["AWS"], principals)
| Netflix-Skunkworks/policyuniverse | 37eccbc5be0552c85d58fcf2b6c5425ff7fe0984 | diff --git a/policyuniverse/tests/helpers.py b/policyuniverse/tests/helpers.py
new file mode 100644
index 0000000..cdaf15c
--- /dev/null
+++ b/policyuniverse/tests/helpers.py
@@ -0,0 +1,27 @@
+from policyuniverse.statement import Mapping
+from policyuniverse.common import Sequence
+
+
+class CustomSequence(Sequence):
+ def __init__(self, data):
+ self._sequence = data
+
+ def __getitem__(self, item):
+ return self._sequence[item]
+
+ def __len__(self):
+ return len(self._sequence)
+
+
+class CustomMapping(Mapping):
+ def __init__(self, data):
+ self._mapping = data
+
+ def __getitem__(self, item):
+ return self._mapping[item]
+
+ def __len__(self):
+ return len(self._mapping)
+
+ def __iter__(self):
+ return iter(self._mapping)
diff --git a/policyuniverse/tests/test_policy.py b/policyuniverse/tests/test_policy.py
index 2f3238e..6197611 100644
--- a/policyuniverse/tests/test_policy.py
+++ b/policyuniverse/tests/test_policy.py
@@ -24,6 +24,8 @@ from policyuniverse import logger
import unittest
import json
+from .helpers import CustomMapping, CustomSequence
+
policy01 = dict(
Version="2012-10-08",
@@ -122,6 +124,26 @@ policy06 = dict(
],
)
+# Custom types
+policy07 = CustomMapping(
+ dict(
+ Statement=CustomSequence(
+ [
+ CustomMapping(
+ dict(
+ Action="s3:GetBucketAcl",
+ Effect="Allow",
+ Principal=CustomMapping({"AWS": "*"}),
+ Resource="arn:aws:s3:::example-bucket",
+ Sid="Public Access",
+ )
+ )
+ ]
+ ),
+ Version="2012-10-17",
+ )
+)
+
class PolicyTestCase(unittest.TestCase):
def test_internet_accessible(self):
@@ -258,3 +280,8 @@ class PolicyTestCase(unittest.TestCase):
list(s.statement for s in policy.statements),
[policy_document["Statement"][0]],
)
+
+ def test_mapping_and_sequence_policy_document(self):
+ policy = Policy(policy07)
+ self.assertSetEqual(policy.principals, set("*"))
+ self.assertIs(policy.is_internet_accessible(), True)
diff --git a/policyuniverse/tests/test_statement.py b/policyuniverse/tests/test_statement.py
index a685fe0..01f704f 100644
--- a/policyuniverse/tests/test_statement.py
+++ b/policyuniverse/tests/test_statement.py
@@ -22,6 +22,8 @@
from policyuniverse.statement import Statement
import unittest
+from .helpers import CustomMapping, CustomSequence
+
# NotPrincipal
statement01 = dict(
Effect="Allow",
@@ -327,6 +329,17 @@ statement30 = dict(
Condition={"StringLike": {"AWS:PrincipalOrgID": "o-*"}},
)
+# Custom Mapping / Sequence types
+statement31 = CustomMapping(
+ dict(
+ Action="s3:GetBucketAcl",
+ Effect="Allow",
+ Principal=CustomMapping({"AWS": "*"}),
+ Resource="arn:aws:s3:::example-bucket",
+ Sid="Public Access",
+ )
+)
+
class StatementTestCase(unittest.TestCase):
def test_statement_effect(self):
@@ -373,6 +386,10 @@ class StatementTestCase(unittest.TestCase):
statement = Statement(statement_wo_principal)
self.assertEqual(statement.principals, set([]))
+ # Custom types
+ statement = Statement(statement31)
+ self.assertSetEqual(statement.principals, set(["*"]))
+
def test_statement_conditions(self):
statement = Statement(statement07)
self.assertEqual(
| Perform Principal extraction with any Mapping type
Previous work in #94 did not include an expansion to all `Mapping` types for `Principal` extraction in `Statement`:
https://github.com/Netflix-Skunkworks/policyuniverse/blob/37eccbc5be0552c85d58fcf2b6c5425ff7fe0984/policyuniverse/statement.py#L118
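For illustration, a statement whose `Principal` lives in a user-defined mapping (a minimal `collections.abc.Mapping` wrapper mirroring the test helpers added above; Python 3 shown):
```python
from collections.abc import Mapping
from policyuniverse.statement import Statement

class CustomMapping(Mapping):
    def __init__(self, data):
        self._data = data
    def __getitem__(self, key):
        return self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

# extracting principals from this statement used to raise
# TypeError: unhashable type: 'CustomMapping'; with the fix it yields {'*'}
statement = Statement(CustomMapping({
    "Action": "s3:GetBucketAcl",
    "Effect": "Allow",
    "Principal": CustomMapping({"AWS": "*"}),
    "Resource": "arn:aws:s3:::example-bucket",
}))
print(statement.principals)  # {'*'}
```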
The following error is raised if a custom `Mapping` (a subclass of `collections.abc.Mapping`) is used:
```python
    def _add_or_extend(self, value, structure):
        if is_array(value):
            structure.update(set(value))
        else:
>           structure.add(value)
E           TypeError: unhashable type: 'CustomMapping'
``` | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_action_summary",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_condition_entries",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_evasion_policies",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_internet_accessible",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_internet_accessible_actions",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_mapping_and_sequence_policy_document",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_non_list_sequence_statement",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_principals",
"policyuniverse/tests/test_policy.py::PolicyTestCase::test_whos_allowed",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_conditions",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_effect",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_internet_accessible",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_not_principal",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_principals",
"policyuniverse/tests/test_statement.py::StatementTestCase::test_statement_summary"
] | [] | {
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
} | "2021-02-18T08:19:37Z" | apache-2.0 |
|
Netuitive__netuitive-statsd-14 | diff --git a/libs/config.py b/libs/config.py
index c43a9a0..5d68cb9 100644
--- a/libs/config.py
+++ b/libs/config.py
@@ -6,30 +6,18 @@ import socket
import os
import configobj
import sys
+import subprocess
logger = logging.getLogger(__name__)
def config(args=None):
- # try to find the hostname
- hostname = socket.getfqdn().split('.')[0]
-
- if hostname == 'localhost':
- hostname = socket.gethostname().split('.')[0]
-
- if hostname == 'localhost':
- hostname = os.uname()[1].split('.')[0]
-
- if hostname == 'localhost':
- logger.error('could not determine hostname')
-
# default config
ret = {
'enabled': True,
'url': 'https://api.app.netuitive.com/ingest',
'api_key': None,
- 'hostname': hostname,
'interval': 60,
'element_type': 'SERVER',
'prefix': 'statsd',
@@ -67,6 +55,7 @@ def config(args=None):
# assemble the config from config file
+ ret['hostname'] = get_hostname(cfg)
ret['configfile'] = configfile
ret['url'] = cfg['handlers']['NetuitiveHandler']['url']
ret['api_key'] = cfg['handlers'][
@@ -162,3 +151,98 @@ def config(args=None):
except Exception as e:
logger.error(e, exc_info=True)
raise(e)
+
+
+def get_hostname(fullconfig, method=None):
+ """
+ Returns a hostname as configured by the user
+ """
+ config = fullconfig.get('collectors').get('default')
+ method = method or config.get('hostname_method', 'smart')
+
+ # case insensitive method
+ method = method.lower()
+
+ if 'hostname' in config and method != 'shell':
+ return config['hostname']
+
+ if method == 'shell':
+ if 'hostname' not in config:
+ raise Exception(
+ "hostname must be set to a shell command for"
+ " hostname_method=shell")
+ else:
+ proc = subprocess.Popen(config['hostname'],
+ shell=True,
+ stdout=subprocess.PIPE)
+ hostname = proc.communicate()[0].strip()
+ if proc.returncode != 0:
+ raise subprocess.CalledProcessError(proc.returncode,
+ config['hostname'])
+ return hostname
+
+ if method == 'smart':
+ hostname = get_hostname(config, 'fqdn_short')
+ if hostname != 'localhost':
+ return hostname
+ hostname = get_hostname(config, 'hostname_short')
+ return hostname
+
+ if method == 'fqdn_short':
+ hostname = socket.getfqdn().split('.')[0]
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'fqdn':
+ hostname = socket.getfqdn().replace('.', '_')
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'fqdn_rev':
+ hostname = socket.getfqdn().split('.')
+ hostname.reverse()
+ hostname = '.'.join(hostname)
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'uname_short':
+ hostname = os.uname()[1].split('.')[0]
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'uname_rev':
+ hostname = os.uname()[1].split('.')
+ hostname.reverse()
+ hostname = '.'.join(hostname)
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'hostname':
+ hostname = socket.gethostname()
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'hostname_short':
+ hostname = socket.gethostname().split('.')[0]
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'hostname_rev':
+ hostname = socket.gethostname().split('.')
+ hostname.reverse()
+ hostname = '.'.join(hostname)
+ if hostname == '':
+ raise Exception('Hostname is empty?!')
+ return hostname
+
+ if method == 'none':
+ return None
+
+ raise NotImplementedError(config['hostname_method'])
diff --git a/netuitive-statsd.conf.example b/netuitive-statsd.conf.example
index b231aae..eb08f33 100644
--- a/netuitive-statsd.conf.example
+++ b/netuitive-statsd.conf.example
@@ -28,6 +28,26 @@ enabled = True
[[default]]
hostname = statsd-test-host
+# If you prefer to just use a different way of calculating the hostname
+# Uncomment and set this to one of these values:
+
+# smart = Default. Tries fqdn_short. If that's localhost, uses hostname_short
+
+# fqdn_short = Default. Similar to hostname -s
+# fqdn = hostname output
+# fqdn_rev = hostname in reverse (com.example.www)
+
+# uname_short = Similar to uname -n, but only the first part
+# uname_rev = uname -r in reverse (com.example.www)
+
+# hostname_short = `hostname -s`
+# hostname = `hostname`
+# hostname_rev = `hostname` in reverse (com.example.www)
+
+# shell = Run the string set in hostname as a shell command and use its
+# output(with spaces trimmed off from both ends) as the hostname.
+
+# hostname_method = smart
[logger_root]
| Netuitive/netuitive-statsd | 0b8c90d3a351de7f0b6d6ec5915af31263702590 | diff --git a/tests/test_config.py b/tests/test_config.py
index 197d8cb..9a5512c 100755
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -32,7 +32,6 @@ class Test_Config(unittest.TestCase):
'forward': False,
'forward_ip': None,
'forward_port': None,
- 'hostname': socket.getfqdn().split('.')[0],
'interval': 60,
'listen_ip': '127.0.0.1',
'listen_port': 8125,
| Support More Flexible Hostname Methods
Ideally this will match the Metricly Diamond project:
https://github.com/Netuitive/netuitive-diamond/blob/f48a0bc7de6038164e0919e2d3d0fd524c83e1d9/conf/diamond.conf.example#L146-L169 | 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/test_config.py::Test_Config::test_defaults"
] | [
"tests/test_config.py::Test_Config::test_args_and_configfile",
"tests/test_config.py::Test_Config::test_args_only",
"tests/test_config.py::Test_Config::test_missing_configfile"
] | {
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
} | "2017-08-28T14:34:19Z" | apache-2.0 |
|
NeuralEnsemble__python-neo-1297 | diff --git a/neo/io/__init__.py b/neo/io/__init__.py
index 22b6df66..4aa31692 100644
--- a/neo/io/__init__.py
+++ b/neo/io/__init__.py
@@ -451,8 +451,23 @@ def list_candidate_ios(file_or_folder, ignore_patterns=['*.ini', 'README.txt', '
# if only file prefix was provided, e.g /mydatafolder/session1-
# to select all files sharing the `session1-` prefix
elif file_or_folder.parent.exists():
- filenames = file_or_folder.parent.glob(file_or_folder.name + '*')
-
+ filenames = list(file_or_folder.parent.glob(file_or_folder.name + '*'))
+ # if filenames empty and suffix is provided then non-existent file
+ # may be written in current dir. So run check for io
+ if len(filenames)==0 and file_or_folder.suffix:
+ suffix = file_or_folder.suffix[1:].lower()
+ if suffix not in io_by_extension:
+ raise ValueError(f'{suffix} is not a supported format of any IO.')
+ return io_by_extension[suffix]
+
+ # If non-existent file in non-existent dir is given check if this
+ # structure could be created with an io writing the file
+ elif file_or_folder.suffix:
+ suffix = file_or_folder.suffix[1:].lower()
+ if suffix not in io_by_extension:
+ raise ValueError(f'{suffix} is not a supported format of any IO.')
+ return io_by_extension[suffix]
+
else:
raise ValueError(f'{file_or_folder} does not contain data files of a supported format')
| NeuralEnsemble/python-neo | f608309c5ce031ecd905349c140b07a3dafb057d | diff --git a/neo/test/iotest/test_get_io.py b/neo/test/iotest/test_get_io.py
new file mode 100644
index 00000000..b43499b0
--- /dev/null
+++ b/neo/test/iotest/test_get_io.py
@@ -0,0 +1,41 @@
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from neo.io import get_io, list_candidate_ios, NixIO
+
+
+def test_list_candidate_ios_non_existant_file():
+ # use plexon io suffix for testing here
+ non_existant_file = Path('non_existant_folder/non_existant_file.plx')
+ non_existant_file.unlink(missing_ok=True)
+ ios = list_candidate_ios(non_existant_file)
+
+ assert ios
+
+ # cleanup
+ non_existant_file.unlink(missing_ok=True)
+
+
+def test_list_candidate_ios_filename_stub():
+ # create dummy folder with dummy files
+ with TemporaryDirectory(prefix='filename_stub_test_') as test_folder:
+ test_folder = Path(test_folder)
+ test_filename = (test_folder / 'dummy_file.nix')
+ test_filename.touch()
+ filename_stub = test_filename.with_suffix('')
+
+ # check that io is found even though file suffix was not provided
+ ios = list_candidate_ios(filename_stub)
+
+ assert NixIO in ios
+
+
+def test_get_io_non_existant_file_writable_io():
+ # use nixio for testing with writable io
+ non_existant_file = Path('non_existant_file.nix')
+ non_existant_file.unlink(missing_ok=True)
+ io = get_io(non_existant_file)
+
+ assert isinstance(io, NixIO)
+
+ # cleanup
+ non_existant_file.unlink(missing_ok=True)
| neo.get_io fails on file yet to be written
**Describe the bug**
Calling `neo.get_io` no longer works with the name of a file that is yet to be written, which means it can no longer be used before an `io.write(...)` call.
**To Reproduce**
```python
import neo
neo.get_io("tobewritten.pkl")
```
**Expected behaviour**
An IO instance should be returned, for example:
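(A sketch; the concrete IO class depends on the extension lookup.)
```python
io = neo.get_io("tobewritten.pkl")   # should return a writable IO picked by extension
io.write(block)                      # `block` being whatever neo Block is to be saved
```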
**Environment:**
- OS: probably all, but found on Linux
- Python version: found with 3.11
- Neo version: 0.12.0 (worked in Neo 0.10)
- NumPy version: not relevant
**Additional context**
Our workaround is:
```python
def get_neo_io(file_or_folder):
    try:
        return neo.get_io(file_or_folder)
    except ValueError as ex:
        # As neo.get_io only works with existing files
        _, suffix = os.path.splitext(file_or_folder)
        suffix = suffix[1:].lower()
        if suffix in neo.io_by_extension:
            writer_list = neo.io_by_extension[suffix]
            return writer_list[0](file_or_folder)
```
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"neo/test/iotest/test_get_io.py::test_list_candidate_ios_non_existant_file",
"neo/test/iotest/test_get_io.py::test_get_io_non_existant_file_writable_io"
] | [
"neo/test/iotest/test_get_io.py::test_list_candidate_ios_filename_stub"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2023-06-19T16:48:00Z" | bsd-3-clause |
|
NeurodataWithoutBorders__pynwb-57 | diff --git a/src/form/data_utils.py b/src/form/data_utils.py
index 7e0f16f8..6b249dbc 100644
--- a/src/form/data_utils.py
+++ b/src/form/data_utils.py
@@ -98,7 +98,7 @@ class DataChunkIterator(AbstractDataChunkIterator):
# Try to get an accurate idea of max_shape for other Python datastructures if possible.
# Don't just callget_shape for a generator as that would potentially trigger loading of all the data
elif isinstance(self.data, list) or isinstance(self.data, tuple):
- self.max_shape = ShapeValidator.get_data_shape(self.data)
+ self.max_shape = ShapeValidator.get_data_shape(self.data, strict_no_data_load=True)
# If we have a data iterator, then read the first chunk
if self.__data_iter is not None: # and(self.max_shape is None or self.dtype is None):
@@ -108,13 +108,16 @@ class DataChunkIterator(AbstractDataChunkIterator):
if self.max_shape is None and self.__next_chunk.data is not None:
data_shape = ShapeValidator.get_data_shape(self.__next_chunk.data)
self.max_shape = list(data_shape)
- self.max_shape[0] = None
+ try:
+ self.max_shape[0] = len(self.data) # We use self.data here because self.__data_iter does not allow len
+ except TypeError:
+ self.max_shape[0] = None
self.max_shape = tuple(self.max_shape)
# Determine the type of the data if possible
if self.__next_chunk.data is not None:
self.dtype = self.__next_chunk.data.dtype
- self.__first_chunk_shape = self.__next_chunk.data.shape
+ self.__first_chunk_shape = ShapeValidator.get_data_shape(self.__next_chunk.data)
def __iter__(self):
"""Return the iterator object"""
| NeurodataWithoutBorders/pynwb | 17699d46b6bc3902b8fa47fb24f4f376b1b7ddc0 | diff --git a/tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py b/tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py
index b57867c1..58d86c3f 100644
--- a/tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py
+++ b/tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py
@@ -56,22 +56,22 @@ class DataChunkIteratorTests(unittest.TestCase):
def test_standard_iterator_unbuffered(self):
dci = DataChunkIterator(data=range(10), buffer_size=1)
self.assertEqual(dci.dtype, np.dtype(int))
- self.assertTupleEqual(dci.max_shape, (None,))
- self.assertTupleEqual(dci.recommended_data_shape(), (1,)) # Test before and after iteration
+ self.assertTupleEqual(dci.max_shape, (10,))
+ self.assertTupleEqual(dci.recommended_data_shape(), (10,)) # Test before and after iteration
count = 0
for chunk in dci:
self.assertEqual(chunk.data.shape[0], 1)
count+=1
self.assertEqual(count, 10)
- self.assertTupleEqual(dci.recommended_data_shape(), (1,)) # Test before and after iteration
+ self.assertTupleEqual(dci.recommended_data_shape(), (10,)) # Test before and after iteration
self.assertIsNone(dci.recommended_chunk_shape())
def test_standard_iterator_unmatched_buffersized(self):
dci = DataChunkIterator(data=range(10), buffer_size=3)
self.assertEquals(dci.dtype, np.dtype(int))
- self.assertTupleEqual(dci.max_shape, (None,))
+ self.assertTupleEqual(dci.max_shape, (10,))
self.assertIsNone(dci.recommended_chunk_shape())
- self.assertTupleEqual(dci.recommended_data_shape(), (3,)) # Test before and after iteration
+ self.assertTupleEqual(dci.recommended_data_shape(), (10,)) # Test before and after iteration
count = 0
for chunk in dci:
if count < 3:
@@ -79,7 +79,7 @@ class DataChunkIteratorTests(unittest.TestCase):
else:
self.assertEqual(chunk.data.shape[0], 1)
count +=1
- self.assertTupleEqual(dci.recommended_data_shape(), (3,)) # Test before and after iteration
+ self.assertTupleEqual(dci.recommended_data_shape(), (10,)) # Test before and after iteration
self.assertEqual(count, 4)
def test_multidimensional_list(self):
| Bug: unittest test_standard_iterator_unbuffered seems buggy
Subtle bug, but here it goes:
In test_standard_iterator_unbuffered (tests/unit/form_tests/utils_test/test_core_DataChunkIterator), lines 57 and 59, we have:
dci = DataChunkIterator(data=range(10), buffer_size=1)
self.assertTupleEqual(dci.max_shape, (None,))
I claim it should read:
self.assertTupleEqual(dci.max_shape, (10,))
This is because in the constructor of DataChunkIterator we have:
```python
if self.max_shape is None:
    # If the self.data object identifies it shape then use it
    if hasattr(self.data, "shape"):
        self.max_shape = self.data.shape
        # Avoid the special case of scalar values by making them into a 1D numpy array
        if len(self.max_shape) == 0:
            self.data = np.asarray([self.data, ])
            self.max_shape = self.data.shape
            self.__data_iter = iter(self.data)
    # Try to get an accurate idea of max_shape for other Python datastructures if possible.
    # Don't just call get_shape for a generator as that would potentially trigger loading of all the data
    elif isinstance(self.data, list) or isinstance(self.data, tuple):
        self.max_shape = ShapeValidator.get_data_shape(self.data)
```
Initially, `max_shape is None` will be true, and `data` will not have a `shape` attribute. So the flow of control will reach the line ```elif isinstance(self.data, list) or isinstance(self.data, tuple):```
However, this will not evaluate to true in Python 3, because `data` is of type `range` (in Python 2 it is of type `list`, so the line evaluates to True and `max_shape` is set to `(10,)`).
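A quick interpreter check of that claim (plain Python 3, nothing project-specific):
```python
>>> isinstance(range(10), (list, tuple))
False    # in Python 3, range is its own lazy type, not a list
>>> isinstance(range(10), (list, tuple, range))
True     # the proposed check would cover it
>>> len(range(10))
10       # so a max_shape of (10,) is still recoverable
```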
In python3 max_shape will not be set, so the test will pass, but I think this is incorrect behavior for the test. I think ```elif isinstance(self.data, list) or isinstance(self.data, tuple):``` should be replaced by ```elif isinstance(self.data, (list, tuple, range)):``` and tests or other code should be adjusted accordingly. This is a short job, but one prone to mistakes, and since it involves rewriting test behavior, I vote for @ajtritt to make the modifications if he agrees.
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkIteratorTests::test_standard_iterator_unbuffered",
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkIteratorTests::test_standard_iterator_unmatched_buffersized"
] | [
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkIteratorTests::test_multidimensional_list",
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkIteratorTests::test_none_iter",
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkIteratorTests::test_numpy_iter_unmatched_buffer_size",
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkTests::test_len_operator_no_data",
"tests/unit/form_tests/utils_test/test_core_DataChunkIterator.py::DataChunkTests::test_len_operator_with_data"
] | {
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
} | "2017-08-08T07:38:20Z" | bsd-3-clause |
|
Nicoretti__crc-153 | diff --git a/docs/docs/changelog/unreleased.md b/docs/docs/changelog/unreleased.md
index 30c1938..49f4d41 100644
--- a/docs/docs/changelog/unreleased.md
+++ b/docs/docs/changelog/unreleased.md
@@ -1,5 +1,12 @@
# Unreleased
+## 🐞 Bug Fixes
+* Fixed unstable return values of `digest` function. For more details, see issue [#151](https://github.com/Nicoretti/crc/issues/151).
+
+ !!! bug
+ This issue specifically affected scenarios where the CRC register was manually manipulated. Standard usages of the `Calculator` class were not impacted.
+ Furthermore, this issue primarily occurred in configurations that required reverse output.
+
## 📚 Documentation
* Add overview of crc configurations
diff --git a/pyproject.toml b/pyproject.toml
index a38e36a..e3c2a8a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "crc"
-version = "6.1.1"
+version = "6.1.2"
description = "Library and CLI to calculate and verify all kinds of CRC checksums"
packages = [
{ include = "crc", from = "src" },
diff --git a/src/crc/_crc.py b/src/crc/_crc.py
index fb7a546..72c8779 100644
--- a/src/crc/_crc.py
+++ b/src/crc/_crc.py
@@ -253,9 +253,8 @@ class BasicRegister(AbstractRegister):
"""
See `AbstractRegister.digest`
"""
- if self._config.reverse_output:
- self.register = self.reverse()
- return self.register ^ self._config.final_xor_value
+ value = self.reverse() if self._config.reverse_output else self.register
+ return value ^ self._config.final_xor_value
def reverse(self) -> int:
"""
| Nicoretti/crc | 8d928145df5caf588d9ff5af83bd72feb241fd1e | diff --git a/test/unit/regression/test_unstable_digest.py b/test/unit/regression/test_unstable_digest.py
new file mode 100644
index 0000000..ea7f7f7
--- /dev/null
+++ b/test/unit/regression/test_unstable_digest.py
@@ -0,0 +1,42 @@
+"""
+This module contains regression tests regarding the issue of unstable return values from the `digest` function.
+The tests ensure that the `digest` function consistently returns the expected output for given inputs.
+For more context and a detailed discussion of the problem, refer to the GitHub issue:
+https://github.com/Nicoretti/crc/issues/151
+"""
+
+import itertools
+
+import pytest
+
+import crc
+
+
+def test_original_regression():
+ reg = crc.Register(crc.Crc8.BLUETOOTH)
+ reg.init()
+ reg.update(b"Hello World!")
+
+ times = 10
+ expected = [81 for _ in range(0, times)]
+ actual = [reg.digest() for _ in range(0, times)]
+
+ assert actual == expected
+
+
[email protected](
+ "configuration,times,expected",
+ [
+ (config, 10, crc.Calculator(config).checksum(b"Hello World!"))
+ for config in itertools.chain(crc.Crc8, crc.Crc16, crc.Crc32, crc.Crc64)
+ ],
+)
+def test_digest_is_stable(configuration, times, expected):
+ expected = [expected for _ in range(times)]
+
+ reg = crc.Register(configuration)
+ reg.init()
+ reg.update(b"Hello World!")
+ actual = [reg.digest() for _ in range(times)]
+
+ assert actual == expected
| 🐞 Return of digest() is not stable
# Summary
Multiple calls to `digest()` don't return the same answers.
## Reproducing the Issue
```
>>> import crc
>>> r = crc.Register(crc.Crc8.BLUETOOTH)
>>> r.init()
>>> r.update(b"Hello World!")
138
>>> r.digest()
81
>>> r.digest()
138
>>> r.digest()
81
```
## Expected Behaviour
Expected multiple calls to `digest()` to return the same value.
## Actual Behaviour
The return value flip-flops between two values.
## Root Cause (optional)
I think it is caused by the in-place reverse in `digest()`; see [L256-L257](https://github.com/Nicoretti/crc/blob/8d928145df5caf588d9ff5af83bd72feb241fd1e/src/crc/_crc.py#L256-L257)
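As a minimal, self-contained sketch (hypothetical names such as `reverse_bits` and `BuggyRegister`, not the library's actual classes), the flip-flop follows from reversing the register bits in place: the second call reverses them back, so consecutive calls alternate between the two values.
```
# Minimal sketch (hypothetical names, not crc's real implementation) of why
# an in-place reverse makes digest() alternate between two values.
def reverse_bits(value: int, width: int) -> int:
    result = 0
    for _ in range(width):
        result = (result << 1) | (value & 1)
        value >>= 1
    return result

class BuggyRegister:
    def __init__(self, value: int, width: int = 8) -> None:
        self.register = value
        self.width = width

    def digest(self) -> int:
        # Bug pattern: writing the reversed value back to the register means
        # the next call reverses it again, so the result flip-flops.
        self.register = reverse_bits(self.register, self.width)
        return self.register

reg = BuggyRegister(0x8A)                # 138, the register value from the report
print([reg.digest() for _ in range(4)])  # [81, 138, 81, 138]
```
Computing the reversed value into a local variable instead of writing it back to the register removes the mutation and keeps `digest()` stable across calls.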
| 0 | 2401580b6f41fe72f1360493ee46e8a842bd04ba | [
"test/unit/regression/test_unstable_digest.py::test_original_regression",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.BLUETOOTH-10-81]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.MAXIM_DOW-10-158]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc16.MODBUS-10-21978]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc32.CRC32-10-472456355]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc32.AUTOSAR-10-1260180594]"
] | [
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.CCITT-10-28]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.SAEJ1850-10-1]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.SAEJ1850_ZERO-10-178]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc8.AUTOSAR-10-55]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc16.CCITT-10-3283]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc16.GSM-10-62252]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc16.PROFIBUS-10-54720]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc16.IBM_3740-10-34858]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc32.BZIP2-10-1796897966]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc32.POSIX-10-1652959375]",
"test/unit/regression/test_unstable_digest.py::test_digest_is_stable[Crc64.CRC64-10-18075662732174520837]"
] | {
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
} | "2024-04-14T08:21:29Z" | bsd-2-clause |