From cea7d69ea2d97a11f7d4ce3838a7a673b14e5697 Mon Sep 17 00:00:00 2001 From: Ricardo Garcia Silva Date: Thu, 14 Mar 2024 18:31:39 +0000 Subject: [PATCH] Adding downloader for a thredds catalog --- arpav_ppcv/main.py | 414 ++++----------------------------- arpav_ppcv/thredds/__init__.py | 0 arpav_ppcv/thredds/crawler.py | 163 +++++++++++++ arpav_ppcv/thredds/models.py | 102 ++++++++ poetry.lock | 20 +- pyproject.toml | 2 + 6 files changed, 324 insertions(+), 377 deletions(-) create mode 100644 arpav_ppcv/thredds/__init__.py create mode 100644 arpav_ppcv/thredds/crawler.py create mode 100644 arpav_ppcv/thredds/models.py diff --git a/arpav_ppcv/main.py b/arpav_ppcv/main.py index dc2654ad..06d6ca59 100644 --- a/arpav_ppcv/main.py +++ b/arpav_ppcv/main.py @@ -1,386 +1,66 @@ """Command-line interface for the project.""" -import dataclasses -import datetime as dt import enum -import itertools -import typing +from typing import ( + Annotated, + Optional +) from pathlib import Path -from xml.etree import ElementTree as et -import requests +import anyio +import httpx import typer +from .thredds import crawler -@dataclasses.dataclass -class ForecastTemporalPeriodMetadata: - name: str - code: str +class KnownCatalogIdentifier(enum.Enum): + THIRTY_YEAR_ANOMALY_5_MODEL_AVERAGE = "30y-anomaly-ensemble" + THIRTY_YEAR_ANOMALY_TEMPERATURE_PRECIPITATION = "30y-anomaly-tas-pr" + THIRTY_YEAR_ANOMALY_CLIMATIC_INDICES = "30y-anomaly-climate-idx" -class ForecastTemporalPeriod(enum.Enum): - TW1 = ForecastTemporalPeriodMetadata(name="2021 - 2050", code="tw1") - TW2 = ForecastTemporalPeriodMetadata(name="2071 - 2100", code="tw2") - -@dataclasses.dataclass -class ForecastSeasonMetadata: - name: str - code: str - - -class ForecastSeason(enum.Enum): - DJF = ForecastSeasonMetadata(name="Winter", code="DJF") - MAM = ForecastSeasonMetadata(name="Spring", code="MAM") - JJA = ForecastSeasonMetadata(name="Summer", code="JJA") - SON = ForecastSeasonMetadata(name="Autumn", code="SON") - - -@dataclasses.dataclass -class ForecastModelMetadata: - name: str - code: str - thredds_base_path: str - - -@dataclasses.dataclass -class ForecastScenarioMetadata: - name: str - code: str - - -class ForecastScenario(enum.Enum): - RCP26 = ForecastScenarioMetadata(name="RCP26", code="rcp26") - RCP45 = ForecastScenarioMetadata(name="RCP45", code="rcp45") - RCP85 = ForecastScenarioMetadata(name="RCP85", code="rcp85") - - -class ForecastAnomalyVariablePathPattern(enum.Enum): - TAS_ENSEMBLE = "ensembletwbc/clipped/tas_avg_anom_{period}_{scenario}_{season}_VFVGTAA.nc" - TASMIN_ENSEMBLE = "ensembletwbc/clipped/tasmin_avg_anom_{period}_{scenario}_{season}_VFVGTAA.nc" - TASMAX_ENSEMBLE = "ensembletwbc/clipped/tasmax_avg_anom_{period}_{scenario}_{season}_VFVGTAA.nc" - PR_ENSEMBLE = "ensembletwbc/clipped/pr_avg_percentage_{period}_{scenario}_{season}_VFVGTAA.nc" - TR_ENSEMBLE = "ensembletwbc/clipped/ecatran_20_avg_{period}_{scenario}_ls_VFVG.nc" - SU30_ENSEMBLE = "ensembletwbc/clipped/ecasuan_30_avg_{period}_{scenario}_ls_VFVG.nc" - FD_ENSEMBLE = "ensembletwbc/clipped/ecafdan_0_avg_{period}_{scenario}_ls_VFVG.nc" - HWDI_ENSEMBLE = "ensembletwbc/clipped/heat_waves_anom_avg_55_{period}_{scenario}_JJA_VFVGTAA.nc" - TAS_ECEARTHCCLM4817 = "taspr5rcm/clipped/tas_EC-EARTH_CCLM4-8-17_{scenario}_seas_{period}{season}_VFVGTAA.nc" - TASMIN_ECEARTHCCLM4817 = "taspr5rcm/clipped/tasmin_EC-EARTH_CCLM4-8-17_{scenario}_seas_{period}{season}_VFVGTAA.nc" - TASMAX_ECEARTHCCLM4817 = "taspr5rcm/clipped/tasmax_EC-EARTH_CCLM4-8-17_{scenario}_seas_{period}{season}_VFVGTAA.nc" - 
HWDI_ECEARTHCCLM4817 = "indici5rcm/clipped/heat_waves_anom_EC-EARTH_CCLM4-8-17_{scenario}_JJA_55_{period}_VFVGTAA.nc" +def _get_catalog_url(catalog_identifier: KnownCatalogIdentifier) -> str: + return { + KnownCatalogIdentifier.THIRTY_YEAR_ANOMALY_5_MODEL_AVERAGE: ( + "https://thredds.arpa.veneto.it/thredds/catalog/ensembletwbc/clipped"), + KnownCatalogIdentifier.THIRTY_YEAR_ANOMALY_TEMPERATURE_PRECIPITATION: ( + "https://thredds.arpa.veneto.it/thredds/catalog/taspr5rcm/clipped"), + KnownCatalogIdentifier.THIRTY_YEAR_ANOMALY_CLIMATIC_INDICES: ( + "https://thredds.arpa.veneto.it/thredds/catalog/indici5rcm/clipped"), + }[catalog_identifier] app = typer.Typer() - @app.command() -def import_anomaly_forecast_datasets(target_dir: Path): - download_url_pattern = ( - "https://thredds.arpa.veneto.it/thredds/fileServer/{path}" - ) - session = requests.Session() - seasonal_patterns = [] - annual_patterns = [] - for pattern in ForecastAnomalyVariablePathPattern: - if all( - ( - "{season}" in pattern.value, - "{scenario}" in pattern.value, - "{period}" in pattern.value, +def import_thredds_datasets( + catalog: Annotated[KnownCatalogIdentifier, typer.Argument(...)], + output_base_dir: Annotated[ + Optional[Path], + typer.Option( + help=( + "Where datasets should be downloaded to. If this parameter is " + "not provided, only the total number of found datasets " + "is shown." ) - ): - seasonal_patterns.append(pattern) - else: - annual_patterns.append(pattern) - - paths = [] - for seasonal_pattern in seasonal_patterns: - combinator = itertools.product( - ForecastScenario, - ForecastTemporalPeriod, - ForecastSeason, - ) - for scenario, period, season in combinator: - path = seasonal_pattern.value.format( - scenario=scenario.value.code, - period=period.value.code, - season=season.value.code - ) - paths.append((seasonal_pattern, path)) - for annual_pattern in annual_patterns: - combinator = itertools.product( - ForecastScenario, - ForecastTemporalPeriod, - ) - for scenario, period in combinator: - path = annual_pattern.value.format( - scenario=scenario.value.code, - period=period.value.code, ) - paths.append((annual_pattern, path)) - for pattern, path in paths: - print(f"Processing {pattern.name!r}...") - output_path = target_dir / path - if not output_path.exists(): - print(f"Saving {output_path!r}...") - download_url = download_url_pattern.format(path=path) - response = session.get(download_url, stream=True) - response.raise_for_status() - output_path.parent.mkdir(parents=True, exist_ok=True) - with output_path.open("wb") as fh: - for chunk in response.iter_content(): - fh.write(chunk) - else: - print(f"path already exists ({output_path!r}) - skipping") - print("Done!") - -# -# -# @dataclasses.dataclass -# class ThreddsWildcardFilterSelector: -# type_: typing.Literal["wildcard", "regexp"] -# value: str -# applies_to_datasets: bool = True -# applies_to_collections: bool = False -# -# -# @dataclasses.dataclass -# class ThreddsDatasetScanFilter: -# includes: list[ThreddsWildcardFilterSelector] = dataclasses.field( -# default_factory=list) -# excludes: list[ThreddsWildcardFilterSelector] = dataclasses.field( -# default_factory=list) - - -@dataclasses.dataclass -class ThreddsClientService: - name: str - service_type: str - base: str - - -@dataclasses.dataclass -class ThreddsClientPublicDataset: - name: str - id: str - url_path: str - - -@dataclasses.dataclass -class ThreddsClientCatalogRef: - title: str - id: str - name: str - href: str - - -@dataclasses.dataclass -class ThreddsClientDataset: - name: str - 
properties: dict[str, str] - metadata: dict[str, str] - public_datasets: list[ThreddsClientPublicDataset] - catalog_refs: list[ThreddsClientCatalogRef] - - -@dataclasses.dataclass -class ThreddsClientCatalog: - url: str - services: dict[str, ThreddsClientService] - dataset: ThreddsClientDataset - - -_NAMESPACES: typing.Final = { - "thredds": "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0", - "xlink": "http://www.w3.org/1999/xlink", -} - - -def discover_catalog_contents( - catalog_host: str, - catalog_ref: str, - http_client: requests.Session, - use_https: bool = True, -) -> ThreddsClientCatalog: - """ - host: thredds.arpa.veneto.it - catalog_ref: /thredds/catalog/ensembletwbc/clipped - """ - - - url = _build_catalog_url(catalog_host, catalog_ref, use_https) - response = http_client.get(url) - response.raise_for_status() - raw_catalog_description = response.content - parsed_services, parsed_dataset = _parse_catalog_client_description( - raw_catalog_description) - return ThreddsClientCatalog( - url=url, - services={service.service_type: service for service in parsed_services}, - dataset=parsed_dataset - ) - - -def build_download_urls( - catalog_host: str, - catalog_contents: ThreddsClientCatalog, - use_https: bool = True -) -> dict[str, str]: - urls = {} - url_pattern = "{scheme}://{host}/{service_base}/{dataset_path}" - for dataset in catalog_contents.dataset.public_datasets: - urls[dataset.id] = url_pattern.format( - scheme="https" if use_https else "http", - host=catalog_host, - service_base=catalog_contents.services["HTTPServer"].base.strip("/"), - dataset_path=dataset.url_path.strip("/") + ] = None, + wildcard_filter: Annotated[ + str, + typer.Option(help="Wildcard filter for selecting only relevant datasets") + ] = "*" +): + catalog_url = _get_catalog_url(catalog) + client = httpx.Client() + print(f"Parsing catalog contents...") + contents = crawler.discover_catalog_contents(catalog_url, client) + print(f"Found {len(contents.get_public_datasets(wildcard_filter))} datasets") + if output_base_dir is not None: + print("Downloading datasets...") + anyio.run( + crawler.download_datasets, + output_base_dir, + contents, + wildcard_filter ) - return urls - - - -def _build_catalog_url(host: str, catalog_ref: str, use_https: bool = True) -> str: - return "{scheme}://{host}/{path}/catalog.xml".format( - scheme="https" if use_https else "http", - host=host, - path=catalog_ref.strip("/") - ) - - -def _parse_catalog_client_description( - catalog_description: bytes -) -> tuple[list[ThreddsClientService], ThreddsClientDataset]: - root_element = et.fromstring(catalog_description) - service_qn = et.QName(_NAMESPACES["thredds"], "service") - dataset_qn = et.QName(_NAMESPACES["thredds"], "dataset") - services = [] - for service_element in root_element.findall(f"./{service_qn}/"): - service = _parse_service_element(service_element) - services.append(service) - dataset = _parse_dataset_element( - root_element.findall(f"./{dataset_qn}")[0]) - return services, dataset - - -def _parse_service_element(service_el: et.Element) -> ThreddsClientService: - return ThreddsClientService( - name=service_el.get("name", default=""), - service_type=service_el.get("serviceType", default=""), - base=service_el.get("base", default="") - ) - - -def _parse_dataset_element(dataset_el: et.Element) -> ThreddsClientDataset: - prop_qname = et.QName(_NAMESPACES["thredds"], "property") - meta_qname = et.QName(_NAMESPACES["thredds"], "metadata") - ds_qname = et.QName(_NAMESPACES["thredds"], "dataset") - cat_ref_qname = 
et.QName(_NAMESPACES["thredds"], "catalogRef") - properties = {} - metadata = {} - public_datasets = [] - catalog_references = [] - for element in dataset_el.findall("./"): - match element.tag: - case prop_qname.text: - properties[element.get("name")] = element.get("value") - case meta_qname.text: - for metadata_element in element.findall("./"): - key = metadata_element.tag.replace( - f"{{{_NAMESPACES['thredds']}}}", "") - metadata[key] = metadata_element.text - case ds_qname.text: - public_ds = ThreddsClientPublicDataset( - name=element.get("name", ""), - id=element.get("ID", ""), - url_path=element.get("urlPath", ""), - ) - public_datasets.append(public_ds) - case cat_ref_qname.text: - title_qname = et.QName(_NAMESPACES["xlink"], "title") - href_qname = et.QName(_NAMESPACES["xlink"], "href") - catalog_ref = ThreddsClientCatalogRef( - title=element.get(title_qname.text, ""), - id=element.get("ID", ""), - name=element.get("name", ""), - href=element.get(href_qname.text, ""), - ) - catalog_references.append(catalog_ref) - return ThreddsClientDataset( - name=dataset_el.get("name", default=""), - properties=properties, - metadata=metadata, - public_datasets=public_datasets, - catalog_refs=catalog_references, - ) -# -# -# @dataclasses.dataclass -# class ThreddsDatasetScan: -# """DatasetScan defines a single mapping between a URL base path and a directory. -# -# DatasetScan configuration enables TDS to discover and serve some or all of the -# datasets found in the mapped directory. It generates nested catalogs by scanning -# the directory named in the `location` property and creating a `Dataset` for each -# file found and a `CatalogRef` for each subdirectory -# -# In the THREDDS configuration, a DatasetScan element can be used wherever a Dataset -# is expected. -# -# In the client view, DatasetScan elements get converted to CatalogRef elements -# -# """ -# -# name: str -# id: str -# path: str # is used to create the URL for files and catalogs, must be globally unique and must not contain leading or trailing slashes -# location: str # must be an absolute path to a directory -# filter_: ThreddsDatasetScanFilter | None = None -# -# def build_client_catalog_url(self): -# ... -# -# def build_download_url(self) -> str: -# ... -# -# -# ensemble_5rcm_bc = ThreddsDatasetScan( -# name="ENSEMBLE 5rcm BC", -# id="ensembletwbc", -# path="", -# location="", -# filter_=ThreddsDatasetScanFilter( -# excludes=[ -# ThreddsWildcardFilterSelector(type_="wildcard", value="heat_waves_avg_*.nc"), -# ThreddsWildcardFilterSelector(type_="wildcard", value="heat_waves_*DJF.nc"), -# ThreddsWildcardFilterSelector(type_="wildcard", value="ecasuan_25_*.nc"), -# ThreddsWildcardFilterSelector( -# type_="wildcard", value="ts", -# applies_to_datasets=False, -# applies_to_collections=True, -# ), -# ThreddsWildcardFilterSelector( -# type_="wildcard", value="thralert", -# applies_to_datasets=False, -# applies_to_collections=True, -# ), -# ThreddsWildcardFilterSelector( -# type_="w_null", value="thralert", -# applies_to_datasets=False, -# applies_to_collections=True, -# ), -# ] -# ) -# ) -# -# -# @dataclasses.dataclass -# class AnomalyModelMetadata: -# name: str -# id: str -# path: Path -# location: str -# -# -# @app.command() -# def get_anomaly_data(target_dir: Path): -# ... 
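[Editorial note, not part of the patch: assuming the Typer app defined above is wired up as the project's command-line entry point (the entry point itself is not shown in this diff), the new `import_thredds_datasets` command could be invoked roughly as follows; the module path, output directory and wildcard are illustrative placeholders.]

    python -m arpav_ppcv.main import-thredds-datasets 30y-anomaly-tas-pr \
        --output-base-dir /tmp/thredds-downloads \
        --wildcard-filter "tas*"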
diff --git a/arpav_ppcv/thredds/__init__.py b/arpav_ppcv/thredds/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/arpav_ppcv/thredds/crawler.py b/arpav_ppcv/thredds/crawler.py new file mode 100644 index 00000000..5c08bc8d --- /dev/null +++ b/arpav_ppcv/thredds/crawler.py @@ -0,0 +1,163 @@ +import logging +import typing +import urllib.parse +from itertools import islice +from pathlib import Path +from xml.etree import ElementTree as et + +import anyio +import httpx + +from . import models + +logger = logging.getLogger(__name__) + + +_NAMESPACES: typing.Final = { + "thredds": "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0", + "xlink": "http://www.w3.org/1999/xlink", +} + + +def discover_catalog_contents( + catalog_reference_url: str, + http_client: httpx.Client, +) -> models.ThreddsClientCatalog: + """ + catalog_reference_url: + https://thredds.arpa.veneto.it/thredds/catalog/ensembletwbc/clipped + """ + + response = http_client.get(catalog_reference_url) + response.raise_for_status() + raw_catalog_description = response.content + parsed_services, parsed_dataset = _parse_catalog_client_description( + raw_catalog_description) + return models.ThreddsClientCatalog( + url=urllib.parse.urlparse(catalog_reference_url), + services={service.service_type: service for service in parsed_services}, + dataset=parsed_dataset + ) + + +async def download_datasets( + output_base_directory: Path, + catalog_contents: models.ThreddsClientCatalog, + dataset_wildcard_filter: str +) -> None: + client = httpx.AsyncClient() + relevant_datasets = catalog_contents.get_public_datasets( + dataset_wildcard_filter) + for batch in _batched(relevant_datasets.values(), 10): + logger.info(f"processing new batch") + async with anyio.create_task_group() as tg: + for public_dataset in batch: + logger.info(f"processing dataset {public_dataset.id!r}...") + tg.start_soon( + download_individual_dataset, + public_dataset.id, + catalog_contents, + output_base_directory, + client + ) + + +async def download_individual_dataset( + dataset_id: str, + catalog_contents: models.ThreddsClientCatalog, + output_base_directory: Path, + http_client: httpx.AsyncClient +) -> None: + url = catalog_contents.build_dataset_download_url(dataset_id) + async with http_client.stream("GET", url) as response: + response.raise_for_status() + output_path = output_base_directory / dataset_id + output_dir = output_path.parent + output_dir.mkdir(parents=True, exist_ok=True) + with output_path.open("wb") as fh: + async for chunk in response.aiter_bytes(): + fh.write(chunk) + + +def _parse_catalog_client_description( + catalog_description: bytes +) -> tuple[list[models.ThreddsClientService], models.ThreddsClientDataset]: + root_element = et.fromstring(catalog_description) + service_qn = et.QName(_NAMESPACES["thredds"], "service") + dataset_qn = et.QName(_NAMESPACES["thredds"], "dataset") + services = [] + for service_element in root_element.findall(f"./{service_qn}/"): + service = _parse_service_element(service_element) + services.append(service) + dataset = _parse_dataset_element( + root_element.findall(f"./{dataset_qn}")[0]) + return services, dataset + + +def _parse_service_element(service_el: et.Element) -> models.ThreddsClientService: + return models.ThreddsClientService( + name=service_el.get("name", default=""), + service_type=service_el.get("serviceType", default=""), + base=service_el.get("base", default="") + ) + + +def _parse_dataset_element(dataset_el: et.Element) -> models.ThreddsClientDataset: + prop_qname = 
et.QName(_NAMESPACES["thredds"], "property") + meta_qname = et.QName(_NAMESPACES["thredds"], "metadata") + ds_qname = et.QName(_NAMESPACES["thredds"], "dataset") + cat_ref_qname = et.QName(_NAMESPACES["thredds"], "catalogRef") + properties = {} + metadata = {} + public_datasets = {} + catalog_references = {} + for element in dataset_el.findall("./"): + match element.tag: + case prop_qname.text: + properties[element.get("name")] = element.get("value") + case meta_qname.text: + for metadata_element in element.findall("./"): + key = metadata_element.tag.replace( + f"{{{_NAMESPACES['thredds']}}}", "") + metadata[key] = metadata_element.text + case ds_qname.text: + public_ds = models.ThreddsClientPublicDataset( + name=element.get("name", ""), + id=element.get("ID", ""), + url_path=element.get("urlPath", ""), + ) + public_datasets[public_ds.id] = public_ds + case cat_ref_qname.text: + title_qname = et.QName(_NAMESPACES["xlink"], "title") + href_qname = et.QName(_NAMESPACES["xlink"], "href") + catalog_ref = models.ThreddsClientCatalogRef( + title=element.get(title_qname.text, ""), + id=element.get("ID", ""), + name=element.get("name", ""), + href=element.get(href_qname.text, ""), + ) + catalog_references[catalog_ref.id] = catalog_ref + return models.ThreddsClientDataset( + name=dataset_el.get("name", default=""), + properties=properties, + metadata=metadata, + public_datasets=public_datasets, + catalog_refs=catalog_references, + ) + + +def _batched(iterable, n): + """Custom implementation of `itertools.batched()`. + + This is a custom implementation of `itertools.batched()`, which is only availble + on Python 3.12+. This is copied verbatim from the python docs at: + + https://docs.python.org/3/library/itertools.html#itertools.batched + + """ + # batched('ABCDEFG', 3) --> ABC DEF G + if n < 1: + raise ValueError('n must be at least one') + it = iter(iterable) + while batch := tuple(islice(it, n)): + yield batch \ No newline at end of file diff --git a/arpav_ppcv/thredds/models.py b/arpav_ppcv/thredds/models.py new file mode 100644 index 00000000..24b2de49 --- /dev/null +++ b/arpav_ppcv/thredds/models.py @@ -0,0 +1,102 @@ +import dataclasses +import enum +import fnmatch +import urllib.parse + + +@dataclasses.dataclass +class ForecastTemporalPeriodMetadata: + name: str + code: str + + +class ForecastTemporalPeriod(enum.Enum): + TW1 = ForecastTemporalPeriodMetadata(name="2021 - 2050", code="tw1") + TW2 = ForecastTemporalPeriodMetadata(name="2071 - 2100", code="tw2") + + +@dataclasses.dataclass +class ForecastSeasonMetadata: + name: str + code: str + + +class ForecastSeason(enum.Enum): + DJF = ForecastSeasonMetadata(name="Winter", code="DJF") + MAM = ForecastSeasonMetadata(name="Spring", code="MAM") + JJA = ForecastSeasonMetadata(name="Summer", code="JJA") + SON = ForecastSeasonMetadata(name="Autumn", code="SON") + + +@dataclasses.dataclass +class ForecastScenarioMetadata: + name: str + code: str + + +class ForecastScenario(enum.Enum): + RCP26 = ForecastScenarioMetadata(name="RCP26", code="rcp26") + RCP45 = ForecastScenarioMetadata(name="RCP45", code="rcp45") + RCP85 = ForecastScenarioMetadata(name="RCP85", code="rcp85") + + +class AveragingPeriod(enum.Enum): + YEAR = "year" + THIRTY_YEAR = "thirty-year" + + + +@dataclasses.dataclass +class ThreddsClientService: + name: str + service_type: str + base: str + + +@dataclasses.dataclass +class ThreddsClientPublicDataset: + name: str + id: str + url_path: str + + +@dataclasses.dataclass +class ThreddsClientCatalogRef: + title: str + id: str + name: str + 
href: str + + +@dataclasses.dataclass +class ThreddsClientDataset: + name: str + properties: dict[str, str] + metadata: dict[str, str] + public_datasets: dict[str, ThreddsClientPublicDataset] + catalog_refs: dict[str, ThreddsClientCatalogRef] + + +@dataclasses.dataclass +class ThreddsClientCatalog: + url: urllib.parse.ParseResult + services: dict[str, ThreddsClientService] + dataset: ThreddsClientDataset + + def build_dataset_download_url(self, dataset_id: str) -> str: + dataset = self.dataset.public_datasets[dataset_id] + url_pattern = "{scheme}://{host}/{service_base}/{dataset_path}" + return url_pattern.format( + scheme=self.url.scheme, + host=self.url.netloc, + service_base=self.services["HTTPServer"].base.strip("/"), + dataset_path=dataset.url_path.strip("/") + ) + + def get_public_datasets( + self, + wildcard_pattern: str = "*" + ) -> dict[str, ThreddsClientPublicDataset]: + relevant_ids = fnmatch.filter( + self.dataset.public_datasets.keys(), wildcard_pattern) + return {id_: self.dataset.public_datasets[id_] for id_ in relevant_ids} diff --git a/poetry.lock b/poetry.lock index 080d30ec..60e01b86 100644 --- a/poetry.lock +++ b/poetry.lock @@ -19,7 +19,7 @@ vine = ">=1.1.3,<5.0.0a1" name = "anyio" version = "4.3.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "dev" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -814,7 +814,7 @@ files = [ name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -871,7 +871,7 @@ files = [ name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -883,7 +883,7 @@ files = [ name = "httpcore" version = "1.0.3" description = "A minimal low-level HTTP client." -category = "dev" +category = "main" optional = false python-versions = ">=3.8" files = [ @@ -903,14 +903,14 @@ trio = ["trio (>=0.22.0,<0.24.0)"] [[package]] name = "httpx" -version = "0.26.0" +version = "0.27.0" description = "The next generation HTTP client." 
-category = "dev" +category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"}, - {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"}, + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, ] [package.dependencies] @@ -1880,7 +1880,7 @@ files = [ name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2283,4 +2283,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "1b996c98d861dfc3c1238b03f10f520d4f4f9d733c05a0dd6600e1a19592dc7a" +content-hash = "1fad407bb1c0f0d077ed051b1a1641ebfc03d0433c61ffec175bf814c5b1f7f7" diff --git a/pyproject.toml b/pyproject.toml index 5397c269..49ec9a42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,6 +38,8 @@ requests = "2.23.0" python-dateutil = "2.8.1" pytz = "2020.1" django-redis-sessions = "0.6.1" +httpx = "^0.27.0" +anyio = "^4.3.0" [tool.poetry.group.dev.dependencies]
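
[Editorial note, not part of the patch: a minimal sketch of how the new `arpav_ppcv.thredds.crawler` module could be exercised programmatically, mirroring what the CLI command does. The catalog URL and function signatures are taken from the patch; the output directory and wildcard pattern are placeholders.]

    import anyio
    import httpx
    from pathlib import Path

    from arpav_ppcv.thredds import crawler

    catalog_url = "https://thredds.arpa.veneto.it/thredds/catalog/ensembletwbc/clipped"
    client = httpx.Client()

    # Parse the catalog's client description into services and public datasets
    contents = crawler.discover_catalog_contents(catalog_url, client)

    # Select only the datasets whose THREDDS ids match a wildcard pattern
    datasets = contents.get_public_datasets("tas*")
    print(f"Found {len(datasets)} datasets")

    # Download the selected datasets in batches of 10 via an anyio task group
    anyio.run(
        crawler.download_datasets,
        Path("/tmp/thredds-downloads"),
        contents,
        "tas*",
    )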