diff --git a/environment.yml b/environment.yml
index 9282d31bc..e4dbfdd94 100644
--- a/environment.yml
+++ b/environment.yml
@@ -29,7 +29,7 @@ dependencies:
   - pip==23.1.2
   - turbodbc==4.5.10
   - numpy>=1.23.4
-  - pandas>=1.3.0,<3.0.0
+  - pandas>=1.5.2,<3.0.0
   - oauthlib>=3.2.2
   - cryptography>=38.0.3
   - azure-identity==1.12.0
@@ -54,7 +54,7 @@ dependencies:
   - pygments==2.16.1
   - pymdown-extensions==10.1.0
   - databricks-sql-connector==2.9.2
-  - databricks-sdk==0.2.1
+  - databricks-sdk==0.6.0
   - semver==3.0.0
   - xlrd==2.0.1
   - pygithub==1.59.0
@@ -64,17 +64,17 @@ dependencies:
   - delta-sharing-python==0.7.4
   - polars==0.18.8
   - moto[s3]==4.1.14
-  - xarray>=2023.1.0
-  - ecmwf-api-client>=1.6.3
-  - netCDF4>=1.6.4
+  - xarray>=2023.1.0,<2023.8.0
+  - ecmwf-api-client==1.6.3
+  - netCDF4==1.6.4
   - black==23.7.0
   - pip:
     - dependency-injector==4.41.0
     - azure-functions==1.15.0
     - nest_asyncio==1.5.6
     - hvac==1.1.1
+    - langchain==0.0.268
     - build==0.10.0
-    - langchain>=0.0.239,<=0.0.247
     - deltalake==0.10.1
     - mkdocs-material==9.2.0b3
\ No newline at end of file
diff --git a/setup.py b/setup.py
index d5e746f38..db713fa4c 100644
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
     "databricks-sql-connector==2.9.2",
     "azure-identity==1.12.0",
     "pyodbc==4.0.39",
-    "pandas>=2.0.1,<3.0.0",
+    "pandas>=1.5.2,<3.0.0",
    "jinja2==3.1.2",
     "importlib_metadata>=1.0.0",
     "semver==3.0.0",
@@ -40,7 +40,7 @@
     "grpcio>=1.48.1",
     "grpcio-status>=1.48.1",
     "googleapis-common-protos>=1.56.4",
-    "langchain>=0.0.239,<=0.0.247",
+    "langchain==0.0.268",
     "openai==0.27.8",
 ]

@@ -60,7 +60,7 @@
     "web3==6.5.0",
     "polars[deltalake]==0.18.8",
     "delta-sharing==0.7.4",
-    "xarray==2023.8.0",
+    "xarray>=2023.1.0,<2023.8.0",
     "ecmwf-api-client==1.6.3",
     "netCDF4==1.6.4",
 ]
diff --git a/src/sdk/python/rtdip_sdk/_sdk_utils/pandas.py b/src/sdk/python/rtdip_sdk/_sdk_utils/pandas.py
index 67b89ee7b..622bce0a7 100644
--- a/src/sdk/python/rtdip_sdk/_sdk_utils/pandas.py
+++ b/src/sdk/python/rtdip_sdk/_sdk_utils/pandas.py
@@ -19,8 +19,11 @@
 def _prepare_pandas_to_convert_to_spark(df: DataFrame) -> DataFrame:
     # Spark < 3.4.0 does not support iteritems method in Pandas > 2.0.1
     try:
-        _package_version_meets_minimum("pyspark", "3.4.0")
+        _package_version_meets_minimum("pandas", "2.0.0")
+        try:
+            _package_version_meets_minimum("pyspark", "3.4.0")
+        except:
+            df.iteritems = df.items
+        return df
     except:
-        df.iteritems = df.items
-
-        return df
+        return df