Thanks to visit codestin.com
Credit goes to github.com

Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion src/api/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,5 @@ oauthlib>=3.2.2
pandas==1.4.2
numpy>=1.22.0
jinja2==3.0.3
jinjasql==0.1.8
jinjasql==0.1.8
pytz==2022.6
2 changes: 1 addition & 1 deletion src/api/v1/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import azure.functions as func
from fastapi import Depends
from src.api.FastAPIApp import app, api_v1_router
from src.api.v1 import metadata, raw, resample, interpolate, graphql
from src.api.v1 import metadata, raw, resample, interpolate, time_weighted_average, graphql
from src.api.auth.azuread import oauth2_scheme

app.include_router(api_v1_router)
Expand Down
5 changes: 4 additions & 1 deletion src/api/v1/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from src.sdk.python.rtdip_sdk.odbc.db_sql_connector import DatabricksSQLConnection
from src.api.auth import azuread

def common_api_setup_tasks(base_query_parameters, metadata_query_parameters = None, raw_query_parameters = None, tag_query_parameters = None, resample_query_parameters = None, interpolate_query_parameters = None):
def common_api_setup_tasks(base_query_parameters, metadata_query_parameters = None, raw_query_parameters = None, tag_query_parameters = None, resample_query_parameters = None, interpolate_query_parameters = None, time_weighted_average_query_parameters = None):
token = azuread.get_azure_ad_token(base_query_parameters.authorization)

connection = DatabricksSQLConnection(os.environ.get("DATABRICKS_SQL_SERVER_HOSTNAME"), os.environ.get("DATABRICKS_SQL_HTTP_PATH"), token)
Expand Down Expand Up @@ -46,5 +46,8 @@ def common_api_setup_tasks(base_query_parameters, metadata_query_parameters = No

if interpolate_query_parameters != None:
parameters = dict(parameters, **interpolate_query_parameters.__dict__)

if time_weighted_average_query_parameters != None:
parameters = dict(parameters, **time_weighted_average_query_parameters.__dict__)

return connection, parameters
14 changes: 13 additions & 1 deletion src/api/v1/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,4 +149,16 @@ def __init__(
self,
interpolation_method: str = Query(..., description="Interpolation Method", examples={"forward_fill": {"value": "forward_fill"}, "backward_fill": {"value": "backward_fill"}}),
):
self.interpolation_method = interpolation_method
self.interpolation_method = interpolation_method

class TimeWeightedAverageQueryParams:
    """Query parameters specific to the time weighted average endpoints.

    Collected via FastAPI dependency injection (``Depends()``) on both the
    GET and POST time weighted average routes.
    """
    def __init__(
        self,
        window_size_mins: int = Query(..., description="Window Size Mins", example=20),
        # Fixed: was `examples=10` — FastAPI's `examples` expects a dict of
        # Example objects, not a bare int; `example` matches window_size_mins.
        window_length: int = Query(..., description="Window Length", example=10),
        # Step controls boundary handling downstream: True/False (bool), or the
        # string "metadata" to look the Step flag up from the tag metadata.
        step: Union[bool, str] = Query(..., description="step", examples={"metadata": {"value": "metadata"}, "True": {"value": True}, "False": {"value": False}}),
    ):
        self.window_size_mins = window_size_mins
        self.window_length = window_length
        self.step = step
69 changes: 69 additions & 0 deletions src/api/v1/time_weighted_average.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
import logging
from src.api.FastAPIApp import api_v1_router
from fastapi import HTTPException, Depends, Body
import nest_asyncio
import json
from src.sdk.python.rtdip_sdk.functions import time_weighted_average
from src.api.v1.models import BaseQueryParams, ResampleInterpolateResponse, HTTPError, RawQueryParams, TagsQueryParams, TagsBodyParams, TimeWeightedAverageQueryParams
import src.api.v1.common

nest_asyncio.apply()

def time_weighted_average_events_get(base_query_parameters, raw_query_parameters, tag_query_parameters, time_weighted_average_parameters):
    """Shared handler backing both the GET and POST time weighted average routes.

    Builds the database connection and merged parameter dict via the common
    setup helper, runs the SDK time weighted average function, and serialises
    the resulting dataframe in pandas "table" orientation. Any failure is
    logged and surfaced to the client as an HTTP 400.
    """
    try:
        connection, parameters = src.api.v1.common.common_api_setup_tasks(
            base_query_parameters,
            raw_query_parameters=raw_query_parameters,
            tag_query_parameters=tag_query_parameters,
            time_weighted_average_query_parameters=time_weighted_average_parameters
        )

        result_df = time_weighted_average.get(connection, parameters).reset_index()
        payload = result_df.to_json(orient="table", index=False)
        return ResampleInterpolateResponse(**json.loads(payload))
    except Exception as e:
        logging.error(str(e))
        raise HTTPException(status_code=400, detail=str(e))

# Markdown rendered in the OpenAPI docs for the GET route (and reused below).
get_description = """
## Time Weighted Average

Time weighted average of raw timeseries data. Refer to the following [documentation](https://www.rtdip.io/sdk/code-reference/time-weighted-average/) for further information.
"""

# GET route: tag names and all other inputs arrive as query parameters.
@api_v1_router.get(
    path="/events/timeweightedaverage",
    name="Time Weighted Average GET",
    description=get_description,
    tags=["Events"],
    responses={200: {"model": ResampleInterpolateResponse}, 400: {"model": HTTPError}}
)
async def time_weighted_average_get(
    base_query_parameters: BaseQueryParams = Depends(),
    raw_query_parameters: RawQueryParams = Depends(),
    tag_query_parameters: TagsQueryParams = Depends(),
    time_weighted_average_parameters: TimeWeightedAverageQueryParams = Depends()
):
    # Delegate to the shared handler used by both the GET and POST routes.
    return time_weighted_average_events_get(base_query_parameters, raw_query_parameters, tag_query_parameters, time_weighted_average_parameters)

# Markdown rendered in the OpenAPI docs for the POST route.
# Fixed: the documentation link previously pointed at an internal domain's
# *interpolate* page (copy-paste leftover); it now matches the public
# time-weighted-average docs referenced by the GET description.
post_description = """
## Time Weighted Average

Time weighted average of raw timeseries data via a POST method to enable providing a list of tag names that can exceed url length restrictions via GET Query Parameters. Refer to the following [documentation](https://www.rtdip.io/sdk/code-reference/time-weighted-average/) for further information.
"""

# POST route: tag names travel in the request body so they can exceed URL
# length limits; remaining inputs stay as query parameters.
@api_v1_router.post(
    path="/events/timeweightedaverage",
    name="Time Weighted Average POST",
    # Fixed: this route previously reused get_description, leaving
    # post_description defined but unused.
    description=post_description,
    tags=["Events"],
    responses={200: {"model": ResampleInterpolateResponse}, 400: {"model": HTTPError}}
)
async def time_weighted_average_post(
    base_query_parameters: BaseQueryParams = Depends(),
    raw_query_parameters: RawQueryParams = Depends(),
    tag_query_parameters: TagsBodyParams = Body(default=...),
    time_weighted_average_parameters: TimeWeightedAverageQueryParams = Depends()
):
    # Delegate to the shared handler used by both the GET and POST routes.
    return time_weighted_average_events_get(base_query_parameters, raw_query_parameters, tag_query_parameters, time_weighted_average_parameters)
31 changes: 18 additions & 13 deletions src/sdk/python/rtdip_sdk/functions/time_weighted_average.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def get(connection: object, parameters_dict: dict) -> pd.DataFrame:
window_size_mins (int): Window size in minutes
window_length (int): (Optional) add longer window time for the start or end of specified date to cater for edge cases
include_bad_data (bool): Include "Bad" data points with True or remove "Bad" data points with False
step (str/bool): data points with step "enabled" or "disabled". The options for step are "Pi" (string), True or False (bool)
step (str/bool): data points with step "enabled" or "disabled". The options for step are "metadata" (string), True or False (bool)

Returns:
DataFrame: A dataframe containing the time weighted averages.
Expand All @@ -66,30 +66,32 @@ def get(connection: object, parameters_dict: dict) -> pd.DataFrame:
original_end_date = datetime.strptime(parameters_dict["end_date"], datetime_format)

if "window_length" in parameters_dict:
parameters_dict["start_date"] = (datetime.strptime(parameters_dict["start_date"], datetime_format) - timedelta(minutes = parameters_dict["window_length"])).strftime(datetime_format)
parameters_dict["end_date"] = (datetime.strptime(parameters_dict["end_date"], datetime_format) + timedelta(minutes = parameters_dict["window_length"])).strftime(datetime_format)
parameters_dict["start_date"] = (datetime.strptime(parameters_dict["start_date"], datetime_format) - timedelta(minutes = int(parameters_dict["window_length"]))).strftime(datetime_format)
parameters_dict["end_date"] = (datetime.strptime(parameters_dict["end_date"], datetime_format) + timedelta(minutes = int(parameters_dict["window_length"]))).strftime(datetime_format)
else:
parameters_dict["start_date"] = (datetime.strptime(parameters_dict["start_date"], datetime_format) - timedelta(minutes = parameters_dict["window_size_mins"])).strftime(datetime_format)
parameters_dict["end_date"] = (datetime.strptime(parameters_dict["end_date"], datetime_format) + timedelta(minutes = parameters_dict["window_size_mins"])).strftime(datetime_format)
parameters_dict["start_date"] = (datetime.strptime(parameters_dict["start_date"], datetime_format) - timedelta(minutes = int(parameters_dict["window_size_mins"]))).strftime(datetime_format)
parameters_dict["end_date"] = (datetime.strptime(parameters_dict["end_date"], datetime_format) + timedelta(minutes = int(parameters_dict["window_size_mins"]))).strftime(datetime_format)

pandas_df = raw_get(connection, parameters_dict)

pandas_df["EventDate"] = pd.to_datetime(pandas_df["EventTime"]).dt.date
pandas_df["EventDate"] = pd.to_datetime(pandas_df["EventTime"]).dt.date

boundaries_df = pd.DataFrame(columns=["EventTime", "TagName"])

boundaries_df = pd.DataFrame(columns=["EventTime", "TagName", "Value", "EventDate"])
for tag in parameters_dict["tag_names"]:
boundaries_df = boundaries_df.append({"EventTime": pd.to_datetime(parameters_dict["start_date"]).replace(tzinfo=pytz.timezone(utc)), "TagName": tag, "Value": 0, "EventDate": datetime.strptime(parameters_dict["start_date"], datetime_format).date()}, ignore_index=True)
boundaries_df = boundaries_df.append({"EventTime": pd.to_datetime(parameters_dict["end_date"]).replace(tzinfo=pytz.timezone(utc)), "TagName": tag, "Value": 0, "EventDate": datetime.strptime(parameters_dict["end_date"], datetime_format).date()}, ignore_index=True)
start_date_new_row = pd.DataFrame([[pd.to_datetime(parameters_dict["start_date"]).replace(tzinfo=pytz.timezone(utc)), tag]], columns=["EventTime", "TagName"])
end_date_new_row = pd.DataFrame([[pd.to_datetime(parameters_dict["end_date"]).replace(tzinfo=pytz.timezone(utc)), tag]], columns=["EventTime", "TagName"])
boundaries_df = pd.concat([boundaries_df, start_date_new_row, end_date_new_row], ignore_index=True)
boundaries_df.set_index(pd.DatetimeIndex(boundaries_df["EventTime"]), inplace=True)
boundaries_df.drop(columns="EventTime", inplace=True)
boundaries_df = boundaries_df.groupby(["TagName"]).resample("{}T".format(str(parameters_dict["window_size_mins"]))).mean().drop(columns="Value")
boundaries_df = boundaries_df.groupby(["TagName"]).resample("{}T".format(str(parameters_dict["window_size_mins"]))).ffill().drop(columns='TagName')

#preprocess - add boundaries and time interpolate missing boundary values
preprocess_df = pandas_df.copy()
preprocess_df["EventTime"] = preprocess_df["EventTime"].round("S")
preprocess_df.set_index(["EventTime", "TagName", "EventDate"], inplace=True)
preprocess_df = preprocess_df.join(boundaries_df, how="outer", rsuffix="right")
if parameters_dict["step"] == "pi" or parameters_dict["step"] == "Pi":
if isinstance(parameters_dict["step"], str) and parameters_dict["step"].lower() == "metadata":
metadata_df = metadata_get(connection, parameters_dict)
metadata_df.set_index("TagName", inplace=True)
metadata_df = metadata_df.loc[:, "Step"]
Expand All @@ -98,6 +100,8 @@ def get(connection: object, parameters_dict: dict) -> pd.DataFrame:
preprocess_df["Step"] = True
elif parameters_dict["step"] == False:
preprocess_df["Step"] = False
else:
raise Exception('Unexpected step value', parameters_dict["step"])

def process_time_weighted_averages_step(pandas_df):
if pandas_df["Step"].any() == False:
Expand All @@ -119,9 +123,10 @@ def process_time_weighted_averages_step(pandas_df):
time_weighted_averages_datetime = time_weighted_averages.index.to_pydatetime()
weighted_averages_timezones = np.array([z.replace(tzinfo=pytz.timezone(utc)) for z in time_weighted_averages_datetime])
time_weighted_averages = time_weighted_averages[(original_start_date.replace(tzinfo=pytz.timezone(utc)) < weighted_averages_timezones) & (weighted_averages_timezones <= original_end_date.replace(tzinfo=pytz.timezone(utc)) + timedelta(seconds = 1))]

pd.set_option('display.max_rows', None)
print(time_weighted_averages)
return time_weighted_averages

except Exception as e:
logging.exception('error with time weighted average function')
logging.exception('error with time weighted average function', str(e))
raise e
16 changes: 16 additions & 0 deletions tests/api/v1/api_test_objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,22 @@
INTERPOLATE_POST_BODY_MOCKED_PARAMETER_DICT = {}
INTERPOLATE_POST_BODY_MOCKED_PARAMETER_DICT["tag_name"] = ["MOCKED-TAGNAME1", "MOCKED-TAGNAME2"]

# Time weighted average fixtures: reuse the raw query parameter fixtures and
# layer on the window/step settings specific to these endpoints.
TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT = RAW_MOCKED_PARAMETER_DICT.copy()
TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_ERROR_DICT = RAW_MOCKED_PARAMETER_ERROR_DICT.copy()

_TWA_EXTRA_PARAMETERS = {"window_size_mins": 10, "window_length": 10, "step": "metadata"}
TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT.update(_TWA_EXTRA_PARAMETERS)
TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_ERROR_DICT.update(_TWA_EXTRA_PARAMETERS)

# POST variant: tag names are sent in the request body, not the query string.
TIME_WEIGHTED_AVERAGE_POST_MOCKED_PARAMETER_DICT = TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT.copy()
TIME_WEIGHTED_AVERAGE_POST_MOCKED_PARAMETER_DICT.pop("tag_name")

TIME_WEIGHTED_AVERAGE_POST_BODY_MOCKED_PARAMETER_DICT = {"tag_name": ["MOCKED-TAGNAME1", "MOCKED-TAGNAME2"]}

TEST_HEADERS = {"Authorization": "Bearer Test Token"}
TEST_HEADERS_POST = {"Authorization": "Bearer Test Token", "Content-Type": "application/json"}

Expand Down
101 changes: 101 additions & 0 deletions tests/api/v1/test_api_time_weighted_average.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# Copyright 2022 RTDIP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pytest_mock import MockerFixture
import pandas as pd
from datetime import datetime
from tests.api.v1.api_test_objects import TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT, TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_ERROR_DICT, TIME_WEIGHTED_AVERAGE_POST_MOCKED_PARAMETER_DICT, TIME_WEIGHTED_AVERAGE_POST_BODY_MOCKED_PARAMETER_DICT, mocker_setup, TEST_HEADERS
from fastapi.testclient import TestClient
from src.api.v1 import app

# Dotted path of the SDK function patched by these tests, and the API route under test.
MOCK_METHOD = "src.sdk.python.rtdip_sdk.functions.time_weighted_average.get"
MOCK_API_NAME = "/api/v1/events/timeweightedaverage"

def test_api_time_weighted_average_get_success(mocker: MockerFixture):
    # Happy path: with the SDK function mocked, the GET endpoint returns the
    # dataframe serialised in pandas "table" orientation with a 200 status.
    client = TestClient(app)

    mock_df = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]}).set_index("EventTime")
    mocker = mocker_setup(mocker, MOCK_METHOD, mock_df)

    response = client.get(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT)
    expected = mock_df.reset_index().to_json(orient="table", index=False).replace("Z", "000+00:00")

    assert response.status_code == 200
    assert response.text == expected

def test_api_time_weighted_average_get_validation_error(mocker: MockerFixture):
    # The error parameter dict omits start_date, so FastAPI's request
    # validation should reject the call with a 422 before the handler runs.
    client = TestClient(app)

    mock_df = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]})
    mocker = mocker_setup(mocker, MOCK_METHOD, mock_df)

    response = client.get(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_ERROR_DICT)

    assert response.status_code == 422
    assert response.text == '{"detail":[{"loc":["query","start_date"],"msg":"field required","type":"value_error.missing"}]}'

def test_api_time_weighted_average_get_error(mocker: MockerFixture):
    # When the mocked SDK call raises, the endpoint maps the exception to a
    # 400 response carrying the error message as the detail.
    client = TestClient(app)

    mock_df = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]})
    mocker = mocker_setup(mocker, MOCK_METHOD, mock_df, Exception("Error Connecting to Database"))

    response = client.get(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_DICT)

    assert response.status_code == 400
    assert response.text == '{"detail":"Error Connecting to Database"}'

def test_api_time_weighted_average_post_success(mocker: MockerFixture):
    # Happy path for POST: tag names travel in the JSON body while the other
    # parameters remain query parameters; response matches "table" JSON.
    client = TestClient(app)

    mock_df = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]}).set_index("EventTime")
    mocker = mocker_setup(mocker, MOCK_METHOD, mock_df)

    response = client.post(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_POST_MOCKED_PARAMETER_DICT, json=TIME_WEIGHTED_AVERAGE_POST_BODY_MOCKED_PARAMETER_DICT)
    expected = mock_df.reset_index().to_json(orient="table", index=False).replace("Z", "000+00:00")

    assert response.status_code == 200
    assert response.text == expected

def test_api_time_weighted_average_post_validation_error(mocker: MockerFixture):
    # Missing start_date in the query string must yield a 422 validation
    # error even though the body is well-formed.
    client = TestClient(app)

    mock_df = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]})
    mocker = mocker_setup(mocker, MOCK_METHOD, mock_df)

    response = client.post(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_MOCKED_PARAMETER_ERROR_DICT, json=TIME_WEIGHTED_AVERAGE_POST_BODY_MOCKED_PARAMETER_DICT)

    assert response.status_code == 422
    assert response.text == '{"detail":[{"loc":["query","start_date"],"msg":"field required","type":"value_error.missing"}]}'

def test_api_time_weighted_average_post_error(mocker: MockerFixture):
    # When the mocked SDK call raises, the POST endpoint maps the exception
    # to a 400 response carrying the error message as the detail.
    client = TestClient(app)

    test_data = pd.DataFrame({"EventTime": [datetime.utcnow()], "TagName": ["TestTag"], "Value": [1.01]})
    mocker = mocker_setup(mocker, MOCK_METHOD, test_data, Exception("Error Connecting to Database"))

    # Fixed: use the POST parameter dict (no tag_name in the query string),
    # consistent with test_api_time_weighted_average_post_success — tag names
    # are already supplied in the request body.
    response = client.post(MOCK_API_NAME, headers=TEST_HEADERS, params=TIME_WEIGHTED_AVERAGE_POST_MOCKED_PARAMETER_DICT, json=TIME_WEIGHTED_AVERAGE_POST_BODY_MOCKED_PARAMETER_DICT)
    actual = response.text

    assert response.status_code == 400
    assert actual == '{"detail":"Error Connecting to Database"}'