diff --git a/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.py b/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.py
index ac7a48d..3f4df2d 100644
--- a/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.py
+++ b/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.py
@@ -26,8 +26,7 @@ def automl_eval_metrics(
   thresholds: str = '{"mean_absolute_error": 460}',
   confidence_threshold: float = 0.5 # for classification
-# ) -> NamedTuple('Outputs', [('deploy', str)]): # this gives the same result
-) -> NamedTuple('Outputs', [('deploy', 'String')]):
+) -> NamedTuple('Outputs', [('deploy', str)]):
   import subprocess
   import sys
   subprocess.run([sys.executable, '-m', 'pip', 'install', 'googleapis-common-protos==1.6.0',
@@ -141,7 +140,7 @@ def classif_threshold_check(eval_info):
       with open(mlpipeline_metrics_path, 'w') as mlpipeline_metrics_file:
         mlpipeline_metrics_file.write(json.dumps(metrics))
       logging.info('deploy flag: {}'.format(res))
-      return res
+      return (res,)

     if classif and thresholds_dict:
       res, eresults = classif_threshold_check(eval_info)
@@ -159,14 +158,14 @@ def classif_threshold_check(eval_info):
       with open(mlpipeline_ui_metadata_path, 'w') as mlpipeline_ui_metadata_file:
         mlpipeline_ui_metadata_file.write(json.dumps(metadata))
       logging.info('deploy flag: {}'.format(res))
-      return res
-    return 'deploy'
+      return (res,)
+    return ('deploy',)
   except Exception as e:
     logging.warning(e)
     # If can't reconstruct the eval, or don't have thresholds defined,
     # return True as a signal to deploy.
     # TODO: is this the right default?
-    return 'deploy'
+    return ('deploy',)


 if __name__ == '__main__':
diff --git a/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.yaml b/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.yaml
index 757d5ee..80dbf48 100644
--- a/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.yaml
+++ b/ml/automl/tables/kfp_e2e/create_model_for_tables/tables_eval_metrics_component.yaml
@@ -38,7 +38,6 @@ implementation:
           thresholds = '{"mean_absolute_error": 460}',
           confidence_threshold = 0.5 # for classification
-      # ) -> NamedTuple('Outputs', [('deploy', str)]):
       ) :
         import subprocess
         import sys
@@ -153,7 +152,7 @@ implementation:
           with open(mlpipeline_metrics_path, 'w') as mlpipeline_metrics_file:
             mlpipeline_metrics_file.write(json.dumps(metrics))
           logging.info('deploy flag: {}'.format(res))
-          return res
+          return (res,)

         if classif and thresholds_dict:
           res, eresults = classif_threshold_check(eval_info)
@@ -171,14 +170,14 @@ implementation:
           with open(mlpipeline_ui_metadata_path, 'w') as mlpipeline_ui_metadata_file:
             mlpipeline_ui_metadata_file.write(json.dumps(metadata))
           logging.info('deploy flag: {}'.format(res))
-          return res
-        return 'deploy'
+          return (res,)
+        return ('deploy',)
       except Exception as e:
         logging.warning(e)
         # If can't reconstruct the eval, or don't have thresholds defined,
         # return True as a signal to deploy.
         # TODO: is this the right default?
-        return 'deploy'
+        return ('deploy',)

       def _serialize_str(str_value: str) -> str:
         if not isinstance(str_value, str):
diff --git a/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py b/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py
index b5938bd..5ac91d9 100644
--- a/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py
+++ b/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py
@@ -130,7 +130,7 @@ def automl_tables( #pylint: disable=unused-argument
       eval_data=eval_model.outputs['eval_data'],
       )

-  with dsl.Condition(eval_metrics.outputs['deploy'] == 'd'):
+  with dsl.Condition(eval_metrics.outputs['deploy'] == 'deploy'):
     deploy_model = deploy_model_op(
       gcp_project_id=gcp_project_id,
       gcp_region=gcp_region,
diff --git a/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py.tar.gz b/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py.tar.gz
index 7a65e79..a125f40 100644
Binary files a/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py.tar.gz and b/ml/automl/tables/kfp_e2e/tables_pipeline_caip.py.tar.gz differ
diff --git a/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py b/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py
index b93f028..3185091 100644
--- a/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py
+++ b/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py
@@ -130,7 +130,7 @@ def automl_tables( #pylint: disable=unused-argument
       eval_data=eval_model.outputs['eval_data'],
       ).apply(gcp.use_gcp_secret('user-gcp-sa'))

-  with dsl.Condition(eval_metrics.outputs['deploy'] == 'd'):
+  with dsl.Condition(eval_metrics.outputs['deploy'] == 'deploy'):
     deploy_model = deploy_model_op(
       gcp_project_id=gcp_project_id,
       gcp_region=gcp_region,
diff --git a/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py.tar.gz b/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py.tar.gz
index 2676987..cde8e1a 100644
Binary files a/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py.tar.gz and b/ml/automl/tables/kfp_e2e/tables_pipeline_kf.py.tar.gz differ
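
Reviewer note: the substance of this change is twofold. The eval-metrics component now declares its single output as NamedTuple('Outputs', [('deploy', str)]) and returns one-element tuples, and both pipelines gate deployment on the full 'deploy' string rather than the prefix 'd'. Below is a minimal sketch of that pattern, assuming the KFP v1 SDK (kfp.dsl, kfp.components.func_to_container_op); the component and pipeline names (eval_gate, announce, deploy_gate_pipeline) are hypothetical stand-ins, not the components in this repo.

```python
from typing import NamedTuple

from kfp import dsl
from kfp.components import func_to_container_op


def eval_gate(metric_value: float,
              threshold: float = 460.0
              ) -> NamedTuple('Outputs', [('deploy', str)]):
  # Return a one-element tuple so the value maps onto the single declared
  # 'deploy' output, mirroring the `return (res,)` / `return ('deploy',)`
  # changes above.
  if metric_value <= threshold:
    return ('deploy',)
  return ('no-deploy',)


def announce(msg: str):
  print(msg)


eval_gate_op = func_to_container_op(eval_gate)
announce_op = func_to_container_op(announce)


@dsl.pipeline(name='deploy-gate-sketch')
def deploy_gate_pipeline(metric_value: float = 400.0):
  gate = eval_gate_op(metric_value=metric_value)
  # Compare against the full string the component emits ('deploy'),
  # matching the dsl.Condition changes in the two pipeline files.
  with dsl.Condition(gate.outputs['deploy'] == 'deploy'):
    announce_op(msg='deploying model')
```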