
Commit b6b4d78

jindalshivam09 authored and tf-model-analysis-team committed
Support for output_file_format in tfma.load_eval_result
PiperOrigin-RevId: 317736209
1 parent 355a83c commit b6b4d78

2 files changed: +10 -3 lines changed

RELEASE.md

Lines changed: 1 addition & 0 deletions
@@ -168,6 +168,7 @@
 * Added evaluation comparison feature to the Fairness Indicators UI in Colab.
 * Added better defaults handling for eval config so that a single model spec
   can be used for both candidate and baseline.
+* Added support to provide output file format in load_eval_result API.
 
 ## Bug fixes and other changes
 
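The release note above refers to the new output_file_format argument that this commit adds to tfma.load_eval_result. A minimal usage sketch, assuming an evaluation run has already been written to /tmp/eval_output (a hypothetical path) and assuming 'tfrecord' is an accepted value for the format filter:

import tensorflow_model_analysis as tfma

# Load a single EvalResult, reading only metrics/plots outputs whose file
# extension matches 'tfrecord'. The default empty string applies no filter,
# which preserves the previous behavior.
eval_result = tfma.load_eval_result(
    output_path='/tmp/eval_output',  # hypothetical eval output directory
    output_file_format='tfrecord')   # assumed supported format value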

tensorflow_model_analysis/api/model_eval_lib.py

Lines changed: 9 additions & 3 deletions
@@ -210,12 +210,14 @@ def make_eval_results(results: List[view_types.EvalResult],
 
 def load_eval_results(
     output_paths: Union[Text, List[Text]],
+    output_file_format: Optional[Text] = '',
     mode: Text = constants.MODEL_CENTRIC_MODE,
     model_name: Optional[Text] = None) -> view_types.EvalResults:
   """Loads results for multiple models or multiple data sets.
 
   Args:
     output_paths: A single path or list of output paths of completed tfma runs.
+    output_file_format: Optional file extension to filter files by.
     mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
       tfma.MODEL_CENTRIC_MODE are supported.
     model_name: Filters to only return results for given model. If unset all
@@ -235,17 +237,21 @@ def load_eval_results(
   else:
     model_names = [model_name]
   for model_name in model_names:
-    results.append(load_eval_result(output_path, model_name=model_name))
+    results.append(
+        load_eval_result(
+            output_path, output_file_format, model_name=model_name))
   return make_eval_results(results, mode)
 
 
 def load_eval_result(
     output_path: Text,
+    output_file_format: Optional[Text] = '',
     model_name: Optional[Text] = None) -> view_types.EvalResult:
   """Loads EvalResult object for use with the visualization functions.
 
   Args:
     output_path: Output directory containing config, metrics, plots, etc.
+    output_file_format: Optional file extension to filter files by.
     model_name: Optional model name. Required if multi-model evaluation was run.
 
   Returns:
@@ -259,12 +265,12 @@ def load_eval_result(
       eval_config_writer.load_eval_run(output_path))
   metrics_list = []
   for p in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
-      output_path):
+      output_path, output_file_format):
     metrics_list.append(
         util.convert_metrics_proto_to_dict(p, model_name=model_name))
   plots_list = []
   for p in metrics_plots_and_validations_writer.load_and_deserialize_plots(
-      output_path):
+      output_path, output_file_format):
     plots_list.append(
         util.convert_plots_proto_to_dict(p, model_name=model_name))
   if not model_locations:
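Because load_eval_results now forwards output_file_format to load_eval_result, the same filter can be applied when loading several completed runs for comparison. A sketch under the same assumptions (hypothetical output directories; 'tfrecord' assumed to be a valid format value), using the tfma.MODEL_CENTRIC_MODE constant named in the docstring above:

import tensorflow_model_analysis as tfma

# Load two completed runs; each underlying load_eval_result call receives the
# same output_file_format filter.
eval_results = tfma.load_eval_results(
    output_paths=['/tmp/eval/candidate', '/tmp/eval/baseline'],  # hypothetical
    output_file_format='tfrecord',  # assumed supported format value
    mode=tfma.MODEL_CENTRIC_MODE)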
