diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 51c0bebe..db85f130 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,12 +49,13 @@ jobs: - name: Filter projects id: set-matrix run: | - echo "::set-output name=projects::$(python ./.github/workflows/filter_projects.py $HOME/files.json)" + echo "projects=$(python ./.github/workflows/filter_projects.py $HOME/files.json)" >> $GITHUB_OUTPUT ci: runs-on: ubuntu-latest needs: filter_projects timeout-minutes: 60 + if: needs.filter_projects.outputs.projects != '[]' strategy: # Test for each project in parallel using ci_max and ci_min to ensure # tested in range of tfx/tensorflow supported versions diff --git a/.github/workflows/ci_examples.yml b/.github/workflows/ci_examples.yml index 9f5b540d..6e149bbb 100644 --- a/.github/workflows/ci_examples.yml +++ b/.github/workflows/ci_examples.yml @@ -49,11 +49,12 @@ jobs: - name: Filter example projects id: set-matrix run: | - echo "::set-output name=projects::$(python ./.github/workflows/filter_examples.py $HOME/files.json)" + echo "projects=$(python ./.github/workflows/filter_examples.py $HOME/files.json)" >> $GITHUB_OUTPUT ci-examples: runs-on: ubuntu-latest needs: filter_examples timeout-minutes: 60 + if: needs.filter_examples.outputs.projects != '[]' strategy: # Test for each project in parallel using ci_max and ci_min to ensure # tested in range of tfx/tensorflow supported versions diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3bdb975a..a711fc36 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,7 +17,7 @@ jobs: build-and-publish: name: Build TFX Addons PyPI package and release to PyPI and TestPyPI - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python 3.7 diff --git a/.gitignore b/.gitignore index 6b5d59dc..5b8e7688 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,4 @@ env/* #Editor .idea/* -.vscode/* \ No newline at end of file +.vscode/* diff --git a/CODEOWNERS b/CODEOWNERS index 04d9a19d..cfd83b21 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -55,5 +55,18 @@ /tfx_addons/message_exit_handler @hanneshapke /tfx_addons/utils @hanneshapke +# Predictions to Bigquery Component +/tfx_addons/predictions_to_bigquery @hanneshapke @cfezequiel + # PandasTransform Component /tfx_addons/pandas_transform @rcrowe-google + +# ModelCardGenerator Component +/tfx_addons/model_card_generator @codesue @hanneshapke +/examples/model_card_generator @codesue @hanneshapke + +# Apache Airflow Orchestrator +/tfx_addons/apache_airflow @lego0901 + +# CopyExampleGen Component +/tfx_addons/copy_example_gen @alxndrnh diff --git a/README.md b/README.md index b4315102..7636d02b 100644 --- a/README.md +++ b/README.md @@ -45,15 +45,18 @@ tfxa.feast_examplegen.FeastExampleGen(...)
## TFX Addons projects -* [tfxa.mlmd_client](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/mlmd_client) -* [tfxa.schema_curation](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/schema_curation) -* [tfxa.feature_selection](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/feature_selection) * [tfxa.feast_examplegen](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/feast_examplegen) -* [tfxa.xgboost_evaluator](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/xgboost_evaluator) -* [tfxa.sampling](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/sampling) +* [tfxa.feature_selection](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/feature_selection) +* [tfxa.firebase_publisher](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/firebase_publisher) +* [tfxa.huggingface_pusher](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/huggingface_pusher) * [tfxa.message_exit_handler](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/message_exit_handler) +* [tfxa.mlmd_client](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/mlmd_client) +* [tfxa.model_card_generator](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/model_card_generator) * [tfxa.pandas_transform](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/pandas_transform) -* [tfxa.firebase_publisher](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/firebase_publisher) +* [tfxa.sampling](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/sampling) +* [tfxa.schema_curation](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/schema_curation) +* [tfxa.xgboost_evaluator](https://github.com/tensorflow/tfx-addons/tree/main/tfx_addons/xgboost_evaluator) + Check out [proposals](https://github.com/tensorflow/tfx-addons/tree/main/proposals) for a list of existing or upcoming projects proposals for TFX Addons. diff --git a/RELEASE.md b/RELEASE.md index 2e23c2c1..c9f8f553 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -6,7 +6,7 @@ TFX Addons follows [Semantic Versioning 2.0](https://semver.org/) strategy. ## Minor automatic release from main -1. Trigger [Create Minor Release](https://github.com/tensorflow/tfx-addons/actions/workflows/minor_release.yml] workflow and ensure it runs to completion. +1. Trigger [Create Minor Release](https://github.com/tensorflow/tfx-addons/actions/workflows/minor_release.yml) workflow and ensure it runs to completion. 2. Find created [draft release](https://github.com/tensorflow/tfx-addons/releases). * Add updates for new features, enhancements, bug fixes * Add contributors using `git shortlog ..HEAD -s` diff --git a/examples/model_card_generator/.gitignore b/examples/model_card_generator/.gitignore new file mode 100644 index 00000000..a2ba38ba --- /dev/null +++ b/examples/model_card_generator/.gitignore @@ -0,0 +1,4 @@ +# unnecessary project files +census_income_constants.py +census_income_trainer.py +census_income_transform.py diff --git a/examples/model_card_generator/MLMD_Model_Card_Toolkit_Demo.ipynb b/examples/model_card_generator/MLMD_Model_Card_Toolkit_Demo.ipynb new file mode 100644 index 00000000..ce9dd4e4 --- /dev/null +++ b/examples/model_card_generator/MLMD_Model_Card_Toolkit_Demo.ipynb @@ -0,0 +1,15568 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "Tce3stUlHN0L" + }, + "source": [ + "##### Copyright 2022 The TensorFlow Authors." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "form", + "id": "tuOe1ymfHZPu" + }, + "outputs": [], + "source": [ + "# Copyright 2023 The TensorFlow Authors. All Rights Reserved.\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "23R0Z9RojXYW" + }, + "source": [ + "# MLMD Model Card Toolkit Demo" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MfBg1C5NB3X0" + }, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " View on TensorFlow.org\n", + " \n", + " Run in Google Colab\n", + " \n", + " View on GitHub\n", + " \n", + " Download notebook\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sfSQ-kX-MLEr" + }, + "source": [ + "## Background\n", + "\n", + "This notebook demonstrates how to generate a model card using the Model Card Toolkit with MLMD and TFX pipeline in a Jupyter/Colab environment. You can learn more about model cards at https://modelcards.withgoogle.com/about. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2GivNBNYjb3b" + }, + "source": [ + "## Setup\n", + "We first need to a) install and import the necessary packages, and b) download the data." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Fmgi8ZvQkScg" + }, + "source": [ + "### Upgrade to Pip 21 (or later) and Install Model Card Toolkit" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "NYtxxdriz5VO" + }, + "outputs": [], + "source": [ + "# %pip install --upgrade pip\n", + "# %pip install tfx-addons[model-card-toolkit]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EwT0nov5QO1M" + }, + "source": [ + "####*Did you restart the runtime?*\n", + "\n", + "If you are using Google Colab, the runtime must be restarted after installing new packages." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "N-ePgV0Lj68Q" + }, + "source": [ + "### Import packages\n", + "\n", + "We import necessary packages, including standard TFX component classes and check the library versions.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "YIqpWK9efviJ" + }, + "outputs": [], + "source": [ + "import os\n", + "import tempfile\n", + "import urllib\n", + "\n", + "import absl\n", + "import json\n", + "import tensorflow as tf\n", + "import tensorflow_model_analysis as tfma\n", + "import tfx.v1 as tfx\n", + "import tfx_addons as tfxa\n", + "import ml_metadata as mlmd\n", + "import model_card_toolkit as mct\n", + "\n", + "from tfx.components import CsvExampleGen\n", + "from tfx.components import Evaluator\n", + "from tfx.components import SchemaGen\n", + "from tfx.components import StatisticsGen\n", + "from tfx.components import Trainer\n", + "from tfx.components import Transform\n", + "from tfx.components.trainer.executor import GenericExecutor\n", + "from tfx.dsl.components.base import executor_spec\n", + "from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\n", + "from tfx.proto import example_gen_pb2\n", + "from tfx.proto import trainer_pb2\n", + "\n", + "from tfx_addons.model_card_generator.component import ModelCardGenerator" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "eZ4K18_DN2D8" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TensorFlow version: 2.11.1\n", + "TFX version: 1.12.0\n", + "TFX Addons version: 0.4.0-dev\n", + "MLMD version: 1.12.0\n", + "Model Card Toolkit version: 2.0.0\n" + ] + } + ], + "source": [ + "print('TensorFlow version: {}'.format(tf.__version__))\n", + "print('TFX version: {}'.format(tfx.__version__))\n", + "print('TFX Addons version: {}'.format(tfxa.__version__))\n", + "print('MLMD version: {}'.format(mlmd.__version__))\n", + "print('Model Card Toolkit version: {}'.format(mct.__version__))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ufJKQ6OvkJlY" + }, + "source": [ + "### Set up pipeline paths" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "ad5JLpKbf6sN" + }, + "outputs": [], + "source": [ + "# This is the root 
directory for your TFX pip package installation.\n", + "# _tfx_root = tfx.__path__\n", + "\n", + "# Set up logging.\n", + "absl.logging.set_verbosity(absl.logging.INFO)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "n2cMMAbSkGfX" + }, + "source": [ + "### Download example data\n", + "We download the example dataset for use in our TFX pipeline.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "BywX6OUEhAqn" + }, + "outputs": [], + "source": [ + "DATA_PATH = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/' \\\n", + " 'adult.data'\n", + "_data_root = tempfile.mkdtemp(prefix='tfx-data')\n", + "_data_filepath = os.path.join(_data_root, \"data.csv\")\n", + "urllib.request.urlretrieve(DATA_PATH, _data_filepath)\n", + "\n", + "columns = [\n", + " \"Age\", \"Workclass\", \"fnlwgt\", \"Education\", \"Education-Num\", \"Marital-Status\",\n", + " \"Occupation\", \"Relationship\", \"Race\", \"Sex\", \"Capital-Gain\", \"Capital-Loss\",\n", + " \"Hours-per-week\", \"Country\", \"Over-50K\"]\n", + "\n", + "with open(_data_filepath, 'r') as f:\n", + " content = f.read()\n", + " content = content.replace(\", <=50K\", ', 0').replace(\", >50K\", ', 1')\n", + "\n", + "with open(_data_filepath, 'w') as f:\n", + " f.write(','.join(columns) + '\\n' + content)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "blZC1sIQOWfH" + }, + "source": [ + "Take a quick look at the CSV file." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "c5YPeLPFOXaD" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Age,Workclass,fnlwgt,Education,Education-Num,Marital-Status,Occupation,Relationship,Race,Sex,Capital-Gain,Capital-Loss,Hours-per-week,Country,Over-50K\n", + "39, State-gov, 77516, Bachelors, 13, Never-married, Adm-clerical, Not-in-family, White, Male, 2174, 0, 40, United-States, 0\n", + "50, Self-emp-not-inc, 83311, Bachelors, 13, Married-civ-spouse, Exec-managerial, Husband, White, Male, 0, 0, 13, United-States, 0\n", + "38, Private, 215646, HS-grad, 9, Divorced, Handlers-cleaners, Not-in-family, White, Male, 0, 0, 40, United-States, 0\n", + "53, Private, 234721, 11th, 7, Married-civ-spouse, Handlers-cleaners, Husband, Black, Male, 0, 0, 40, United-States, 0\n", + "28, Private, 338409, Bachelors, 13, Married-civ-spouse, Prof-specialty, Wife, Black, Female, 0, 0, 40, Cuba, 0\n", + "37, Private, 284582, Masters, 14, Married-civ-spouse, Exec-managerial, Wife, White, Female, 0, 0, 40, United-States, 0\n", + "49, Private, 160187, 9th, 5, Married-spouse-absent, Other-service, Not-in-family, Black, Female, 0, 0, 16, Jamaica, 0\n", + "52, Self-emp-not-inc, 209642, HS-grad, 9, Married-civ-spouse, Exec-managerial, Husband, White, Male, 0, 0, 45, United-States, 1\n", + "31, Private, 45781, Masters, 14, Never-married, Prof-specialty, Not-in-family, White, Female, 14084, 0, 50, United-States, 1\n" + ] + } + ], + "source": [ + "!head {_data_filepath}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8ONIE_hdkPS4" + }, + "source": [ + "### Create the InteractiveContext\n", + "Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook." 
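The cell below creates the context with default parameters, so pipeline outputs and the MLMD database land in a temporary directory. To keep them across sessions, `pipeline_root` and `metadata_connection_config` can be passed explicitly; a minimal sketch, assuming the `tfx.orchestration.metadata` helpers available in TFX 1.x (the paths below are hypothetical):

```python
# Sketch only: persist pipeline outputs and ML Metadata between notebook sessions.
import os

from tfx.orchestration import metadata
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext

_pipeline_root = os.path.join(os.getcwd(), 'pipeline_output')      # hypothetical location
_metadata_path = os.path.join(_pipeline_root, 'metadata.sqlite')    # hypothetical location

context = InteractiveContext(
    pipeline_name='Census Income Classification Pipeline',
    pipeline_root=_pipeline_root,
    metadata_connection_config=metadata.sqlite_metadata_connection_config(_metadata_path))
```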
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "0Rh6K5sUf9dd" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:absl:InteractiveContext pipeline_root argument not provided: using temporary directory /tmp/tfx-Census Income Classification Pipeline-2_2mej8l as root for pipeline outputs.\n", + "WARNING:absl:InteractiveContext metadata_connection_config not provided: using SQLite ML Metadata database at /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/metadata.sqlite.\n" + ] + } + ], + "source": [ + "# Here, we create an InteractiveContext using default parameters. This will\n", + "# use a temporary directory with an ephemeral ML Metadata database instance.\n", + "# To use your own pipeline root or database, the optional properties\n", + "# `pipeline_root` and `metadata_connection_config` may be passed to\n", + "# InteractiveContext. Calls to InteractiveContext are no-ops outside of the\n", + "# notebook.\n", + "context = InteractiveContext(pipeline_name=\"Census Income Classification Pipeline\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HdQWxfsVkzdJ" + }, + "source": [ + "## Run TFX components interactively\n", + "In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts. In this notebook, we won’t provide detailed explanations of each TFX component, but you can see what each does at [TFX Colab workshop](https://github.com/tensorflow/workshops/blob/master/tfx_labs/Lab_1_Pipeline_in_Colab.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "L9fwt9gQk3BR" + }, + "source": [ + "### ExampleGen\n", + "\n", + "Create the `ExampleGen` component to split data into training and evaluation sets, convert the data into `tf.Example` format, and copy data into the `_tfx_root` directory for other components to access. 
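The cell that follows ingests the single CSV downloaded above and lets `CsvExampleGen` derive the train/eval split from hash buckets. If your data already arrives pre-split on disk, an `input_config` can describe the existing splits instead; a hedged sketch (the `train/*` and `eval/*` patterns are hypothetical placeholders under `_data_root`):

```python
# Sketch only: use pre-split input data instead of hash-bucket splitting.
input_config = tfx.proto.Input(splits=[
    tfx.proto.Input.Split(name='train', pattern='train/*'),
    tfx.proto.Input.Split(name='eval', pattern='eval/*'),
])

example_gen_presplit = CsvExampleGen(input_base=_data_root,
                                     input_config=input_config)
# context.run(example_gen_presplit)
```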
" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "PyXjuMt8f-9u" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Running driver for CsvExampleGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:select span and version = (0, None)\n", + "INFO:absl:latest span and version = (0, None)\n", + "INFO:absl:Running executor for CsvExampleGen\n", + "INFO:absl:Generating examples.\n", + "WARNING:apache_beam.runners.interactive.interactive_environment:Dependencies required for Interactive Beam PCollection visualization are not available, please use: `pip install apache-beam[interactive]` to install necessary dependencies to enable all data visualization features.\n" + ] + }, + { + "data": { + "application/javascript": "\n if (typeof window.interactive_beam_jquery == 'undefined') {\n var jqueryScript = document.createElement('script');\n jqueryScript.src = 'https://code.jquery.com/jquery-3.4.1.slim.min.js';\n jqueryScript.type = 'text/javascript';\n jqueryScript.onload = function() {\n var datatableScript = document.createElement('script');\n datatableScript.src = 'https://cdn.datatables.net/1.10.20/js/jquery.dataTables.min.js';\n datatableScript.type = 'text/javascript';\n datatableScript.onload = function() {\n window.interactive_beam_jquery = jQuery.noConflict(true);\n window.interactive_beam_jquery(document).ready(function($){\n \n });\n }\n document.head.appendChild(datatableScript);\n };\n document.head.appendChild(jqueryScript);\n } else {\n window.interactive_beam_jquery(document).ready(function($){\n \n });\n }" + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Processing input csv data /tmp/tfx-datatpwerxc8/* to TFExample.\n", + "WARNING:apache_beam.io.tfrecordio:Couldn't find python-snappy so the implementation of _TFRecordUtil._masked_crc32c is not as fast as it could be.\n", + "INFO:absl:Examples generated.\n", + "INFO:absl:Running publisher for CsvExampleGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
ExecutionResult at 0x7fdfb8383ac0
.execution_id1
.component\n", + "\n", + "
CsvExampleGen at 0x7fdfb8399a90
.inputs{}
.outputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
.exec_properties
['input_base']/tmp/tfx-datatpwerxc8
['input_config']{\n", + " "splits": [\n", + " {\n", + " "name": "single_split",\n", + " "pattern": "*"\n", + " }\n", + " ]\n", + "}
['output_config']{\n", + " "split_config": {\n", + " "splits": [\n", + " {\n", + " "hash_buckets": 9,\n", + " "name": "train"\n", + " },\n", + " {\n", + " "hash_buckets": 1,\n", + " "name": "eval"\n", + " }\n", + " ]\n", + " }\n", + "}
['output_data_format']6
['output_file_format']5
['custom_config']None
['range_config']None
['span']0
['version']None
['input_fingerprint']split:single_split,num_files:1,total_bytes:3852053,xor_checksum:1680577707,sum_checksum:1680577707
.component.inputs{}
.component.outputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: CsvExampleGen\n", + " execution_id: 1\n", + " outputs:\n", + " examples: OutputChannel(artifact_type=Examples, producer_component_id=CsvExampleGen, output_key=examples, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "output = tfx.proto.Output(\n", + " split_config=example_gen_pb2.SplitConfig(splits=[\n", + " tfx.proto.SplitConfig.Split(name='train', hash_buckets=9),\n", + " tfx.proto.SplitConfig.Split(name='eval', hash_buckets=1)\n", + " ]))\n", + "\n", + "example_gen = CsvExampleGen(input_base=_data_root, output_config=output)\n", + "context.run(example_gen)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "880KkTAkPeUg" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\"train\", \"eval\"] /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1\n" + ] + } + ], + "source": [ + "artifact = example_gen.outputs['examples'].get()[0]\n", + "print(artifact.split_names, artifact.uri)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J6vcbW_wPqvl" + }, + "source": [ + "Let’s take a look at the first three training examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "H4XIXjiCPwzQ" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "features {\n", + " feature {\n", + " key: \"Age\"\n", + " value {\n", + " int64_list {\n", + " value: 39\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Gain\"\n", + " value {\n", + " int64_list {\n", + " value: 2174\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Loss\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Country\"\n", + " value {\n", + " bytes_list {\n", + " value: \" United-States\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Bachelors\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education-Num\"\n", + " value {\n", + " int64_list {\n", + " value: 13\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Hours-per-week\"\n", + " value {\n", + " int64_list {\n", + " value: 40\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Marital-Status\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Never-married\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Occupation\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Adm-clerical\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Over-50K\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Race\"\n", + " value {\n", + " bytes_list {\n", + " value: \" White\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Relationship\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Not-in-family\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Sex\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Male\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Workclass\"\n", + " value {\n", + " bytes_list {\n", + " value: \" State-gov\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"fnlwgt\"\n", + " value {\n", + " int64_list {\n", + " 
value: 77516\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "features {\n", + " feature {\n", + " key: \"Age\"\n", + " value {\n", + " int64_list {\n", + " value: 50\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Gain\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Loss\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Country\"\n", + " value {\n", + " bytes_list {\n", + " value: \" United-States\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Bachelors\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education-Num\"\n", + " value {\n", + " int64_list {\n", + " value: 13\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Hours-per-week\"\n", + " value {\n", + " int64_list {\n", + " value: 13\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Marital-Status\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Married-civ-spouse\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Occupation\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Exec-managerial\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Over-50K\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Race\"\n", + " value {\n", + " bytes_list {\n", + " value: \" White\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Relationship\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Husband\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Sex\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Male\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Workclass\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Self-emp-not-inc\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"fnlwgt\"\n", + " value {\n", + " int64_list {\n", + " value: 83311\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "features {\n", + " feature {\n", + " key: \"Age\"\n", + " value {\n", + " int64_list {\n", + " value: 38\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Gain\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Capital-Loss\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Country\"\n", + " value {\n", + " bytes_list {\n", + " value: \" United-States\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education\"\n", + " value {\n", + " bytes_list {\n", + " value: \" HS-grad\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Education-Num\"\n", + " value {\n", + " int64_list {\n", + " value: 9\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Hours-per-week\"\n", + " value {\n", + " int64_list {\n", + " value: 40\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Marital-Status\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Divorced\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Occupation\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Handlers-cleaners\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Over-50K\"\n", + " value {\n", + " int64_list {\n", + " value: 0\n", + " }\n", + " }\n", + " }\n", + " 
feature {\n", + " key: \"Race\"\n", + " value {\n", + " bytes_list {\n", + " value: \" White\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Relationship\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Not-in-family\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Sex\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Male\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"Workclass\"\n", + " value {\n", + " bytes_list {\n", + " value: \" Private\"\n", + " }\n", + " }\n", + " }\n", + " feature {\n", + " key: \"fnlwgt\"\n", + " value {\n", + " int64_list {\n", + " value: 215646\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n" + ] + } + ], + "source": [ + "# Get the URI of the output artifact representing the training examples, which is a directory\n", + "train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')\n", + "\n", + "# Get the list of files in this directory (all compressed TFRecord files)\n", + "tfrecord_filenames = [os.path.join(train_uri, name)\n", + " for name in os.listdir(train_uri)]\n", + "\n", + "# Create a `TFRecordDataset` to read these files\n", + "dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n", + "\n", + "# Iterate over the first 3 records and decode them.\n", + "for tfrecord in dataset.take(3):\n", + " serialized_example = tfrecord.numpy()\n", + " example = tf.train.Example()\n", + " example.ParseFromString(serialized_example)\n", + " print(example)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "csM6BFhtk5Aa" + }, + "source": [ + "### StatisticsGen\n", + "\n", + "`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen` and allows you to perform some analysis of your dataset using TensorFlow Data Validation (TFDV)." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "MAscCCYWgA-9" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Excluding no splits because exclude_splits is not set.\n", + "INFO:absl:Running driver for StatisticsGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for StatisticsGen\n", + "INFO:absl:Generating statistics for split train.\n", + "INFO:absl:Statistics for split train written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2/Split-train.\n", + "INFO:absl:Generating statistics for split eval.\n", + "INFO:absl:Statistics for split eval written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2/Split-eval.\n", + "INFO:absl:Running publisher for StatisticsGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
ExecutionResult at 0x7fdfb8399eb0
.execution_id2
.component\n", + "\n", + "
StatisticsGen at 0x7fdfb827db50
.inputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
.outputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
.exec_properties
['stats_options_json']None
['exclude_splits'][]
.component.inputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
.component.outputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: StatisticsGen\n", + " execution_id: 2\n", + " outputs:\n", + " statistics: OutputChannel(artifact_type=ExampleStatistics, producer_component_id=StatisticsGen, output_key=statistics, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "statistics_gen = StatisticsGen(\n", + " examples=example_gen.outputs['examples'])\n", + "context.run(statistics_gen)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0BDfOjGy048O" + }, + "source": [ + "After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "tLjXy7K6Tp_G" + }, + "outputs": [ + { + "data": { + "text/html": [ + "Artifact at /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
'train' split:

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
'eval' split:

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "context.show(statistics_gen.outputs['statistics'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HLKLTO9Nk60p" + }, + "source": [ + "### SchemaGen\n", + "\n", + "`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "id": "ygQvZ6hsiQ_J" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Excluding no splits because exclude_splits is not set.\n", + "INFO:absl:Running driver for SchemaGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for SchemaGen\n", + "INFO:absl:Processing schema from statistics for split train.\n", + "INFO:absl:Processing schema from statistics for split eval.\n", + "INFO:absl:Schema written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3/schema.pbtxt.\n", + "INFO:absl:Running publisher for SchemaGen\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
ExecutionResult at 0x7fdfa83f73d0
.execution_id3
.component\n", + "\n", + "
SchemaGen at 0x7fdf4c777c70
.inputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
.outputs
['schema']\n", + "\n", + "
Channel of type 'Schema' (1 artifact) at 0x7fdfb827da30
.type_nameSchema
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Schema' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3) at 0x7fdfb827dac0
.type<class 'tfx.types.standard_artifacts.Schema'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3
.exec_properties
['infer_feature_shape']0
['exclude_splits'][]
.component.inputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
.component.outputs
['schema']\n", + "\n", + "
Channel of type 'Schema' (1 artifact) at 0x7fdfb827da30
.type_nameSchema
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Schema' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3) at 0x7fdfb827dac0
.type<class 'tfx.types.standard_artifacts.Schema'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: SchemaGen\n", + " execution_id: 3\n", + " outputs:\n", + " schema: OutputChannel(artifact_type=Schema, producer_component_id=SchemaGen, output_key=schema, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "schema_gen = SchemaGen(\n", + " statistics=statistics_gen.outputs['statistics'],\n", + " infer_feature_shape=False)\n", + "context.run(schema_gen)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "id": "Ec9vqDXpXeMb" + }, + "outputs": [ + { + "data": { + "text/html": [ + "Artifact at /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/SchemaGen/schema/3

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
TypePresenceValencyDomain
Feature name
'Age'INTrequiredsingle-
'Capital-Gain'INTrequiredsingle-
'Capital-Loss'INTrequiredsingle-
'Country'STRINGrequiredsingle'Country'
'Education'STRINGrequiredsingle'Education'
'Education-Num'INTrequiredsingle-
'Hours-per-week'INTrequiredsingle-
'Marital-Status'STRINGrequiredsingle'Marital-Status'
'Occupation'STRINGrequiredsingle'Occupation'
'Over-50K'INTrequiredsingle-
'Race'STRINGrequiredsingle'Race'
'Relationship'STRINGrequiredsingle'Relationship'
'Sex'STRINGrequiredsingle'Sex'
'Workclass'STRINGrequiredsingle'Workclass'
'fnlwgt'INTrequiredsingle-
\n", + "
" + ], + "text/plain": [ + " Type Presence Valency Domain\n", + "Feature name \n", + "'Age' INT required single -\n", + "'Capital-Gain' INT required single -\n", + "'Capital-Loss' INT required single -\n", + "'Country' STRING required single 'Country'\n", + "'Education' STRING required single 'Education'\n", + "'Education-Num' INT required single -\n", + "'Hours-per-week' INT required single -\n", + "'Marital-Status' STRING required single 'Marital-Status'\n", + "'Occupation' STRING required single 'Occupation'\n", + "'Over-50K' INT required single -\n", + "'Race' STRING required single 'Race'\n", + "'Relationship' STRING required single 'Relationship'\n", + "'Sex' STRING required single 'Sex'\n", + "'Workclass' STRING required single 'Workclass'\n", + "'fnlwgt' INT required single -" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Values
Domain
'Country'' ?', ' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia'
'Education'' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'
'Marital-Status'' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'
'Occupation'' ?', ' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'
'Race'' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'
'Relationship'' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'
'Sex'' Female', ' Male'
'Workclass'' ?', ' Federal-gov', ' Local-gov', ' Never-worked', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'
\n", + "
" + ], + "text/plain": [ + " Values\n", + "Domain \n", + "'Country' ' ?', ' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia'\n", + "'Education' ' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'\n", + "'Marital-Status' ' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'\n", + "'Occupation' ' ?', ' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'\n", + "'Race' ' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'\n", + "'Relationship' ' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'\n", + "'Sex' ' Female', ' Male'\n", + "'Workclass' ' ?', ' Federal-gov', ' Local-gov', ' Never-worked', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "context.show(schema_gen.outputs['schema'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kZWWdbA-m7zp" + }, + "source": [ + "To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JPViEz5RlA36" + }, + "source": [ + "### Transform\n", + "\n", + "`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.\n", + "\n", + "Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)).\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "PuNSiUKb4YJf" + }, + "outputs": [], + "source": [ + "_census_income_constants_module_file = 'census_income_constants.py'" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "id": "HPjhXuIF4YJh", + "jupyter": { + "source_hidden": true + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting census_income_constants.py\n" + ] + } + ], + "source": [ + "%%writefile {_census_income_constants_module_file}\n", + "\n", + "# Categorical features are assumed to each have a maximum value in the dataset.\n", + "MAX_CATEGORICAL_FEATURE_VALUES = [20]\n", + "\n", + "CATEGORICAL_FEATURE_KEYS = [\"Education-Num\"]\n", + "\n", + "\n", + "DENSE_FLOAT_FEATURE_KEYS = [\"Capital-Gain\", \"Hours-per-week\", \"Capital-Loss\"]\n", + "\n", + "# Number of buckets used by tf.transform for encoding each feature.\n", + "FEATURE_BUCKET_COUNT = 10\n", + "\n", + "BUCKET_FEATURE_KEYS = [\"Age\"]\n", + "\n", + "# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform\n", + "VOCAB_SIZE = 200\n", + "\n", + "# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.\n", + "OOV_SIZE = 10\n", + "\n", + "VOCAB_FEATURE_KEYS = [\"Workclass\", \"Education\", \"Marital-Status\", \"Occupation\", \n", + " \"Relationship\", \"Race\", \"Sex\", \"Country\"]\n", + "\n", + "# Keys\n", + "LABEL_KEY = \"Over-50K\"\n", + "\n", + "def transformed_name(key):\n", + " return key + '_xf'" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "id": "4AJ9hBs94YJm" + }, + "outputs": [], + "source": [ + "_census_income_transform_module_file = 'census_income_transform.py'" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "id": "MYmxxx9A4YJn", + "jupyter": { + "source_hidden": true + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting census_income_transform.py\n" + ] + } + ], + "source": [ + "%%writefile {_census_income_transform_module_file}\n", + "\n", + "import tensorflow as tf\n", + "import tensorflow_transform as tft\n", + "\n", + "import census_income_constants\n", + "\n", + "_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS\n", + "_VOCAB_FEATURE_KEYS = census_income_constants.VOCAB_FEATURE_KEYS\n", + "_VOCAB_SIZE = census_income_constants.VOCAB_SIZE\n", + "_OOV_SIZE = census_income_constants.OOV_SIZE\n", + "_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT\n", + "_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS\n", + "_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS\n", + "_LABEL_KEY = census_income_constants.LABEL_KEY\n", + "_transformed_name = census_income_constants.transformed_name\n", + "\n", + "\n", + "def preprocessing_fn(inputs):\n", + " \"\"\"tf.transform's callback function for preprocessing inputs.\n", + " Args:\n", + " inputs: map from feature keys to raw 
not-yet-transformed features.\n", + " Returns:\n", + " Map from string feature key to transformed feature operations.\n", + " \"\"\"\n", + " outputs = {}\n", + " for key in _DENSE_FLOAT_FEATURE_KEYS:\n", + " # Preserve this feature as a dense float, setting nan's to the mean.\n", + " outputs[_transformed_name(key)] = tft.scale_to_z_score(\n", + " _fill_in_missing(inputs[key]))\n", + "\n", + " for key in _VOCAB_FEATURE_KEYS:\n", + " # Build a vocabulary for this feature.\n", + " outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(\n", + " _fill_in_missing(inputs[key]),\n", + " top_k=_VOCAB_SIZE,\n", + " num_oov_buckets=_OOV_SIZE)\n", + "\n", + " for key in _BUCKET_FEATURE_KEYS:\n", + " outputs[_transformed_name(key)] = tft.bucketize(\n", + " _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)\n", + "\n", + " for key in _CATEGORICAL_FEATURE_KEYS:\n", + " outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])\n", + "\n", + " label = _fill_in_missing(inputs[_LABEL_KEY])\n", + " outputs[_transformed_name(_LABEL_KEY)] = label\n", + " \n", + " return outputs\n", + "\n", + "\n", + "def _fill_in_missing(x):\n", + " \"\"\"Replace missing values in a SparseTensor.\n", + " Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n", + " Args:\n", + " x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1\n", + " in the second dimension.\n", + " Returns:\n", + " A rank 1 tensor where missing values of `x` have been filled in.\n", + " \"\"\"\n", + " default_value = '' if x.dtype == tf.string else 0\n", + " return tf.squeeze(\n", + " tf.sparse.to_dense(\n", + " tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),\n", + " default_value),\n", + " axis=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "id": "jHfhth_GiZI9" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Generating ephemeral wheel package for '/home/hannes/tfx-addons/examples/model_card_generator/census_income_transform.py' (including modules: ['census_income_trainer', 'census_income_constants', 'census_income_transform']).\n", + "INFO:absl:User module package has hash fingerprint version aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.\n", + "INFO:absl:Executing: ['/bin/python3', '/tmp/tmpb107nej5/_tfx_generated_setup.py', 'bdist_wheel', '--bdist-dir', '/tmp/tmpbzrqa90p', '--dist-dir', '/tmp/tmprvxg66f3']\n", + "INFO:absl:Successfully built user code wheel distribution at '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'; target user module is 'census_income_transform'.\n", + "INFO:absl:Full user module path is 'census_income_transform@/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'\n", + "INFO:absl:Running driver for Transform\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for Transform\n", + "INFO:absl:Analyze the 'train' split and transform all splits when splits_config is not set.\n", + "INFO:absl:udf_utils.get_fn {'module_file': None, 'module_path': 'census_income_transform@/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl', 'preprocessing_fn': None} 
'preprocessing_fn'\n", + "INFO:absl:Installing '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' to a temporary directory.\n", + "INFO:absl:Executing: ['/bin/python3', '-m', 'pip', 'install', '--target', '/tmp/tmpociwwoao', '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl']\n", + "INFO:absl:Successfully installed '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'.\n", + "INFO:absl:udf_utils.get_fn {'module_file': None, 'module_path': 'census_income_transform@/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl', 'stats_options_updater_fn': None} 'stats_options_updater_fn'\n", + "INFO:absl:Installing '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' to a temporary directory.\n", + "INFO:absl:Executing: ['/bin/python3', '-m', 'pip', 'install', '--target', '/tmp/tmpdkd25vdf', '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl']\n", + "INFO:absl:Successfully installed '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'.\n", + "INFO:absl:Installing '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' to a temporary directory.\n", + "INFO:absl:Executing: ['/bin/python3', '-m', 'pip', 'install', '--target', '/tmp/tmpjon0p89a', '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl']\n", + "INFO:absl:Successfully installed '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Transform-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow_transform/tf_utils.py:324: Tensor.experimental_ref (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use ref() instead.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow_transform/tf_utils.py:324: Tensor.experimental_ref (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use ref() instead.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient 
implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "WARNING:root:This output type hint will be ignored and not used for type-checking purposes. Typically, output type hints for a PTransform are single (or nested) types wrapped by a PCollection, PDone, or None. 
Got: Tuple[Dict[, Union[, ]], Union[, Dict[, Dict[, ]]], ] instead.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_1/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_2/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_3/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. 
Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_4/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_5/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_6/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_7/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. 
Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_1/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_2/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_3/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_4/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_5/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_6/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary_7/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n", + "WARNING:root:This output type hint will be ignored and not used for type-checking purposes. Typically, output type hints for a PTransform are single (or nested) types wrapped by a PCollection, PDone, or None. 
Got: Tuple[Dict[, Union[, ]], Union[, Dict[, Dict[, ]]], ] instead.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is 
acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "WARNING:root:This input type hint will be ignored and not used for type-checking purposes. Typically, input type hints for a PTransform are single (or nested) types wrapped by a PCollection, or PBegin. Got: Dict[, ] instead.\n", + "WARNING:root:This output type hint will be ignored and not used for type-checking purposes. Typically, output type hints for a PTransform are single (or nested) types wrapped by a PCollection, PDone, or None. Got: List[] instead.\n", + "WARNING:root:This input type hint will be ignored and not used for type-checking purposes. Typically, input type hints for a PTransform are single (or nested) types wrapped by a PCollection, or PBegin. Got: Dict[, ] instead.\n", + "WARNING:root:This output type hint will be ignored and not used for type-checking purposes. Typically, output type hints for a PTransform are single (or nested) types wrapped by a PCollection, PDone, or None. Got: List[] instead.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Transform/transform_graph/4/.temp_path/tftransform_tmp/a4dc7527b15548dba3b151b359b24b3e/assets\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Transform/transform_graph/4/.temp_path/tftransform_tmp/a4dc7527b15548dba3b151b359b24b3e/assets\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a 
potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Transform/transform_graph/4/.temp_path/tftransform_tmp/2af7cdf70dae420e843627516e16b884/assets\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Transform/transform_graph/4/.temp_path/tftransform_tmp/2af7cdf70dae420e843627516e16b884/assets\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:If the number of unique tokens is smaller 
than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . 
Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . 
Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . 
Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . 
Setting to DenseTensor.\n", + "[... identical 'INFO:absl:Feature <name>_xf has a shape . Setting to DenseTensor.' log lines, repeated for each transformed feature, omitted ...]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n", + "[... identical 'INFO:absl:Feature <name>_xf has a shape . Setting to DenseTensor.' log lines omitted ...]\n", + "INFO:absl:Running publisher for Transform\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
[Interactive HTML rendering of the Transform ExecutionResult omitted: nested tables listing the component's input channels (examples, schema), its output channels (transform_graph, transformed_examples, updated_analyzer_cache, pre/post-transform schema and statistics artifacts, post-transform anomalies) with their /tmp artifact URIs, and the exec_properties; an equivalent text/plain summary follows.]
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: Transform\n", + " execution_id: 4\n", + " outputs:\n", + " transform_graph: OutputChannel(artifact_type=TransformGraph, producer_component_id=Transform, output_key=transform_graph, additional_properties={}, additional_custom_properties={})\n", + " transformed_examples: OutputChannel(artifact_type=Examples, producer_component_id=Transform, output_key=transformed_examples, additional_properties={}, additional_custom_properties={})\n", + " updated_analyzer_cache: OutputChannel(artifact_type=TransformCache, producer_component_id=Transform, output_key=updated_analyzer_cache, additional_properties={}, additional_custom_properties={})\n", + " pre_transform_schema: OutputChannel(artifact_type=Schema, producer_component_id=Transform, output_key=pre_transform_schema, additional_properties={}, additional_custom_properties={})\n", + " pre_transform_stats: OutputChannel(artifact_type=ExampleStatistics, producer_component_id=Transform, output_key=pre_transform_stats, additional_properties={}, additional_custom_properties={})\n", + " post_transform_schema: OutputChannel(artifact_type=Schema, producer_component_id=Transform, output_key=post_transform_schema, additional_properties={}, additional_custom_properties={})\n", + " post_transform_stats: OutputChannel(artifact_type=ExampleStatistics, producer_component_id=Transform, output_key=post_transform_stats, additional_properties={}, additional_custom_properties={})\n", + " post_transform_anomalies: OutputChannel(artifact_type=ExampleAnomalies, producer_component_id=Transform, output_key=post_transform_anomalies, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "transform = Transform(\n", + " examples=example_gen.outputs['examples'],\n", + " schema=schema_gen.outputs['schema'],\n", + " module_file=os.path.abspath(_census_income_transform_module_file))\n", + "context.run(transform)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "id": "SClrAaEGR1O5" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "
[Interactive HTML rendering of the 'transform_graph' output channel (TransformGraph artifact and its /tmp URI) omitted; an equivalent text/plain summary follows.]
" + ], + "text/plain": [ + "OutputChannel(artifact_type=TransformGraph, producer_component_id=Transform, output_key=transform_graph, additional_properties={}, additional_custom_properties={})" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "transform.outputs['transform_graph']" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OBJFtnl6lCg9" + }, + "source": [ + "### Trainer\n", + "Let's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, [see the tutorial](https://www.tensorflow.org/guide/keras)):" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "id": "N1376oq04YJt" + }, + "outputs": [], + "source": [ + "_census_income_trainer_module_file = 'census_income_trainer.py'" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "id": "nf9UuNng4YJu", + "jupyter": { + "source_hidden": true + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting census_income_trainer.py\n" + ] + } + ], + "source": [ + "%%writefile {_census_income_trainer_module_file}\n", + "\n", + "from typing import List, Text\n", + "\n", + "import os\n", + "import absl\n", + "import datetime\n", + "import tensorflow as tf\n", + "import tensorflow_transform as tft\n", + "\n", + "from tfx.components.trainer.executor import TrainerFnArgs\n", + "\n", + "import census_income_constants\n", + "\n", + "_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS\n", + "_VOCAB_FEATURE_KEYS = census_income_constants.VOCAB_FEATURE_KEYS\n", + "_VOCAB_SIZE = census_income_constants.VOCAB_SIZE\n", + "_OOV_SIZE = census_income_constants.OOV_SIZE\n", + "_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT\n", + "_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS\n", + "_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS\n", + "_MAX_CATEGORICAL_FEATURE_VALUES = census_income_constants.MAX_CATEGORICAL_FEATURE_VALUES\n", + "_LABEL_KEY = census_income_constants.LABEL_KEY\n", + "_transformed_name = census_income_constants.transformed_name\n", + "\n", + "\n", + "def _transformed_names(keys):\n", + " return [_transformed_name(key) for key in keys]\n", + "\n", + "\n", + "def _gzip_reader_fn(filenames):\n", + " \"\"\"Small utility returning a record reader that can read gzip'ed files.\"\"\"\n", + " return tf.data.TFRecordDataset(\n", + " filenames,\n", + " compression_type='GZIP')\n", + "\n", + "\n", + "def _get_serve_tf_examples_fn(model, tf_transform_output):\n", + " \"\"\"Returns a function that parses a serialized tf.Example and applies TFT.\"\"\"\n", + "\n", + " model.tft_layer = tf_transform_output.transform_features_layer()\n", + "\n", + " @tf.function\n", + " def serve_tf_examples_fn(serialized_tf_examples):\n", + " \"\"\"Returns the output to be used in the serving signature.\"\"\"\n", + " feature_spec = tf_transform_output.raw_feature_spec()\n", + " feature_spec.pop(_LABEL_KEY)\n", + " parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n", + "\n", + " transformed_features = model.tft_layer(parsed_features)\n", + " if _transformed_name(_LABEL_KEY) in transformed_features:\n", + " transformed_features.pop(_transformed_name(_LABEL_KEY))\n", + "\n", + " return model(transformed_features)\n", + "\n", + " return serve_tf_examples_fn\n", + "\n", + "\n", + "def _input_fn(file_pattern: List[Text],\n", + " tf_transform_output: 
tft.TFTransformOutput,\n", + " batch_size: int = 200) -> tf.data.Dataset:\n", + " \"\"\"Generates features and label for tuning/training.\n", + "\n", + " Args:\n", + " file_pattern: List of paths or patterns of input tfrecord files.\n", + " tf_transform_output: A TFTransformOutput.\n", + " batch_size: representing the number of consecutive elements of returned\n", + " dataset to combine in a single batch\n", + "\n", + " Returns:\n", + " A dataset that contains (features, indices) tuple where features is a\n", + " dictionary of Tensors, and indices is a single Tensor of label indices.\n", + " \"\"\"\n", + " transformed_feature_spec = (\n", + " tf_transform_output.transformed_feature_spec().copy())\n", + "\n", + " dataset = tf.data.experimental.make_batched_features_dataset(\n", + " file_pattern=file_pattern,\n", + " batch_size=batch_size,\n", + " features=transformed_feature_spec,\n", + " reader=_gzip_reader_fn,\n", + " label_key=_transformed_name(_LABEL_KEY))\n", + "\n", + " return dataset\n", + "\n", + "\n", + "def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:\n", + " \"\"\"Creates a DNN Keras model.\n", + "\n", + " Args:\n", + " hidden_units: [int], the layer sizes of the DNN (input layer first).\n", + "\n", + " Returns:\n", + " A keras Model.\n", + " \"\"\"\n", + " real_valued_columns = [\n", + " tf.feature_column.numeric_column(key, shape=())\n", + " for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n", + " ]\n", + " categorical_columns = [\n", + " tf.feature_column.categorical_column_with_identity(\n", + " key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n", + " for key in _transformed_names(_VOCAB_FEATURE_KEYS)\n", + " ]\n", + " categorical_columns += [\n", + " tf.feature_column.categorical_column_with_identity(\n", + " key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n", + " for key in _transformed_names(_BUCKET_FEATURE_KEYS)\n", + " ]\n", + " categorical_columns += [\n", + " tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n", + " key,\n", + " num_buckets=num_buckets,\n", + " default_value=0) for key, num_buckets in zip(\n", + " _transformed_names(_CATEGORICAL_FEATURE_KEYS),\n", + " _MAX_CATEGORICAL_FEATURE_VALUES)\n", + " ]\n", + " indicator_column = [\n", + " tf.feature_column.indicator_column(categorical_column)\n", + " for categorical_column in categorical_columns\n", + " ]\n", + "\n", + " model = _wide_and_deep_classifier(\n", + " # TODO(b/139668410) replace with premade wide_and_deep keras model\n", + " wide_columns=indicator_column,\n", + " deep_columns=real_valued_columns,\n", + " dnn_hidden_units=hidden_units or [100, 70, 50, 25])\n", + " return model\n", + "\n", + "\n", + "def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):\n", + " \"\"\"Build a simple keras wide and deep model.\n", + "\n", + " Args:\n", + " wide_columns: Feature columns wrapped in indicator_column for wide (linear)\n", + " part of the model.\n", + " deep_columns: Feature columns for deep part of the model.\n", + " dnn_hidden_units: [int], the layer sizes of the hidden DNN.\n", + "\n", + " Returns:\n", + " A Wide and Deep Keras model\n", + " \"\"\"\n", + " # Following values are hard coded for simplicity in this example,\n", + " # However prefarably they should be passsed in as hparams.\n", + "\n", + " # Keras needs the feature definitions at compile time.\n", + " # TODO(b/139081439): Automate generation of input layers from FeatureColumn.\n", + " input_layers = {\n", + " colname: 
tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)\n", + " for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n", + " }\n", + " input_layers.update({\n", + " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", + " for colname in _transformed_names(_VOCAB_FEATURE_KEYS)\n", + " })\n", + " input_layers.update({\n", + " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", + " for colname in _transformed_names(_BUCKET_FEATURE_KEYS)\n", + " })\n", + " input_layers.update({\n", + " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", + " for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)\n", + " })\n", + "\n", + " # TODO(b/161816639): SparseFeatures for feature columns + Keras.\n", + " deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)\n", + " for numnodes in dnn_hidden_units:\n", + " deep = tf.keras.layers.Dense(numnodes)(deep)\n", + " wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)\n", + "\n", + " output = tf.keras.layers.Dense(\n", + " 1, activation='sigmoid')(\n", + " tf.keras.layers.concatenate([deep, wide]))\n", + "\n", + " model = tf.keras.Model(input_layers, output)\n", + " model.compile(\n", + " loss='binary_crossentropy',\n", + " optimizer=tf.keras.optimizers.Adam(lr=0.001),\n", + " metrics=[tf.keras.metrics.BinaryAccuracy()])\n", + " model.summary(print_fn=absl.logging.info)\n", + " return model\n", + "\n", + "\n", + "# TFX Trainer will call this function.\n", + "def run_fn(fn_args: TrainerFnArgs):\n", + " \"\"\"Train the model based on given args.\n", + "\n", + " Args:\n", + " fn_args: Holds args used to train the model as name/value pairs.\n", + " \"\"\"\n", + " # Number of nodes in the first layer of the DNN\n", + " first_dnn_layer_size = 100\n", + " num_dnn_layers = 4\n", + " dnn_decay_factor = 0.7\n", + "\n", + " tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n", + "\n", + " train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)\n", + " eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)\n", + "\n", + " model = _build_keras_model(\n", + " # Construct layers sizes with exponetial decay\n", + " hidden_units=[\n", + " max(2, int(first_dnn_layer_size * dnn_decay_factor**i))\n", + " for i in range(num_dnn_layers)\n", + " ])\n", + "\n", + " # This log path might change in the future.\n", + " log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n", + " tensorboard_callback = tf.keras.callbacks.TensorBoard(\n", + " log_dir=log_dir, update_freq='batch')\n", + " model.fit(\n", + " train_dataset,\n", + " steps_per_epoch=fn_args.train_steps,\n", + " validation_data=eval_dataset,\n", + " validation_steps=fn_args.eval_steps,\n", + " callbacks=[tensorboard_callback])\n", + "\n", + " signatures = {\n", + " 'serving_default':\n", + " _get_serve_tf_examples_fn(model,\n", + " tf_transform_output).get_concrete_function(\n", + " tf.TensorSpec(\n", + " shape=[None],\n", + " dtype=tf.string,\n", + " name='examples')),\n", + " }\n", + " model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "id": "429-vvCWibO0" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:absl:`custom_executor_spec` is deprecated. 
Please customize component directly.\n", + "INFO:absl:Generating ephemeral wheel package for '/home/hannes/tfx-addons/examples/model_card_generator/census_income_trainer.py' (including modules: ['census_income_trainer', 'census_income_constants', 'census_income_transform']).\n", + "INFO:absl:User module package has hash fingerprint version aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.\n", + "INFO:absl:Executing: ['/bin/python3', '/tmp/tmp0qjqlqw0/_tfx_generated_setup.py', 'bdist_wheel', '--bdist-dir', '/tmp/tmphsz9hiax', '--dist-dir', '/tmp/tmpcxgny7wz']\n", + "INFO:absl:Successfully built user code wheel distribution at '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'; target user module is 'census_income_trainer'.\n", + "INFO:absl:Full user module path is 'census_income_trainer@/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'\n", + "INFO:absl:Running driver for Trainer\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for Trainer\n", + "INFO:absl:Train on the 'train' split when train_args.splits is not set.\n", + "INFO:absl:Evaluate on the 'eval' split when eval_args.splits is not set.\n", + "WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE\n", + "WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE\n", + "WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE\n", + "INFO:absl:udf_utils.get_fn {'train_args': '{\\n \"num_steps\": 100\\n}', 'eval_args': '{\\n \"num_steps\": 50\\n}', 'module_file': None, 'run_fn': None, 'trainer_fn': None, 'custom_config': 'null', 'module_path': 'census_income_trainer@/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'} 'run_fn'\n", + "INFO:absl:Installing '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' to a temporary directory.\n", + "INFO:absl:Executing: ['/bin/python3', '-m', 'pip', 'install', '--target', '/tmp/tmpk8nyo_0r', '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl']\n", + "INFO:absl:Successfully installed '/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'.\n", + "INFO:absl:Training model.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . 
Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Age_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Gain_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Capital-Loss_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Country_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education-Num_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Education_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Hours-per-week_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Marital-Status_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Occupation_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Over-50K_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Race_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Relationship_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Sex_xf has a shape . Setting to DenseTensor.\n", + "INFO:absl:Feature Workclass_xf has a shape . Setting to DenseTensor.\n", + "WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.\n", + "INFO:absl:Model: \"model\"\n", + "INFO:absl:__________________________________________________________________________________________________\n", + "INFO:absl: Layer (type) Output Shape Param # Connected to \n", + "INFO:absl:==================================================================================================\n", + "INFO:absl: Age_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Capital-Gain_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Capital-Loss_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Country_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Education-Num_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Education_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Hours-per-week_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Marital-Status_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Occupation_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Race_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Relationship_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Sex_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: Workclass_xf (InputLayer) [(None,)] 0 [] \n", + "INFO:absl: \n", + "INFO:absl: dense_features (DenseFeatures) (None, 3) 0 ['Age_xf[0][0]', \n", + "INFO:absl: 'Capital-Gain_xf[0][0]', \n", + "INFO:absl: 'Capital-Loss_xf[0][0]', \n", + "INFO:absl: 'Country_xf[0][0]', \n", + "INFO:absl: 'Education-Num_xf[0][0]', \n", + "INFO:absl: 'Education_xf[0][0]', \n", + "INFO:absl: 'Hours-per-week_xf[0][0]', \n", + "INFO:absl: 'Marital-Status_xf[0][0]', \n", + "INFO:absl: 'Occupation_xf[0][0]', \n", + "INFO:absl: 'Race_xf[0][0]', \n", + "INFO:absl: 'Relationship_xf[0][0]', 
\n", + "INFO:absl: 'Sex_xf[0][0]', \n", + "INFO:absl: 'Workclass_xf[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense (Dense) (None, 100) 400 ['dense_features[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense_1 (Dense) (None, 70) 7070 ['dense[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense_2 (Dense) (None, 48) 3408 ['dense_1[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense_3 (Dense) (None, 34) 1666 ['dense_2[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense_features_1 (DenseFeature (None, 1710) 0 ['Age_xf[0][0]', \n", + "INFO:absl: s) 'Capital-Gain_xf[0][0]', \n", + "INFO:absl: 'Capital-Loss_xf[0][0]', \n", + "INFO:absl: 'Country_xf[0][0]', \n", + "INFO:absl: 'Education-Num_xf[0][0]', \n", + "INFO:absl: 'Education_xf[0][0]', \n", + "INFO:absl: 'Hours-per-week_xf[0][0]', \n", + "INFO:absl: 'Marital-Status_xf[0][0]', \n", + "INFO:absl: 'Occupation_xf[0][0]', \n", + "INFO:absl: 'Race_xf[0][0]', \n", + "INFO:absl: 'Relationship_xf[0][0]', \n", + "INFO:absl: 'Sex_xf[0][0]', \n", + "INFO:absl: 'Workclass_xf[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: concatenate (Concatenate) (None, 1744) 0 ['dense_3[0][0]', \n", + "INFO:absl: 'dense_features_1[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl: dense_4 (Dense) (None, 1) 1745 ['concatenate[0][0]'] \n", + "INFO:absl: \n", + "INFO:absl:==================================================================================================\n", + "INFO:absl:Total params: 14,289\n", + "INFO:absl:Trainable params: 14,289\n", + "INFO:absl:Non-trainable params: 0\n", + "INFO:absl:__________________________________________________________________________________________________\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100/100 [==============================] - 2s 6ms/step - loss: 0.5044 - binary_accuracy: 0.7690 - val_loss: 0.4465 - val_binary_accuracy: 0.8050\n", + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:struct2tensor is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_decision_forests is not available.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:tensorflow_text is not available.\n", + "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Relationship has no shape. 
Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n", + "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.\n", + "Instructions for updating:\n", + "Lambda fuctions will be no more assumed to be used in the statement where they are used, or at least in the same block. https://github.com/tensorflow/tensorflow/issues/56089\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.\n", + "Instructions for updating:\n", + "Lambda fuctions will be no more assumed to be used in the statement where they are used, or at least in the same block. https://github.com/tensorflow/tensorflow/issues/56089\n", + "WARNING:absl:Found untraced functions such as _update_step_xla while saving (showing 1 of 1). These functions will not be directly callable after loading.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5/Format-Serving/assets\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5/Format-Serving/assets\n", + "INFO:absl:Training complete. Model written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5/Format-Serving. ModelRun written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model_run/5\n", + "INFO:absl:Running publisher for Trainer\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
[Flattened HTML artifact view: ExecutionResult (execution_id 5) for the Trainer component. Inputs: an Examples channel (uri /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Transform/transformed_examples/4, span 0, split_names ["train", "eval"]), a TransformGraph channel (uri .../Transform/transform_graph/4), and a Schema channel (uri .../SchemaGen/schema/3). Outputs: a Model channel (uri .../Trainer/model/5) and a ModelRun channel (uri .../Trainer/model_run/5). exec_properties: train_args {"num_steps": 100}, eval_args {"num_steps": 50}, module_file/run_fn/trainer_fn None, custom_config null, module_path pointing at the generated tfx_user_code_Trainer wheel. The .component.inputs and .component.outputs sections repeat the same channels and artifacts.]
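Not part of the original notebook — a minimal sketch, assuming the output channel's `.get()` accessor and the Format-Serving layout reported in the Trainer logs above, of how the freshly trained SavedModel could be located and its serving signature checked:

import os
import tensorflow as tf

# Sketch only: locate the SavedModel written by the Trainer run above
# (the logs report .../Trainer/model/5/Format-Serving) and confirm the
# 'serving_default' signature registered in run_fn.
model_uri = trainer.outputs['model'].get()[0].uri         # assumes .get() on the output channel
serving_dir = os.path.join(model_uri, 'Format-Serving')   # layout used by the GenericExecutor
loaded = tf.saved_model.load(serving_dir)
print(list(loaded.signatures.keys()))                     # expected: ['serving_default']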
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: Trainer\n", + " execution_id: 5\n", + " outputs:\n", + " model: OutputChannel(artifact_type=Model, producer_component_id=Trainer, output_key=model, additional_properties={}, additional_custom_properties={})\n", + " model_run: OutputChannel(artifact_type=ModelRun, producer_component_id=Trainer, output_key=model_run, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "trainer = Trainer(\n", + " module_file=os.path.abspath(_census_income_trainer_module_file),\n", + " custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n", + " examples=transform.outputs['transformed_examples'],\n", + " transform_graph=transform.outputs['transform_graph'],\n", + " schema=schema_gen.outputs['schema'],\n", + " train_args=trainer_pb2.TrainArgs(num_steps=100),\n", + " eval_args=trainer_pb2.EvalArgs(num_steps=50))\n", + "context.run(trainer)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "id": "cSb8fhbQDmyJ" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'model': OutputChannel(artifact_type=Model, producer_component_id=Trainer, output_key=model, additional_properties={}, additional_custom_properties={}),\n", + " 'model_run': OutputChannel(artifact_type=ModelRun, producer_component_id=Trainer, output_key=model_run, additional_properties={}, additional_custom_properties={})}" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "trainer.outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FmPftrv0lEQy" + }, + "source": [ + "### Evaluator\n", + "The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library. \n", + "\n", + "`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values. See an example of this configuration below:" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "id": "fVhfzzh9PDEx" + }, + "outputs": [], + "source": [ + "from google.protobuf.wrappers_pb2 import BoolValue\n", + "\n", + "eval_config = tfma.EvalConfig(\n", + " model_specs=[\n", + " # This assumes a serving model with signature 'serving_default'. 
If\n", + " # using estimator based EvalSavedModel, add signature_name: 'eval' and \n", + " # remove the label_key.\n", + " tfma.ModelSpec(label_key=\"Over-50K\")\n", + " ],\n", + " metrics_specs=[\n", + " tfma.MetricsSpec(\n", + " # The metrics added here are in addition to those saved with the\n", + " # model (assuming either a keras model or EvalSavedModel is used).\n", + " # Any metrics added into the saved model (for example using\n", + " # model.compile(..., metrics=[...]), etc) will be computed\n", + " # automatically.\n", + " # To add validation thresholds for metrics saved with the model,\n", + " # add them keyed by metric name to the thresholds map.\n", + " metrics=[\n", + " tfma.MetricConfig(class_name='ExampleCount'),\n", + " tfma.MetricConfig(class_name='BinaryAccuracy'),\n", + " tfma.MetricConfig(class_name='FairnessIndicators',\n", + " config='{ \"thresholds\": [0.5] }'),\n", + " ]\n", + " )\n", + " ],\n", + " slicing_specs=[\n", + " # An empty slice spec means the overall slice, i.e. the whole dataset.\n", + " tfma.SlicingSpec(),\n", + " # Data can be sliced along a feature column. In this case, data is\n", + " # sliced by feature column Race and Sex.\n", + " tfma.SlicingSpec(feature_keys=['Race']),\n", + " tfma.SlicingSpec(feature_keys=['Sex']),\n", + " tfma.SlicingSpec(feature_keys=['Race', 'Sex']),\n", + " ],\n", + " options = tfma.Options(compute_confidence_intervals=BoolValue(value=True))\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QfbptwWQ4k0z" + }, + "source": [ + "Warning: the Evaluator Component may take 5-10 minutes to run due to errors regarding \"inconsistent references\". " + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "id": "Zjcx8g6mihSt" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Running driver for Evaluator\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for Evaluator\n", + "INFO:absl:udf_utils.get_fn {'eval_config': '{\\n \"metrics_specs\": [\\n {\\n \"metrics\": [\\n {\\n \"class_name\": \"ExampleCount\"\\n },\\n {\\n \"class_name\": \"BinaryAccuracy\"\\n },\\n {\\n \"class_name\": \"FairnessIndicators\",\\n \"config\": \"{ \\\\\"thresholds\\\\\": [0.5] }\"\\n }\\n ]\\n }\\n ],\\n \"model_specs\": [\\n {\\n \"label_key\": \"Over-50K\"\\n }\\n ],\\n \"options\": {\\n \"compute_confidence_intervals\": true\\n },\\n \"slicing_specs\": [\\n {},\\n {\\n \"feature_keys\": [\\n \"Race\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Sex\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Race\",\\n \"Sex\"\\n ]\\n }\\n ]\\n}', 'feature_slicing_spec': None, 'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'module_file': None, 'module_path': None} 'custom_eval_shared_model'\n", + "INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. 
This is likely because a baseline model was not provided: updated_config=\n", + "model_specs {\n", + " label_key: \"Over-50K\"\n", + "}\n", + "slicing_specs {\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Sex\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + " feature_keys: \"Sex\"\n", + "}\n", + "metrics_specs {\n", + " metrics {\n", + " class_name: \"ExampleCount\"\n", + " }\n", + " metrics {\n", + " class_name: \"BinaryAccuracy\"\n", + " }\n", + " metrics {\n", + " class_name: \"FairnessIndicators\"\n", + " config: \"{ \\\"thresholds\\\": [0.5] }\"\n", + " }\n", + "}\n", + "options {\n", + " compute_confidence_intervals {\n", + " value: true\n", + " }\n", + " confidence_intervals {\n", + " method: JACKKNIFE\n", + " }\n", + "}\n", + "\n", + "INFO:absl:Using /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5/Format-Serving as model.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fdec6431d30> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fdec6431d30> and ).\n", + "INFO:absl:The 'example_splits' parameter is not set, using 'eval' split.\n", + "INFO:absl:Evaluating model.\n", + "INFO:absl:udf_utils.get_fn {'eval_config': '{\\n \"metrics_specs\": [\\n {\\n \"metrics\": [\\n {\\n \"class_name\": \"ExampleCount\"\\n },\\n {\\n \"class_name\": \"BinaryAccuracy\"\\n },\\n {\\n \"class_name\": \"FairnessIndicators\",\\n \"config\": \"{ \\\\\"thresholds\\\\\": [0.5] }\"\\n }\\n ]\\n }\\n ],\\n \"model_specs\": [\\n {\\n \"label_key\": \"Over-50K\"\\n }\\n ],\\n \"options\": {\\n \"compute_confidence_intervals\": true\\n },\\n \"slicing_specs\": [\\n {},\\n {\\n \"feature_keys\": [\\n \"Race\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Sex\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Race\",\\n \"Sex\"\\n ]\\n }\\n ]\\n}', 'feature_slicing_spec': None, 'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'module_file': None, 'module_path': None} 'custom_extractors'\n", + "INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. 
This is likely because a baseline model was not provided: updated_config=\n", + "model_specs {\n", + " label_key: \"Over-50K\"\n", + "}\n", + "slicing_specs {\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Sex\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + " feature_keys: \"Sex\"\n", + "}\n", + "metrics_specs {\n", + " metrics {\n", + " class_name: \"ExampleCount\"\n", + " }\n", + " metrics {\n", + " class_name: \"BinaryAccuracy\"\n", + " }\n", + " metrics {\n", + " class_name: \"FairnessIndicators\"\n", + " config: \"{ \\\"thresholds\\\": [0.5] }\"\n", + " }\n", + " model_names: \"\"\n", + "}\n", + "options {\n", + " compute_confidence_intervals {\n", + " value: true\n", + " }\n", + " confidence_intervals {\n", + " method: JACKKNIFE\n", + " }\n", + "}\n", + "\n", + "INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config=\n", + "model_specs {\n", + " label_key: \"Over-50K\"\n", + "}\n", + "slicing_specs {\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Sex\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + " feature_keys: \"Sex\"\n", + "}\n", + "metrics_specs {\n", + " metrics {\n", + " class_name: \"ExampleCount\"\n", + " }\n", + " metrics {\n", + " class_name: \"BinaryAccuracy\"\n", + " }\n", + " metrics {\n", + " class_name: \"FairnessIndicators\"\n", + " config: \"{ \\\"thresholds\\\": [0.5] }\"\n", + " }\n", + " model_names: \"\"\n", + "}\n", + "options {\n", + " compute_confidence_intervals {\n", + " value: true\n", + " }\n", + " confidence_intervals {\n", + " method: JACKKNIFE\n", + " }\n", + "}\n", + "\n", + "INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config=\n", + "model_specs {\n", + " label_key: \"Over-50K\"\n", + "}\n", + "slicing_specs {\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Sex\"\n", + "}\n", + "slicing_specs {\n", + " feature_keys: \"Race\"\n", + " feature_keys: \"Sex\"\n", + "}\n", + "metrics_specs {\n", + " metrics {\n", + " class_name: \"ExampleCount\"\n", + " }\n", + " metrics {\n", + " class_name: \"BinaryAccuracy\"\n", + " }\n", + " metrics {\n", + " class_name: \"FairnessIndicators\"\n", + " config: \"{ \\\"thresholds\\\": [0.5] }\"\n", + " }\n", + " model_names: \"\"\n", + "}\n", + "options {\n", + " compute_confidence_intervals {\n", + " value: true\n", + " }\n", + " confidence_intervals {\n", + " method: JACKKNIFE\n", + " }\n", + "}\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fdec7e3ca60> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde400b2a60> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3f34bc10> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3f34bc10> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3e613a90> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3e613a90> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3d970220> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3d970220> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde3cd88dc0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3cd88dc0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3c0fe760> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3c0fe760> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3b349a60> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3b349a60> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3a6918e0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde3a6918e0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde39939250> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde39939250> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde38c1b8e0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde38c1b8e0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde37ee3c40> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde37ee3c40> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde371e47c0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde371e47c0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde364b19a0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde364b19a0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde357ad760> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde357ad760> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde34d7b850> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde34d7b850> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde34006790> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde34006790> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde332e7fa0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde332e7fa0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde325e7be0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde325e7be0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde32581af0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde32581af0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde30bbef10> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde30bbef10> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2fe841c0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2fe841c0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2f23adc0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2f23adc0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2e458910> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2e458910> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2d7bdaf0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2d7bdaf0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2ca079d0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2ca079d0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2bd2d7c0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2bd2d7c0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2b2c7850> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde2b2c7850> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2a5c0f40> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2a5c0f40> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde29893dc0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde29893dc0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde28b9ae20> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde28b9ae20> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde27e64f70> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde27e64f70> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fdea2777e50> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fdea2777e50> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde25bcbfa0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde25bcbfa0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde24ee9460> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde24ee9460> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde2418cb50> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde2418cb50> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde234d5e80> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde234d5e80> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde227ae910> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde227ae910> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde21a99ca0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde21a99ca0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde20d6b460> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde20d6b460> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde200b1760> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde200b1760> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde20d79d90> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde20d79d90> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ea80c40> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ea80c40> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ddd9070> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ddd9070> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ddf13a0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ddf13a0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde43f9e8b0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde43f9e8b0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1baffc10> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1baffc10> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ae127f0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1ae127f0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1a122e50> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1a122e50> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde194697f0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde194697f0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. 
The referenced variables are:(TransformFeaturesLayer object at 0x7fde1879caf0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1879caf0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde17a410d0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde17a410d0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde16d5e3d0> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde16d5e3d0> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1605e160> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1605e160> and ).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde1531eb50> and ).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(TransformFeaturesLayer object at 0x7fde5bc3ebb0> and ).\n", + "INFO:absl:Evaluation complete. Results written to /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6.\n", + "INFO:absl:No threshold configured, will not validate model.\n", + "INFO:absl:Running publisher for Evaluator\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
ExecutionResult at 0x7fdec6271970
.execution_id6
.component\n", + "\n", + "
Evaluator at 0x7fdec621b820
.inputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
['model']\n", + "\n", + "
Channel of type 'Model' (1 artifact) at 0x7fdec629f130
.type_nameModel
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Model' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5) at 0x7fdec6236040
.type<class 'tfx.types.standard_artifacts.Model'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5
.outputs
['evaluation']\n", + "\n", + "
Channel of type 'ModelEvaluation' (1 artifact) at 0x7fdec6271370
.type_nameModelEvaluation
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelEvaluation' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6) at 0x7fdee54d3160
.type<class 'tfx.types.standard_artifacts.ModelEvaluation'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6
['blessing']\n", + "\n", + "
Channel of type 'ModelBlessing' (1 artifact) at 0x7fdec6271d90
.type_nameModelBlessing
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelBlessing' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/blessing/6) at 0x7fdec6265b80
.type<class 'tfx.types.standard_artifacts.ModelBlessing'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/blessing/6
.exec_properties
['eval_config']{\n", + " "metrics_specs": [\n", + " {\n", + " "metrics": [\n", + " {\n", + " "class_name": "ExampleCount"\n", + " },\n", + " {\n", + " "class_name": "BinaryAccuracy"\n", + " },\n", + " {\n", + " "class_name": "FairnessIndicators",\n", + " "config": "{ \\"thresholds\\": [0.5] }"\n", + " }\n", + " ]\n", + " }\n", + " ],\n", + " "model_specs": [\n", + " {\n", + " "label_key": "Over-50K"\n", + " }\n", + " ],\n", + " "options": {\n", + " "compute_confidence_intervals": true\n", + " },\n", + " "slicing_specs": [\n", + " {},\n", + " {\n", + " "feature_keys": [\n", + " "Race"\n", + " ]\n", + " },\n", + " {\n", + " "feature_keys": [\n", + " "Sex"\n", + " ]\n", + " },\n", + " {\n", + " "feature_keys": [\n", + " "Race",\n", + " "Sex"\n", + " ]\n", + " }\n", + " ]\n", + "}
['feature_slicing_spec']None
['fairness_indicator_thresholds']null
['example_splits']null
['module_file']None
['module_path']None
.component.inputs
['examples']\n", + "\n", + "
Channel of type 'Examples' (1 artifact) at 0x7fdf3a43f550
.type_nameExamples
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Examples' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1) at 0x7fdfb83941c0
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
['model']\n", + "\n", + "
Channel of type 'Model' (1 artifact) at 0x7fdec629f130
.type_nameModel
._artifacts
[0]\n", + "\n", + "
Artifact of type 'Model' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5) at 0x7fdec6236040
.type<class 'tfx.types.standard_artifacts.Model'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Trainer/model/5
.component.outputs
['evaluation']\n", + "\n", + "
Channel of type 'ModelEvaluation' (1 artifact) at 0x7fdec6271370
.type_nameModelEvaluation
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelEvaluation' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6) at 0x7fdee54d3160
.type<class 'tfx.types.standard_artifacts.ModelEvaluation'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6
['blessing']\n", + "\n", + "
Channel of type 'ModelBlessing' (1 artifact) at 0x7fdec6271d90
.type_nameModelBlessing
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelBlessing' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/blessing/6) at 0x7fdec6265b80
.type<class 'tfx.types.standard_artifacts.ModelBlessing'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/blessing/6
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: Evaluator\n", + " execution_id: 6\n", + " outputs:\n", + " evaluation: OutputChannel(artifact_type=ModelEvaluation, producer_component_id=Evaluator, output_key=evaluation, additional_properties={}, additional_custom_properties={})\n", + " blessing: OutputChannel(artifact_type=ModelBlessing, producer_component_id=Evaluator, output_key=blessing, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use TFMA to compute a evaluation statistics over features of a model and\n", + "# validate them against a baseline.\n", + "\n", + "# TODO(b/226656838) Fix the inconsistent references warnings.\n", + "evaluator = Evaluator(\n", + " examples=example_gen.outputs['examples'],\n", + " model=trainer.outputs['model'],\n", + " eval_config=eval_config)\n", + "context.run(evaluator)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "id": "k4GghePOTJxL" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'evaluation': OutputChannel(artifact_type=ModelEvaluation, producer_component_id=Evaluator, output_key=evaluation, additional_properties={}, additional_custom_properties={}),\n", + " 'blessing': OutputChannel(artifact_type=ModelBlessing, producer_component_id=Evaluator, output_key=blessing, additional_properties={}, additional_custom_properties={})}" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluator.outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y5TMskWe9LL0" + }, + "source": [ + "Using the `evaluation` output we can show the default visualization of global metrics on the entire evaluation set." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "id": "U729j5X5QQUQ" + }, + "outputs": [ + { + "data": { + "text/html": [ + "Artifact at /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow_model_analysis/writers/metrics_plots_and_validations_writer.py:110: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use eager execution and: \n", + "`tf.data.TFRecordDataset(path)`\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/hannes/.local/lib/python3.8/site-packages/tensorflow_model_analysis/writers/metrics_plots_and_validations_writer.py:110: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use eager execution and: \n", + "`tf.data.TFRecordDataset(path)`\n" + ] + } + ], + "source": [ + "context.show(evaluator.outputs['evaluation'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cdTLb_-8dLcu" + }, + "source": [ + "###Model Card Generator\n", + "The `Model Card` component is a [TFX Component](https://www.tensorflow.org/tfx/guide/understanding_tfx_pipelines#component) that generates model cards-- short documentation that provides key information about a machine learning model-- from the StatisticGen outputs, the Evaluator outputs, and a prepared json annotation. Optionally, a pushed model or a template can be provided as well. \n", + "\n", + "The model cards assets are saved to a ModelCard artifact that can be fetched from the `outputs['model_card]'` property." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l8LY5bMUJsuM" + }, + "source": [ + "#### Prepare Annotation Json for Model Card\n", + "\n", + "It is also important to document model information that might be important to downstream users, such as its limitations, intended use cases, trade offs, and ethical considerations. Thus, we will prepare this information in json format to be used in the model card generating step." + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "id": "SIUiTor4Johj" + }, + "outputs": [], + "source": [ + "model_card_json = {'model_details': {'name': 'Census Income Classifier'}, \n", + " 'model_details': {'overview': \n", + " 'This is a wide and deep Keras model which aims to classify whether or not '\n", + " 'an individual has an income of over $50,000 based on various demographic '\n", + " 'features. The model is trained on the UCI Census Income Dataset. This is '\n", + " 'not a production model, and this dataset has traditionally only been used '\n", + " 'for research purposes. In this Model Card, you can review quantitative '\n", + " 'components of the model’s performance and data, as well as information '\n", + " 'about the model’s intended uses, limitations, and ethical considerations.'},\n", + " 'model_details': {'owners': [{\"name\": \"Model Cards Team\", \"contact\": \"model-cards@google.com\"}]},\n", + " 'considerations': {'use_cases':[{\"description\":'This dataset that this model was trained on was originally created to '\n", + " 'support the machine learning community in conducting empirical analysis '\n", + " 'of ML algorithms. 
The Adult Data Set can be used in fairness-related '\n", + " 'studies that compare inequalities across sex and race, based on '\n", + " 'people’s annual incomes.'}]},\n", + " 'considerations': {'limitations': [{'description':\n", + " 'This is a class-imbalanced dataset across a variety of sensitive classes.'\n", + " ' The ratio of male-to-female examples is about 2:1 and there are far more'\n", + " ' examples with the “white” attribute than every other race combined. '\n", + " 'Furthermore, the ratio of $50,000 or less earners to $50,000 or more '\n", + " 'earners is just over 3:1. Due to the imbalance across income levels, we '\n", + " 'can see that our true negative rate seems quite high, while our true '\n", + " 'positive rate seems quite low. This is true to an even greater degree '\n", + " 'when we only look at the “female” sub-group, because there are even '\n", + " 'fewer female examples in the $50,000+ earner group, causing our model to '\n", + " 'overfit these examples. To avoid this, we can try various remediation '\n", + " 'strategies in future iterations (e.g. undersampling, hyperparameter '\n", + " 'tuning, etc), but we may not be able to fix all of the fairness issues.'}]}, \n", + " 'considerations': {'ethical_considerations': [\n", + " {'name': 'We risk expressing the viewpoint that the attributes in this dataset '\n", + " 'are the only ones that are predictive of someone’s income, even '\n", + " 'though we know this is not the case.', \n", + " 'mitigation_strategy': 'As mentioned, some interventions may need to be '\n", + " 'performed to address the class imbalances in the dataset.'}]}\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SOYofSZKOMZx" + }, + "source": [ + "#### Generate the Model Card.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "id": "bspjHq6u5aFf" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:absl:Running driver for ModelCardGenerator\n", + "INFO:absl:MetadataStore with DB connection initialized\n", + "INFO:absl:Running executor for ModelCardGenerator\n", + "INFO:absl:EvalResult found at path /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6\n", + "INFO:absl:Reading stats artifact from Split-eval\n", + "INFO:absl:Reading stats artifact from Split-train\n", + "INFO:absl:Running publisher for ModelCardGenerator\n", + "INFO:absl:MetadataStore with DB connection initialized\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "
ExecutionResult at 0x7fdfa84057f0
.execution_id7
.component\n", + "\n", + "
ModelCardGenerator at 0x7fdee4c9d4c0
.inputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
['evaluation']\n", + "\n", + "
Channel of type 'ModelEvaluation' (1 artifact) at 0x7fdec6271370
.type_nameModelEvaluation
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelEvaluation' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6) at 0x7fdee54d3160
.type<class 'tfx.types.standard_artifacts.ModelEvaluation'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6
.outputs
['model_card']\n", + "\n", + "
Channel of type 'ModelCard' (1 artifact) at 0x7fdee67eb070
.type_nameModelCard
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelCard' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7) at 0x7fdee67eb940
.type<class 'tfx_addons.model_card_generator.artifact.ModelCard'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7
.exec_properties
['json']{"model_details": {"owners": [{"name": "Model Cards Team", "contact": "model-cards@google.com"}]}, "considerations": {"ethical_considerations": [{"name": "We risk expressing the viewpoint that the attributes in this dataset are the only ones that are predictive of someone\\u2019s income, even though we know this is not the case.", "mitigation_strategy": "As mentioned, some interventions may need to be performed to address the class imbalances in the dataset."}]}}
['template_io']None
.component.inputs
['statistics']\n", + "\n", + "
Channel of type 'ExampleStatistics' (1 artifact) at 0x7fdfb827d5b0
.type_nameExampleStatistics
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ExampleStatistics' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2) at 0x7fdfb83998b0
.type<class 'tfx.types.standard_artifacts.ExampleStatistics'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/StatisticsGen/statistics/2
.span0
.split_names["train", "eval"]
['evaluation']\n", + "\n", + "
Channel of type 'ModelEvaluation' (1 artifact) at 0x7fdec6271370
.type_nameModelEvaluation
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelEvaluation' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6) at 0x7fdee54d3160
.type<class 'tfx.types.standard_artifacts.ModelEvaluation'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/Evaluator/evaluation/6
.component.outputs
['model_card']\n", + "\n", + "
Channel of type 'ModelCard' (1 artifact) at 0x7fdee67eb070
.type_nameModelCard
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelCard' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7) at 0x7fdee67eb940
.type<class 'tfx_addons.model_card_generator.artifact.ModelCard'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7
" + ], + "text/plain": [ + "ExecutionResult(\n", + " component_id: ModelCardGenerator\n", + " execution_id: 7\n", + " outputs:\n", + " model_card: OutputChannel(artifact_type=ModelCard, producer_component_id=ModelCardGenerator, output_key=model_card, additional_properties={}, additional_custom_properties={}))" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mct_gen = ModelCardGenerator(statistics=statistics_gen.outputs['statistics'],\n", + " evaluation=evaluator.outputs['evaluation'],\n", + " json=json.dumps(model_card_json))\n", + "context.run(mct_gen)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "id": "euskJ-0qM6nV" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "
Channel of type 'ModelCard' (1 artifact) at 0x7fdee67eb070
.type_nameModelCard
._artifacts
[0]\n", + "\n", + "
Artifact of type 'ModelCard' (uri: /tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7) at 0x7fdee67eb940
.type<class 'tfx_addons.model_card_generator.artifact.ModelCard'>
.uri/tmp/tfx-Census Income Classification Pipeline-2_2mej8l/ModelCardGenerator/model_card/7
" + ], + "text/plain": [ + "OutputChannel(artifact_type=ModelCard, producer_component_id=ModelCardGenerator, output_key=model_card, additional_properties={}, additional_custom_properties={})" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mct_gen.outputs['model_card']" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0kj1szHNdqrX" + }, + "source": [ + "##Display Model Card\n", + "\n", + "Lastly, we isolate the uri from the model card generator artifact and use it to display the model card." + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "id": "Sd68Ih928vr9" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['data', 'template', 'model_cards']\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " Model Card\n", + "\n", + "\n", + "\n", + "

\n", + " Model Card\n", + "

\n", + "
\n", + " \n", + "
\n", + "

Model Details

\n", + " \n", + " \n", + " \n", + "

Owners

\n", + " \n", + " Model Cards Team, model-cards@google.com\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "

Considerations

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

Ethical Considerations

\n", + "
    \n", + "
  • \n", + "
    Risk: We risk expressing the viewpoint that the attributes in this dataset are the only ones that are predictive of someone’s income, even though we know this is not the case.
    \n", + "
    Mitigation Strategy: As mentioned, some interventions may need to be performed to address the class imbalances in the dataset.
    \n", + "
\n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + "
\n", + "

Datasets

\n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "
\n", + " counts | Age\n", + "
\n", + " \n", + "
\n", + " counts | Capital-Gain\n", + "
\n", + " \n", + "
\n", + " counts | Capital-Loss\n", + "
\n", + " \n", + "
\n", + " counts | Country\n", + "
\n", + " \n", + "
\n", + " counts | Education\n", + "
\n", + " \n", + "
\n", + " counts | Education-Num\n", + "
\n", + " \n", + "
\n", + " counts | Hours-per-week\n", + "
\n", + " \n", + "
\n", + " counts | Marital-Status\n", + "
\n", + " \n", + "
\n", + " counts | Occupation\n", + "
\n", + " \n", + "
\n", + " counts | Over-50K\n", + "
\n", + " \n", + "
\n", + " counts | Race\n", + "
\n", + " \n", + "
\n", + " counts | Relationship\n", + "
\n", + " \n", + "
\n", + " counts | Sex\n", + "
\n", + " \n", + "
\n", + " counts | Workclass\n", + "
\n", + " \n", + "
\n", + " counts | fnlwgt\n", + "
\n", + " \n", + "
\n", + "\n", + " \n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "
\n", + " counts | Age\n", + "
\n", + " \n", + "
\n", + " counts | Capital-Gain\n", + "
\n", + " \n", + "
\n", + " counts | Capital-Loss\n", + "
\n", + " \n", + "
\n", + " counts | Country\n", + "
\n", + " \n", + "
\n", + " counts | Education\n", + "
\n", + " \n", + "
\n", + " counts | Education-Num\n", + "
\n", + " \n", + "
\n", + " counts | Hours-per-week\n", + "
\n", + " \n", + "
\n", + " counts | Marital-Status\n", + "
\n", + " \n", + "
\n", + " counts | Occupation\n", + "
\n", + " \n", + "
\n", + " counts | Over-50K\n", + "
\n", + " \n", + "
\n", + " counts | Race\n", + "
\n", + " \n", + "
\n", + " counts | Relationship\n", + "
\n", + " \n", + "
\n", + " counts | Sex\n", + "
\n", + " \n", + "
\n", + " counts | Workclass\n", + "
\n", + " \n", + "
\n", + " counts | fnlwgt\n", + "
\n", + " \n", + "
\n", + "\n", + " \n", + "
\n", + "
\n", + " \n", + "
\n", + "\n", + " \n", + " \n", + " \n", + "
\n", + "

Quantitative Analysis

\n", + " \n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + 
"\n", + " \n", + "\n", + "
Performance Metrics
NameValue
\n", + "binary_accuracy, Race_ Black_X_Sex_ Male\n", + "\n", + "0.9342105263157895\n", + "
\n", + "loss, Race_ Black_X_Sex_ Male\n", + "\n", + "0.3001808524131775\n", + "
\n", + "example_count, Race_ Black_X_Sex_ Male\n", + "\n", + "152.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.5263157894736842\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.47368421052631576\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.05921052631578947\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.9407894736842105\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.06993006993006994\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Black_X_Sex_ Male\n", + "\n", + "0.47368421052631576\n", + "
\n", + "binary_accuracy\n", + "\n", + "0.8022353306426575\n", + "
\n", + "loss\n", + "\n", + "0.4450683295726776\n", + "
\n", + "example_count\n", + "\n", + "3221.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5\n", + "\n", + "0.01557377049180328\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5\n", + "\n", + "0.7669654289372599\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5\n", + "\n", + "0.2330345710627401\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5\n", + "\n", + "0.9844262295081967\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5\n", + "\n", + "0.06830176963675877\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5\n", + "\n", + "0.9316982303632412\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5\n", + "\n", + "0.17272727272727273\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5\n", + "\n", + "0.1996001332889037\n", + "
\n", + "fairness_indicators_metrics/precision@0.5\n", + "\n", + "0.8272727272727273\n", + "
\n", + "fairness_indicators_metrics/recall@0.5\n", + "\n", + "0.2330345710627401\n", + "
\n", + "binary_accuracy, Race_ Asian-Pac-Islander\n", + "\n", + "0.7326732673267327\n", + "
\n", + "loss, Race_ Asian-Pac-Islander\n", + "\n", + "0.5496888756752014\n", + "
\n", + "example_count, Race_ Asian-Pac-Islander\n", + "\n", + "101.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.84375\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.15625\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.04950495049504951\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.9504950495049505\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.28125\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Asian-Pac-Islander\n", + "\n", + "0.15625\n", + "
\n", + "binary_accuracy, Sex_ Female\n", + "\n", + "0.9152073732718894\n", + "
\n", + "loss, Sex_ Female\n", + "\n", + "0.28882336616516113\n", + "
\n", + "example_count, Sex_ Female\n", + "\n", + "1085.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Sex_ Female\n", + "\n", + "0.006179196704428424\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Sex_ Female\n", + "\n", + "0.7543859649122807\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Sex_ Female\n", + "\n", + "0.24561403508771928\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Sex_ Female\n", + "\n", + "0.9938208032955715\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Sex_ Female\n", + "\n", + "0.03133640552995392\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Sex_ Female\n", + "\n", + "0.9686635944700461\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Sex_ Female\n", + "\n", + "0.17647058823529413\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Sex_ Female\n", + "\n", + "0.0818268315889629\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Sex_ Female\n", + "\n", + "0.8235294117647058\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Sex_ Female\n", + "\n", + "0.24561403508771928\n", + "
\n", + "binary_accuracy, Race_ White_X_Sex_ Female\n", + "\n", + "0.9032258064516129\n", + "
\n", + "loss, Race_ White_X_Sex_ Female\n", + "\n", + "0.3034161627292633\n", + "
\n", + "example_count, Race_ White_X_Sex_ Female\n", + "\n", + "868.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.005221932114882507\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.7843137254901961\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.21568627450980393\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.9947780678851175\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.029953917050691243\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.9700460829493087\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.15384615384615385\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.09501187648456057\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.8461538461538461\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ White_X_Sex_ Female\n", + "\n", + "0.21568627450980393\n", + "
\n", + "binary_accuracy, Race_ White\n", + "\n", + "0.7851690294438386\n", + "
\n", + "loss, Race_ White\n", + "\n", + "0.46579301357269287\n", + "
\n", + "example_count, Race_ White\n", + "\n", + "2751.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White\n", + "\n", + "0.017699115044247787\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White\n", + "\n", + "0.7740585774058577\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White\n", + "\n", + "0.22594142259414227\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White\n", + "\n", + "0.9823008849557522\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ White\n", + "\n", + "0.07197382769901854\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ White\n", + "\n", + "0.9280261723009815\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White\n", + "\n", + "0.18181818181818182\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White\n", + "\n", + "0.21739130434782608\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ White\n", + "\n", + "0.8181818181818182\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ White\n", + "\n", + "0.22594142259414227\n", + "
\n", + "binary_accuracy, Sex_ Male\n", + "\n", + "0.7448501872659176\n", + "
\n", + "loss, Sex_ Male\n", + "\n", + "0.5244341492652893\n", + "
\n", + "example_count, Sex_ Male\n", + "\n", + "2136.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Sex_ Male\n", + "\n", + "0.021783526208304968\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Sex_ Male\n", + "\n", + "0.7691154422788605\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Sex_ Male\n", + "\n", + "0.23088455772113944\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Sex_ Male\n", + "\n", + "0.9782164737916951\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Sex_ Male\n", + "\n", + "0.08707865168539326\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Sex_ Male\n", + "\n", + "0.9129213483146067\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Sex_ Male\n", + "\n", + "0.17204301075268819\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Sex_ Male\n", + "\n", + "0.2630769230769231\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Sex_ Male\n", + "\n", + "0.8279569892473119\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Sex_ Male\n", + "\n", + "0.23088455772113944\n", + "
\n", + "binary_accuracy, Race_ White_X_Sex_ Male\n", + "\n", + "0.7307488050982475\n", + "
\n", + "loss, Race_ White_X_Sex_ Male\n", + "\n", + "0.5406433939933777\n", + "
\n", + "example_count, Race_ White_X_Sex_ Male\n", + "\n", + "1883.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.025236593059936908\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.7723577235772358\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.22764227642276422\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.9747634069400631\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.09134360063728093\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.908656399362719\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.18604651162790697\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.2776154295733489\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.813953488372093\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ White_X_Sex_ Male\n", + "\n", + "0.22764227642276422\n", + "
\n", + "binary_accuracy, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.8571428571428571\n", + "
\n", + "loss, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.4111871123313904\n", + "
\n", + "example_count, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "21.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.75\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.25\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.047619047619047616\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.9523809523809523\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.15\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", + "\n", + "0.25\n", + "
\n", + "binary_accuracy, Race_ Black_X_Sex_ Female\n", + "\n", + "0.9873417721518988\n", + "
\n", + "loss, Race_ Black_X_Sex_ Female\n", + "\n", + "0.19521600008010864\n", + "
\n", + "example_count, Race_ Black_X_Sex_ Female\n", + "\n", + "158.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.012987012987012988\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.987012987012987\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.0379746835443038\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.9620253164556962\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.3333333333333333\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "0.6666666666666666\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Black_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "binary_accuracy, Race_ Black\n", + "\n", + "0.9612903225806452\n", + "
\n", + "loss, Race_ Black\n", + "\n", + "0.24668261408805847\n", + "
\n", + "example_count, Race_ Black\n", + "\n", + "310.0\n", + "
\n", + "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Black\n", + "\n", + "0.006968641114982578\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Black\n", + "\n", + "0.43478260869565216\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black\n", + "\n", + "0.5652173913043478\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black\n", + "\n", + "0.9930313588850174\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Black\n", + "\n", + "0.04838709677419355\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Black\n", + "\n", + "0.9516129032258065\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Black\n", + "\n", + "0.13333333333333333\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Black\n", + "\n", + "0.03389830508474576\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Black\n", + "\n", + "0.8666666666666667\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Black\n", + "\n", + "0.5652173913043478\n", + "
\n", + "binary_accuracy, Race_ Other\n", + "\n", + "0.9166666666666666\n", + "
\n", + "loss, Race_ Other\n", + "\n", + "0.2953522801399231\n", + "
\n", + "example_count, Race_ Other\n", + "\n", + "24.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Other\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Other\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Other\n", + "\n", + "0.08333333333333333\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Other\n", + "\n", + "NaN\n", + "
\n", + "binary_accuracy, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.6567164179104478\n", + "
\n", + "loss, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.6346439719200134\n", + "
\n", + "example_count, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "67.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.8518518518518519\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.14814814814814814\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.05970149253731343\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.9402985074626866\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.36507936507936506\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", + "\n", + "0.14814814814814814\n", + "
\n", + "binary_accuracy, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.8823529411764706\n", + "
\n", + "loss, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.3822771906852722\n", + "
\n", + "example_count, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "34.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.8\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.2\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.029411764705882353\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.9705882352941176\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.12121212121212122\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", + "\n", + "0.2\n", + "
\n", + "binary_accuracy, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.8571428571428571\n", + "
\n", + "loss, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.31817975640296936\n", + "
\n", + "example_count, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "14.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.6666666666666666\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.3333333333333333\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.07142857142857142\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.9285714285714286\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.15384615384615385\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", + "\n", + "0.3333333333333333\n", + "
\n", + "binary_accuracy, Race_ Other_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "loss, Race_ Other_X_Sex_ Female\n", + "\n", + "0.15564249455928802\n", + "
\n", + "example_count, Race_ Other_X_Sex_ Female\n", + "\n", + "11.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Other_X_Sex_ Female\n", + "\n", + "NaN\n", + "
\n", + "binary_accuracy, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.8571428571428571\n", + "
\n", + "loss, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.3739841878414154\n", + "
\n", + "example_count, Race_ Amer-Indian-Eskimo\n", + "\n", + "35.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.7142857142857143\n", + "
\n", + "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.2857142857142857\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.05714285714285714\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.9428571428571428\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.15151515151515152\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/recall@0.5, Race_ Amer-Indian-Eskimo\n", + "\n", + "0.2857142857142857\n", + "
\n", + "binary_accuracy, Race_ Other_X_Sex_ Male\n", + "\n", + "0.8461538461538461\n", + "
\n", + "loss, Race_ Other_X_Sex_ Male\n", + "\n", + "0.41356828808784485\n", + "
\n", + "example_count, Race_ Other_X_Sex_ Male\n", + "\n", + "13.0\n", + "
\n", + "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/negative_rate@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "1.0\n", + "
\n", + "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "NaN\n", + "
\n", + "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "0.15384615384615385\n", + "
\n", + "fairness_indicators_metrics/precision@0.5, Race_ Other_X_Sex_ Male\n", + "\n", + "NaN\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "
\n", + " fairness_indicators_metrics/negative_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/negative_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/negative_rate@0.5 | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/positive_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/positive_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/positive_rate@0.5 | Race, Sex\n", + "
\n", + " \n", + "
\n", + " loss | Sex\n", + "
\n", + " \n", + "
\n", + " loss | Race\n", + "
\n", + " \n", + "
\n", + " loss | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/true_positive_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/true_positive_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " binary_accuracy | Sex\n", + "
\n", + " \n", + "
\n", + " binary_accuracy | Race\n", + "
\n", + " \n", + "
\n", + " binary_accuracy | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/true_negative_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/true_negative_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/true_negative_rate@0.5 | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/precision@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/recall@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/recall@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_discovery_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " example_count | Sex\n", + "
\n", + " \n", + "
\n", + " example_count | Race\n", + "
\n", + " \n", + "
\n", + " example_count | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_positive_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_positive_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_positive_rate@0.5 | Race, Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_negative_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_negative_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_omission_rate@0.5 | Sex\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_omission_rate@0.5 | Race\n", + "
\n", + " \n", + "
\n", + " fairness_indicators_metrics/false_omission_rate@0.5 | Race, Sex\n", + "
\n", + " \n", + "
\n", + "\n", + "
\n", + "
\n", + "\n", + " \n", + "
\n", + "\n", + " \n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython import display\n", + "\n", + "mct_artifact = mct_gen.outputs['model_card'].get()[0]\n", + "mct_uri = mct_artifact.uri\n", + "\n", + "print(os.listdir(mct_uri))\n", + "\n", + "mct_path = os.path.join(mct_uri, 'model_cards', 'model_card.html')\n", + "with open(mct_path) as f:\n", + " mct_content = f.read()\n", + "\n", + "\n", + "display.display(display.HTML(mct_content))" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "MLMD Model Card Toolkit Demo.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/proposals/20230209-copy_example_gen.md b/proposals/20230209-copy_example_gen.md new file mode 100644 index 00000000..b1c75f8e --- /dev/null +++ b/proposals/20230209-copy_example_gen.md @@ -0,0 +1,90 @@ +#### SIG TFX-Addons +# Project Proposal for CopyExampleGen component + +**Your name:** Alexander Ho + +**Your email:** alexanderho@google.com + +**Your company/organization:** Google + +**Project name:** CopyExampleGen component + +## Project Description +CopyExampleGen will allow the user to copy pre-existing tfrecords and ingest it into the pipeline as examples, ultimately skipping the process of shuffling and running the Beam job that is in the standard component, ExampleGen. This process will require a dict input with split names as keys and their respective URIs as the value from the user. Following suit, the component will set the artifact’s properties, generate output dict, and register contexts and execution. Lastly, it will output an Examples Artifact in which downstream components can use. + +Example of pipeline component definition: +```python + copy_example_gen = component.CopyExampleGen( + splits_dict = tfrecords_dict + ) +``` + +## Project Category +Addon Component + +## Project Use-Case(s) +CopyExampleGen will replace ExampleGen when tfrecords and split names are already in the possession of the user. Hence, a Beam job will not be run nor will the tfrecords be shuffled and/ or randomized saving data ingestion pipeline process time. + +Currently, ingesting data with the ExampleGen component does not provide a way to split without random data shuffling and always runs a beam job. This component will save significant time (hours for large amounts of data) per pipeline run when a pipeline run does not require data to be shuffled. Some challenges users have had: + + 1. “Reshuffle doesn't work well with DirectRunner and causes OOMing. Users have been patching out shuffling in every release and doing it in the DB query. They have given up on Beam based ExampleGen and have created an entire custom ExampleGen that reads from the database and doesn’t use Beam”. + + 2. 
"When the use case is a time series problem using sliding windows, shuffling before splitting into train and eval sets is counterproductive, as the user would need a coherent training set."
+
+
+## Project Implementation
+### Component
+
+CopyExampleGenSpec Class:
+  Add parameters to the following sections in the CopyExampleGenSpec(types.ComponentSpec) class:
+
+- `PARAMETERS`: `'tfrecords_dict': 'ExecutionParameter(type=dict)'`. The user input dict will follow a pattern like `{'Split-name': 'uri_to_tfrecords_folder'}`, i.e. (see question #2):
+```python
+  {
+      'train': './uri/path/to/Split_train/',
+      'eval': './uri/path/to/Split_eval/'
+  }
+```
+
+  - `INPUTS`: will be empty since the user will only provide a dict
+
+  - `OUTPUTS`: `'output_data': 'ChannelParameter(type=standard_artifacts.Examples)'`
+
+CopyExampleGen Class:
+  `output_data` will contain a list of Channels for each split of the data. The splits in `output_data` will be derived from the keys in the `tfrecords_dict`.
+
+
+### Executor
+
+#### Part 1
+
+  Using the keys and values from `tfrecords_dict`:
+  1. function `parse_tfrecords_dict(tfrecords_dict)`: determine the source (and possibly destination; see question #2) for the files in each split, building exact URIs as necessary.
+  2. function `split_names(tfrecords_dict)`: parse the input into the list of split names that will become `split` properties of the output Examples artifact. Example: `["train", "eval"]`
+
+  Depending on when the file copying happens (see question #1), possibly copy the files at this point.
+
+
+#### Part 2
+
+  Transform the result of `parse_tfrecords_dict` created above into an Examples Artifact. Importer Node has the functionality and process we are trying to recreate in this CopyExampleGen, because it registers an external resource into MLMD and outputs the user-defined Artifact type.
+
+  This step can possibly use the [importer.generate_output_dict](https://github.com/tensorflow/tfx/blob/f8ce19339568ae58519d4eecfdd73078f80f84a2/tfx/dsl/components/common/importer.py#L153) function:
+  Create the standard `output_dict` variable. The value will be created by calling the worker function. If file copying is done before this step, this method can probably be used as is to register the artifact.
+
+## Open Implementation Questions
+  1. There are a few open questions about how the file copying should actually be done. Where does the copying that the importer does actually happen? And what's the best way to change that? Are there other ways in TFX to do copying in a robust way? Maybe something in tfx.io? If there's an existing method, what has to happen in `parse_tfrecords_dict`? Depending on the copying capabilities available, will there be a need to detect the execution environment? Does TFX rely on other tools to execute a copy that handle this? Is detection of the execution environment and the copying itself separate? What could be reused?
+
+     - If it's not easy to detect the execution environment without also performing a copy, will the user have to specify the execution environment and therefore how to do the copy (e.g., local copy, GCS, S3)? And then what's the best way to handle that?
+
+  2. Should the dictionary of file inputs take a path to a folder? Globs? Lists of individual files?
+  3. Assuming file copying is done entirely separately, can [importer.generate_output_dict](https://github.com/tensorflow/tfx/blob/f8ce19339568ae58519d4eecfdd73078f80f84a2/tfx/dsl/components/common/importer.py#L153) be used as is to register the artifacts, or does some separate code using [MLMD](https://www.tensorflow.org/tfx/guide/mlmd) in a different way need to be written?
+
+## Project Dependencies
+Possibly libraries that directly access blob storage platforms, e.g. google-cloud-storage.
+
+
+## Project Team
+Alex Ho, alexanderho@google.com, @alxndrnh
+
diff --git a/proposals/20230328-airflow_orchestration.md b/proposals/20230328-airflow_orchestration.md
new file mode 100644
index 00000000..b5db8298
--- /dev/null
+++ b/proposals/20230328-airflow_orchestration.md
@@ -0,0 +1,54 @@
+#### SIG TFX-Addons
+# Project Proposal
+
+**Your name:** Woosung Song
+
+**Your email:** wssong@google.com
+
+**Your company/organization:** Google
+
+**Project name:** Apache Airflow for Pipeline Orchestration
+
+## Project Description
+Apache Airflow for pipeline orchestration is going to be migrated from the
+official TFX to Addons.
+
+## Project Category
+Other (Orchestration)
+
+## Project Use-Case(s)
+In order to simplify core TFX for users who are not using Airflow, we would like
+to separate out support for the Airflow orchestrator into a pluggable module and
+make it available through TFX-Addons. This will help simplify the core TFX
+install, dependencies, and tests, and decrease the size of the installed
+payload.
+
+The functionality of the orchestrator will be retained, but users will need to
+update the import paths. To make the transition smoother, it will coexist in
+both the official TFX and Addons for a while, and the official one will be
+deprecated from the 1.14.0 release.
+
+## Project Implementation
+The basic implementation and API signatures will follow the original methods,
+but the internal dependencies and testing will be reimplemented.
+
+The import path will be moved from `tfx.orchestration.airflow` to
+`tfx_addons.airflow_orchestration`.
+
+```python
+from tfx_addons.airflow_orchestration import airflow_dag_runner
+
+def _create_pipeline():
+  ...
+  return [example_gen, statistics_gen, trainer, evaluator, pusher]
+
+runner = airflow_dag_runner.AirflowDagRunner(_airflow_dag_config)
+result = runner.run(_create_pipeline())
+```
+
+## Project Dependencies
+It introduces `apache-airflow[mysql]>=1.10.14,<3` as a dependency.
+
+## Project Team
+**Project Leader**: Woosung Song, lego0901, wssong@google.com
+1. Woosung Song, wssong@google.com, @wssong
diff --git a/tfx_addons/apache_airflow/README.md b/tfx_addons/apache_airflow/README.md
new file mode 100644
index 00000000..9878b09d
--- /dev/null
+++ b/tfx_addons/apache_airflow/README.md
@@ -0,0 +1,3 @@
+# Apache Airflow Orchestrator
+
+(Please fill in a description of the project, usage instructions, etc.)
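+Until that description is filled in, here is a minimal usage sketch adapted
+from the Apache Airflow orchestration proposal above. It is illustrative only:
+`_create_pipeline` stands in for your own TFX pipeline factory, and the DAG
+settings shown are assumed defaults, not a finished API reference.
+
+```python
+import datetime
+
+from tfx_addons.airflow_orchestration import airflow_dag_runner
+
+# Assumed Airflow DAG settings; adjust the schedule and start date as needed.
+_airflow_dag_config = {
+    'schedule_interval': None,
+    'start_date': datetime.datetime(2023, 1, 1),
+}
+
+
+def _create_pipeline():
+  # Placeholder: assemble and return your TFX pipeline here
+  # (ExampleGen, StatisticsGen, Trainer, Evaluator, Pusher, ...).
+  ...
+
+
+# Wraps the TFX pipeline in an Airflow DAG; in an Airflow deployment this
+# file would typically live in the Airflow DAGs folder.
+runner = airflow_dag_runner.AirflowDagRunner(_airflow_dag_config)
+result = runner.run(_create_pipeline())
+```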
diff --git a/tfx_addons/copy_example_gen/README.md b/tfx_addons/copy_example_gen/README.md
new file mode 100644
index 00000000..01efebbf
--- /dev/null
+++ b/tfx_addons/copy_example_gen/README.md
@@ -0,0 +1,40 @@
+#### SIG TFX-Addons
+# Project Proposal for CopyExampleGen component
+
+**Your name:** Alexander Ho
+
+**Your email:** alexanderho@google.com
+
+**Your company/organization:** Google
+
+**Project name:** CopyExampleGen component
+
+## Project Description
+CopyExampleGen will allow the user to copy a pre-existing TFRecord dataset or raw data and ingest it into the pipeline, ultimately skipping the process of shuffling and running the Beam job. This process will require a dict input with split names and their respective URIs. This will output an Examples Artifact (the same Artifact type output by the ExampleGen component) that downstream components can use.
+
+## Project Category
+Component
+
+## Project Use-Case(s)
+CopyExampleGen will allow the user to pass a dict input with split names as the keys and their respective pre-existing TFRecord URIs as the values, then format the directory structure so that it matches that of an Examples Artifact.
+
+Currently, ingesting data with ExampleGen requires a Beam job to be run and requires the data to be shuffled. This component will save users hours or days of having to create a workaround fully custom ExampleGen component. Some challenges our users have had:
+* Reshuffle doesn't work well with DirectRunner and causes OOMing. Users have been patching out shuffling in every release and doing it in the DB query. They have given up on Beam-based ExampleGen and have created an entire custom ExampleGen that reads from the database and doesn't use Beam. Link.
+* When the use case is a time series problem using sliding windows, shuffling before splitting into train and eval sets is counterproductive, as the user would need a coherent training set. Link.
+* Almost impossible to use ExampleGen-based components for large datasets. Without it, Beam knows how to write to disk after transforming from input format to output format, allowing it to transform (slowly) large datasets that would otherwise not fit into memory. Link.
+
+## Project Implementation
+Use case #1 - TFRecords as input URIs:
+This component will:
+1. Accept a dict, e.g. {'split_name1': './path/to/split_name1/tfrecord1', 'split_name2': './path/to/split_name2/tfrecord2'}
+2. Retrieve the TFRecords
+3. Create an Examples Artifact, following the Examples directory structure and the properties required for an Examples Artifact
+4. Register the Examples Artifact in MLMD
+5. Output as 'examples' to be ingested by downstream components
+
+
+## Project Dependencies
+Using: Python 3.8.2, TensorFlow 2.11.0, TFX 1.12.0
+
+## Project Team
+Alex Ho, alexanderho@google.com, @alxndrnh
diff --git a/tfx_addons/model_card_generator/README.md b/tfx_addons/model_card_generator/README.md
new file mode 100644
index 00000000..3e58e8d4
--- /dev/null
+++ b/tfx_addons/model_card_generator/README.md
@@ -0,0 +1,74 @@
+# TFX Model Card Generator
+
+Idea from [#12](https://github.com/tensorflow/tfx-addons/issues/82)
+
+**Status**: Active
+
+Created by @shuklak13
+
+The ModelCardGenerator TFX pipeline component generates model cards.
+
+For the detailed model card format, see the
+[Model Card API](https://www.tensorflow.org/responsible_ai/model_card_toolkit/api_docs/python/model_card_toolkit/ModelCard).
+
+For more general information about TFX, please see the
+[TFX User Guide](https://www.tensorflow.org/tfx/guide).
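+
+After a pipeline run, the generated documents can be read back from the
+component's `model_card` output artifact. A minimal sketch (assuming an
+interactive run and the `mc_gen` component instance defined in the Example
+below; the exact output subdirectory name may vary, so this walks the
+artifact directory):
+
+```py
+import os
+
+# `mc_gen` is the ModelCardGenerator instance from the Example below,
+# after it has been executed (e.g. with an InteractiveContext).
+mc_artifact = mc_gen.outputs['model_card'].get()[0]
+
+# Find and read the generated HTML model card under the artifact URI.
+for root, _, files in os.walk(mc_artifact.uri):
+  for file_name in files:
+    if file_name == 'model_card.html':
+      with open(os.path.join(root, file_name)) as f:
+        model_card_html = f.read()
+```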
+ +## Configuring the ModelCardGenerator Component + +The ModelCardGenerator takes +[dataset statistics](https://www.tensorflow.org/tfx/guide/statsgen), +[model evaluation](https://www.tensorflow.org/tfx/guide/evaluator), and a +[pushed model](https://www.tensorflow.org/tfx/guide/pusher) to automatically +populate parts of a model card. + +[Model card fields](https://www.tensorflow.org/responsible_ai/model_card_toolkit/api_docs/python/model_card_toolkit/ModelCard) +can also be explicitly populated with a JSON string (this can be generated using +the [`json`](https://docs.python.org/3/library/json.html) module, see Example +below). If a field is populated both by TFX and JSON, the JSON value will +overwrite the TFX value. + +The ModelCardGenerator writes model card documents to the `model_card/` +directory of its artifact output. It uses a default HTML model card template, +which is used to generate `model_card.html`. Custom +[templates](https://www.tensorflow.org/responsible_ai/model_card_toolkit/guide/templates) +can also be used; each template input must be accompanied by a file name output +in the `template_io` arg. + +### Example + +```py +import json + +from tfx_addons.model_card_generator.component import ModelCardGenerator + +... +model_card_fields = { + 'model_details': { + 'name': 'my_model', + 'owners': 'Google', + 'version': 'v0.1' + }, + 'considerations': { + 'limitations': 'This is a demo model.' + } +} +mc_gen = ModelCardGenerator( + statistics=statistics_gen.outputs['statistics'], + evaluation=evaluator.outputs['evaluation'], + pushed_model=pusher.outputs['pushed_model'], + json=json.dumps(model_card_fields), + template_io=[ + ('html/default_template.html.jinja', 'model_card.html'), + ('md/default_template.md.jinja', 'model_card.md') + ] +) +``` + +More details are available in the +[ModelCardGenerator](https://www.tensorflow.org/responsible_ai/model_card_toolkit/api_docs/python/model_card_toolkit/ModelCardGenerator) +API reference. + +See our +[end-to-end demo](https://www.tensorflow.org/responsible_ai/model_card_toolkit/examples/MLMD_Model_Card_Toolkit_Demo) +for a full working example. diff --git a/tfx_addons/model_card_generator/RELEASE.md b/tfx_addons/model_card_generator/RELEASE.md new file mode 100644 index 00000000..59c9412a --- /dev/null +++ b/tfx_addons/model_card_generator/RELEASE.md @@ -0,0 +1,23 @@ +# Current Version + +### Last Update: 3 April 2023 + +## Major Features and Improvements + +* Model Card Generator for TFX pipelines using the `model-card-toolkit` + +## Breaking Changes + +* None at this time + +## Deprecations + +* None at this time + +## Bug Fixes and Other Changes + +* None at this time + +## Documentation Updates + +* None at this time diff --git a/tfx_addons/model_card_generator/__init__.py b/tfx_addons/model_card_generator/__init__.py new file mode 100644 index 00000000..da00b90e --- /dev/null +++ b/tfx_addons/model_card_generator/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model Card Generator""" diff --git a/tfx_addons/model_card_generator/artifact.py b/tfx_addons/model_card_generator/artifact.py new file mode 100644 index 00000000..0dbdccca --- /dev/null +++ b/tfx_addons/model_card_generator/artifact.py @@ -0,0 +1,75 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The ModelCard TFX/MLMD artifact.""" + +import datetime + +import ml_metadata as mlmd +from absl import logging +from ml_metadata import errors +from ml_metadata.proto import metadata_store_pb2 +from tfx.types.artifact import Artifact +from tfx.types.system_artifacts import Metrics + + +class ModelCard(Artifact): + """A [TFX/MLMD artifact](https://www.tensorflow.org/tfx/guide/mlmd#data_model) to model card assets. + + Assets include: + * a data file containing the model card fields, located at + `/data/model_card.proto`. + * the model card itself, located at the `/model_card/ directory`. + """ + TYPE_NAME = 'ModelCard' + TYPE_ANNOTATION = Metrics + + +def create_and_save_artifact( + artifact_name: str, artifact_uri: str, + store: mlmd.MetadataStore) -> metadata_store_pb2.Artifact: + """Generates and saves a ModelCard artifact to the specified MetadataStore. + + Args: + artifact_name: The name for the ModelCard artifact. A timestamp will be + appended to this to distinguish model cards created from the same job. + artifact_uri: The uri for the ModelCard artifact. + store: The MetadataStore where the ModelCard artifact and artifact type are + saved. + + Returns: + The saved artifact, which can be used to store model card assets. + """ + + try: + type_id = store.get_artifact_type(ModelCard.TYPE_NAME).id + except errors.NotFoundError: + type_id = store.put_artifact_type( + metadata_store_pb2.ArtifactType(name=ModelCard.TYPE_NAME)) + name = ''.join( + [artifact_name, '_', + datetime.datetime.now().strftime('%H:%M:%S')]) + + # Save artifact to store. Also populates the artifact's id. + artifact_id = store.put_artifacts([ + metadata_store_pb2.Artifact(type=ModelCard.TYPE_NAME, + type_id=type_id, + uri=artifact_uri, + name=name) + ])[0] + artifact = store.get_artifacts_by_id([artifact_id])[0] + logging.info( + 'Successfully saved ModelCard artifact %s with uri=%s and id=%s.', + artifact.name, artifact.uri, artifact.id) + return artifact diff --git a/tfx_addons/model_card_generator/artifact_test.py b/tfx_addons/model_card_generator/artifact_test.py new file mode 100644 index 00000000..5ada6984 --- /dev/null +++ b/tfx_addons/model_card_generator/artifact_test.py @@ -0,0 +1,51 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for artifact.""" + +import ml_metadata as mlmd +from absl.testing import absltest +from ml_metadata.proto import metadata_store_pb2 + +from tfx_addons.model_card_generator import artifact + + +class ArtifactTest(absltest.TestCase): + def setUp(self): + super(ArtifactTest, self).setUp() + connection_config = metadata_store_pb2.ConnectionConfig() + connection_config.fake_database.SetInParent() + self.store = mlmd.MetadataStore(connection_config) + + def test_create_and_save_artifact(self): + mc_artifact = artifact.create_and_save_artifact( + artifact_name='my model', + artifact_uri='/path/to/model/card/assets', + store=self.store) + + with self.subTest('saved_to_mlmd'): + self.assertCountEqual([mc_artifact], + self.store.get_artifacts_by_id([mc_artifact.id])) + with self.subTest('properties'): + with self.subTest('type_id'): + self.assertEqual(mc_artifact.type_id, + self.store.get_artifact_type('ModelCard').id) + with self.subTest('uri'): + self.assertEqual(mc_artifact.uri, '/path/to/model/card/assets') + with self.subTest('name'): + self.assertStartsWith(mc_artifact.name, 'my model_') + + +if __name__ == '__main__': + absltest.main() diff --git a/tfx_addons/model_card_generator/component.py b/tfx_addons/model_card_generator/component.py new file mode 100644 index 00000000..24e917ab --- /dev/null +++ b/tfx_addons/model_card_generator/component.py @@ -0,0 +1,147 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model Card TFX Component. + +The ModelCardGenerator is used to generate model cards in a TFX pipeline. +""" + +from typing import Any, List, Optional, Tuple + +from tfx import types +from tfx.dsl.components.base import executor_spec +from tfx.dsl.components.base.base_component import BaseComponent +from tfx.types import (component_spec, standard_artifacts, + standard_component_specs) + +from tfx_addons.model_card_generator import artifact, executor + +MODEL_CARD_KEY = 'model_card' + + +class ModelCardGeneratorSpec(component_spec.ComponentSpec): + """Component spec for the ModelCardGenerator.""" + PARAMETERS = { + 'json': + component_spec.ExecutionParameter(type=str, optional=True), + # template_io's type is List[Tuple[str, str]], + # but we need List[Any] to pass ExecutionParameter.type_check(). 
+ # See below link for details. + # https://github.com/tensorflow/tfx/blob/4ff5e97b09540ff8a858076a163ecdf209716324/tfx/types/component_spec.py#L308 + 'template_io': + component_spec.ExecutionParameter(type=List[Any], optional=True) + } + INPUTS = { + standard_component_specs.STATISTICS_KEY: + component_spec.ChannelParameter( + type=standard_artifacts.ExampleStatistics, optional=True), + standard_component_specs.EVALUATION_KEY: + component_spec.ChannelParameter(type=standard_artifacts.ModelEvaluation, + optional=True), + standard_component_specs.PUSHED_MODEL_KEY: + component_spec.ChannelParameter(type=standard_artifacts.PushedModel, + optional=True), + } + OUTPUTS = { + MODEL_CARD_KEY: component_spec.ChannelParameter(type=artifact.ModelCard), + } + + +class ModelCardGenerator(BaseComponent): + """A TFX component to generate a model card. + + The `ModelCardGenerator` is a [TFX + Component](https://www.tensorflow.org/tfx/guide/understanding_tfx_pipelines#component) + that generates model cards. + + The model cards are written to a `ModelCard` artifact that can be fetched + from the `outputs['model_card]'` property. + + Example: + + ```py + context = InteractiveContext() + ... + mc_gen = ModelCardGenerator( + statistics=statistics_gen.outputs['statistics'], + evaluation=evaluator.outputs['evaluation'], + pushed_model=pusher.outputs['pushed_model'], + json="{'model_details': {'name': 'my_model'}}", + template_io=[ + ('html/default_template.html.jinja', 'model_card.html'), + ('md/default_template.md.jinja', 'model_card.md') + ] + ) + context.run(mc_gen) + mc_artifact = mc_gen.outputs['model_card'].get()[0] + mc_path = os.path.join(mc_artifact.uri, 'model_card', 'model_card.html') + with open(mc_path) as f: + mc_content = f.readlines() + ``` + """ + + SPEC_CLASS = ModelCardGeneratorSpec + EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) + + def __init__(self, + evaluation: Optional[types.Channel] = None, + statistics: Optional[types.Channel] = None, + pushed_model: Optional[types.Channel] = None, + json: Optional[str] = None, + template_io: Optional[List[Tuple[str, str]]] = None): + """Generate a model card for a TFX pipeline. + + This executes a Model Card Toolkit workflow, producing a `ModelCard` + artifact. + + Model card generation is partially automated from TFX, using the + `ExampleStatistics`, `ModelEvaluation`, and `PushedModel` artifacts. Model + card fields may be manually populated using the `json` arg. See the Args + section for more details. + + To use custom model card templates, use the `template_io` arg. + `ModelCardGenerator` can generate multiple model cards per execution. + + Args: + evaluation: TFMA output from an + [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator) component, + used to populate quantitative analysis fields in the model card. + statistics: TFDV output from a + [StatisticsGen](https://www.tensorflow.org/tfx/guide/statsgen) + component, used to populate dataset fields in the model card. + pushed_model: PushedModel output from a + [Pusher](https://www.tensorflow.org/tfx/guide/pusher) component, used to + populate model details in the the model card. + json: A JSON string containing `ModelCard` fields. This is particularly + useful for fields that cannot be auto-populated from earlier TFX + components. If a field is populated both by TFX and JSON, the JSON value + will overwrite the TFX value. 
Use the [Model Card JSON + schema](https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/schema/v0.0.2/model_card.schema.json). + template_io: A list of input/output pairs. The input is the path to a + [Jinja](https://jinja.palletsprojects.com/) template. Using data + extracted from TFX components and `json`, this template is populated and + saved as a model card. The output is a file name where the model card + will be written to in the `model_card/` directory. By default, + `ModelCardToolkit`'s default HTML template + (`default_template.html.jinja`) and file name (`model_card.html`) are + used. + """ + spec = ModelCardGeneratorSpec( + evaluation=evaluation, + statistics=statistics, + pushed_model=pushed_model, + model_card=types.Channel(type=artifact.ModelCard), + json=json, + template_io=template_io) + super(ModelCardGenerator, self).__init__(spec=spec) diff --git a/tfx_addons/model_card_generator/component_test.py b/tfx_addons/model_card_generator/component_test.py new file mode 100644 index 00000000..da3aeb34 --- /dev/null +++ b/tfx_addons/model_card_generator/component_test.py @@ -0,0 +1,73 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for model_card_toolkit.tfx.component.""" + +import json as json_lib + +from absl.testing import absltest +from tfx.types import channel_utils, standard_artifacts + +from tfx_addons.model_card_generator import artifact +from tfx_addons.model_card_generator.component import ModelCardGenerator + + +class ComponentTest(absltest.TestCase): + def test_component_construction(self): + this_component = ModelCardGenerator( + statistics=channel_utils.as_channel( + [standard_artifacts.ExampleStatistics()]), + evaluation=channel_utils.as_channel( + [standard_artifacts.ModelEvaluation()]), + pushed_model=channel_utils.as_channel( + [standard_artifacts.PushedModel()]), + json=json_lib.dumps( + {'model_details': { + 'name': 'my model', + 'version': { + 'name': 'v1' + } + }}), + template_io=[('path/to/html/template', 'mc.html'), + ('path/to/md/template', 'mc.md')]) + + with self.subTest('outputs'): + self.assertEqual(this_component.outputs['model_card'].type_name, + artifact.ModelCard.TYPE_NAME) + + with self.subTest('exec_properties'): + self.assertDictEqual( + { + 'json': + json_lib.dumps({ + 'model_details': { + 'name': 'my model', + 'version': { + 'name': 'v1' + } + } + }), + 'template_io': [('path/to/html/template', 'mc.html'), + ('path/to/md/template', 'mc.md')] + }, this_component.exec_properties) + + def test_empty_component_construction(self): + this_component = ModelCardGenerator() + with self.subTest('outputs'): + self.assertEqual(this_component.outputs['model_card'].type_name, + artifact.ModelCard.TYPE_NAME) + + +if __name__ == '__main__': + absltest.main() diff --git a/tfx_addons/model_card_generator/executor.py b/tfx_addons/model_card_generator/executor.py new file mode 100644 
index 00000000..e946ee55 --- /dev/null +++ b/tfx_addons/model_card_generator/executor.py @@ -0,0 +1,119 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model Card TFX Component Executor. + +The ModelCard Executor handles the ModelCardToolkit workflow in the +ModelCardGenerator. +""" + +from typing import Any, Dict, List, Optional + +from model_card_toolkit import core +from model_card_toolkit.utils import source as src +from tfx import types +from tfx.dsl.components.base.base_executor import BaseExecutor +from tfx.types import artifact_utils, standard_component_specs + +_DEFAULT_MODEL_CARD_FILE_NAME = 'model_card.html' + + +class Executor(BaseExecutor): + """Executor for Model Card TFX component.""" + def _tfma_source( + self, + input_dict: Dict[str, List[types.Artifact]]) -> Optional[src.TfmaSource]: + """See base class.""" + if not input_dict.get(standard_component_specs.EVALUATION_KEY): + return None + else: + return src.TfmaSource(model_evaluation_artifacts=input_dict[ + standard_component_specs.EVALUATION_KEY]) + + def _tfdv_source( + self, + input_dict: Dict[str, List[types.Artifact]]) -> Optional[src.TfdvSource]: + """See base class.""" + if not input_dict.get(standard_component_specs.STATISTICS_KEY): + return None + else: + return src.TfdvSource(example_statistics_artifacts=input_dict[ + standard_component_specs.STATISTICS_KEY]) + + def _model_source( + self, + input_dict: Dict[str, + List[types.Artifact]]) -> Optional[src.ModelSource]: + """See base class.""" + if not input_dict.get(standard_component_specs.PUSHED_MODEL_KEY): + return None + else: + return src.ModelSource( + pushed_model_artifact=artifact_utils.get_single_instance(input_dict[ + standard_component_specs.PUSHED_MODEL_KEY])) + + def Do(self, input_dict: Dict[str, List[types.Artifact]], + output_dict: Dict[str, List[types.Artifact]], + exec_properties: Dict[str, Any]) -> None: + """Generate a model card for a TFX pipeline. + + This executes a Model Card Toolkit workflow, producing a `ModelCard` + artifact. + + Args: + input_dict: Input dict from key to a list of artifacts, including: + - evaluation: TFMA output from an + [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator) component, + used to populate quantitative analysis fields in the model card. + - statistics: TFDV output from a + [StatisticsGen](https://www.tensorflow.org/tfx/guide/statsgen) + component, used to populate dataset fields in the model card. + - pushed_model: PushedModel output from a + [Pusher](https://www.tensorflow.org/tfx/guide/pusher) component, used + to populate model details in the the model card. + output_dict: Output dict from key to a list of artifacts, including: + - model_card: An artifact referencing the directory containing the Model + Card document, as well as the `ModelCard` used to construct the + document. 
+ exec_properties: An optional dict of execution properties, including: + - json: A JSON string containing `ModelCard` fields. This is + particularly useful for fields that cannot be auto-populated from + earlier TFX components. If a field is populated both by TFX and JSON, + the JSON value will overwrite the TFX value. Use the [Model Card JSON + schema](https://github.com/tensorflow/model-card-toolkit/blob/master/model_card_toolkit/schema/v0.0.2/model_card.schema.json). + - template_io: A list of input/output pairs. The input is the path to a + [Jinja](https://jinja.palletsprojects.com/) template. Using data + extracted from TFX components and `json`, this template is populated + and saved as a model card. The output is a file name where the model + card will be written to in the `model_card/` directory. By default, + `ModelCardToolkit`'s default HTML template + (`default_template.html.jinja`) and file name (`model_card.html`) + are used. + """ + + # Initialize ModelCardToolkit + mct = core.ModelCardToolkit(source=src.Source( + tfma=self._tfma_source(input_dict), + tfdv=self._tfdv_source(input_dict), + model=self._model_source(input_dict)), + output_dir=artifact_utils.get_single_instance( + output_dict['model_card']).uri) + template_io = exec_properties.get('template_io') or [ + (mct.default_template, _DEFAULT_MODEL_CARD_FILE_NAME) + ] + + # Create model card assets from inputs + mct.scaffold_assets(json=exec_properties.get('json')) + for template_path, output_file in template_io: + mct.export_format(template_path=template_path, output_file=output_file) diff --git a/tfx_addons/model_card_generator/executor_test.py b/tfx_addons/model_card_generator/executor_test.py new file mode 100644 index 00000000..9f046cb8 --- /dev/null +++ b/tfx_addons/model_card_generator/executor_test.py @@ -0,0 +1,196 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for model_card_toolkit.tfx.executor.""" + +import os + +import tensorflow_model_analysis as tfma +from absl.testing import absltest, parameterized +from model_card_toolkit.proto import model_card_pb2 +from tfx.types import standard_artifacts, standard_component_specs + +from tfx_addons.model_card_generator import artifact as artifact_utils +from tfx_addons.model_card_generator import executor +from tfx_addons.model_card_generator.tfxtest import TfxTest + + +class ExecutorTest(parameterized.TestCase, TfxTest): + def setUp(self): + super(ExecutorTest, self).setUp() + self.mct_executor = executor.Executor() + mlmd_store = self._set_up_mlmd() + + # Write template + self.template_file = self.create_tempdir().create_file() + self.template_file.write_text('hello world') + + # Write TFMA metrics to store + tfma_path = os.path.join(self.tmpdir, 'tfma') + add_metrics_callbacks = [ + tfma.post_export_metrics.example_count(), + tfma.post_export_metrics.calibration_plot_and_prediction_histogram( + num_buckets=2) + ] + self._write_tfma(tfma_path, '', add_metrics_callbacks, mlmd_store) + + # Write TFDV statistics to store + tfdv_path = os.path.join(self.tmpdir, 'tfdv') + self.train_dataset_name = 'Dataset-Split-train' + self.train_features = ['feature_name1'] + self.eval_dataset_name = 'Dataset-Split-eval' + self.eval_features = ['feature_name2'] + self._write_tfdv(tfdv_path, self.train_dataset_name, self.train_features, + self.eval_dataset_name, self.eval_features, mlmd_store) + + self.eval_artifacts = mlmd_store.get_artifacts_by_type( + standard_artifacts.ModelEvaluation.TYPE_NAME) + self.example_stats_artifacts = mlmd_store.get_artifacts_by_type( + standard_artifacts.ExampleStatistics.TYPE_NAME) + + self.pushed_model_path = os.path.join(self.tmpdir, 'pushed_model') + self.pushed_model_artifact = standard_artifacts.PushedModel() + self.pushed_model_artifact.uri = self.pushed_model_path + + self.model_card_artifact = artifact_utils.create_and_save_artifact( + artifact_name=self.pushed_model_artifact.name + '_model_card', + artifact_uri=self.create_tempdir().full_path, + store=mlmd_store) + + @parameterized.named_parameters( + dict(testcase_name='fullInput', + eval_artifacts=True, + example_stats_artifacts=True, + pushed_model_artifact=True, + exec_props=True), + dict(testcase_name='emptyInput', + eval_artifacts=False, + example_stats_artifacts=False, + pushed_model_artifact=False, + exec_props=False), + dict(testcase_name='partialInput', + eval_artifacts=False, + example_stats_artifacts=True, + pushed_model_artifact=False, + exec_props=True)) + def test_do(self, eval_artifacts: bool, example_stats_artifacts: bool, + pushed_model_artifact: bool, exec_props: bool): + + input_dict = {} + if eval_artifacts: + input_dict[standard_component_specs.EVALUATION_KEY] = self.eval_artifacts + if example_stats_artifacts: + input_dict[standard_component_specs. 
+ STATISTICS_KEY] = self.example_stats_artifacts + if pushed_model_artifact: + input_dict[standard_component_specs.PUSHED_MODEL_KEY] = [ + self.pushed_model_artifact + ] + + output_dict = {'model_card': [self.model_card_artifact]} + + exec_properties = {} + if exec_props: + exec_properties['json'] = { + 'model_details': { + 'name': 'json_test', + } + } + exec_properties['template_io'] = [(self.template_file.full_path, + 'my_cool_model_card.html')] + + # Call MCT Executor + self.mct_executor.Do(input_dict=input_dict, + output_dict=output_dict, + exec_properties=exec_properties) + + # Verify model card proto and document were generated + self.assertIn( + 'model_card.proto', + os.listdir(os.path.join(self.model_card_artifact.uri, 'data'))) + self.assertIn( + 'default_template.html.jinja', + os.listdir( + os.path.join(self.model_card_artifact.uri, 'template', 'html'))) + + model_card_proto = model_card_pb2.ModelCard() + with open( + os.path.join(self.model_card_artifact.uri, 'data', 'model_card.proto'), + 'rb') as f: + model_card_proto.ParseFromString(f.read()) + + with self.subTest(name='exec_props'): + model_card_dir = os.path.join(self.model_card_artifact.uri, + 'model_cards') + if exec_props: + self.assertEqual(model_card_proto.model_details.name, 'json_test') + model_card_file_name = 'my_cool_model_card.html' + else: + model_card_file_name = 'model_card.html' + self.assertIn(model_card_file_name, os.listdir(model_card_dir)) + model_card_filepath = os.path.join(model_card_dir, model_card_file_name) + with open(model_card_filepath) as f: + model_card_content = f.read() + if exec_props: + self.assertEqual(model_card_content, 'hello world') + else: + self.assertStartsWith(model_card_content, '') + + if eval_artifacts: + with self.subTest(name='eval_artifacts'): + self.assertCountEqual( + model_card_proto.quantitative_analysis.performance_metrics, [ + model_card_pb2.PerformanceMetric( + type='post_export_metrics/example_count', value='2.0'), + model_card_pb2.PerformanceMetric(type='average_loss', + value='0.5') + ]) + self.assertLen( + model_card_proto.quantitative_analysis.graphics.collection, 2) + + if example_stats_artifacts: + with self.subTest(name='example_stats_artifacts.data'): + self.assertLen(model_card_proto.model_parameters.data, + 2) # train and eval + for dataset in model_card_proto.model_parameters.data: + for graphic in dataset.graphics.collection: + self.assertIsNotNone( + graphic.image, + msg=f'No image found for graphic: {dataset.name} {graphic.name}' + ) + graphic.image = bytes( + ) # ignore graphic.image for below assertions + self.assertIn( + model_card_pb2.Dataset( + name=self.train_dataset_name, + graphics=model_card_pb2.GraphicsCollection(collection=[ + model_card_pb2.Graphic(name='counts | feature_name1', + image='') + ])), model_card_proto.model_parameters.data) + self.assertIn( + model_card_pb2.Dataset( + name=self.eval_dataset_name, + graphics=model_card_pb2.GraphicsCollection(collection=[ + model_card_pb2.Graphic(name='counts | feature_name2', + image='') + ])), model_card_proto.model_parameters.data) + + if pushed_model_artifact: + with self.subTest(name='pushed_model_artifact'): + self.assertEqual(model_card_proto.model_details.path, + self.pushed_model_path) + + +if __name__ == '__main__': + absltest.main() diff --git a/tfx_addons/model_card_generator/tfxtest.py b/tfx_addons/model_card_generator/tfxtest.py new file mode 100644 index 00000000..9376a705 --- /dev/null +++ b/tfx_addons/model_card_generator/tfxtest.py @@ -0,0 +1,187 @@ +# Copyright 2020 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A helper class for testing interop with TFX pipelines.""" + +import os +from typing import Any, Callable, List, Optional + +import apache_beam as beam +import ml_metadata as mlmd +import tensorflow_model_analysis as tfma +from absl import flags +from ml_metadata.proto import metadata_store_pb2 +from model_card_toolkit.utils.tfx_util import (_TFX_METRICS_TYPE, + _TFX_STATS_TYPE) +from tensorflow_metadata.proto.v0 import statistics_pb2 +from tensorflow_model_analysis.eval_saved_model.example_trainers import \ + fixed_prediction_estimator +from tfx_bsl.tfxio import raw_tf_record + + +class TfxTest(tfma.eval_saved_model.testutil.TensorflowModelAnalysisTest): + """A helper class for testing interop with TFX pipelines.""" + def setUp(self): + super().setUp() + self.tmp_db_path = os.path.join(self.create_tempdir(), 'test_mlmd.db') + self.tmpdir = self.create_tempdir() + + def _set_up_mlmd(self): + connection_config = metadata_store_pb2.ConnectionConfig() + connection_config.fake_database.SetInParent() + return mlmd.MetadataStore(connection_config) + + def _put_artifact(self, store: mlmd.MetadataStore, type_name: str, + uri: str) -> None: + type_id = store.put_artifact_type( + metadata_store_pb2.ArtifactType(name=type_name)) + store.put_artifacts( + [metadata_store_pb2.Artifact(uri=uri, type_id=type_id)]) + + def _write_tfma(self, + tfma_path: str, + output_file_format: str, + add_metrics_callbacks: List[Callable[..., Any]], + store: Optional[mlmd.MetadataStore] = None) -> None: + """Runs a sample TFMA job and stores output. + + This uses a trivial inputs (two examples, with prediction/label = 0/1 and + 1/1). and writes metrics and plots to the specified path. + + Args: + tfma_path: The path to save the TFMA output to. + output_file_format: The format to save TFMA output to. + See [TFMA API Docs](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/writers/MetricsPlotsAndValidationsWriter) # pylint: disable=line-too-long + for the most up-to-date reference. If the empty string, 'tfrecord' + will be used. + add_metrics_callbacks: TFMA metric callbacks to compute. + See [TFMA API Docs](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/post_export_metrics) # pylint: disable=line-too-long + for examples. + store: The MLMD store to save the TFMA output artifact. 
+ """ + if not output_file_format: + output_file_format = 'tfrecord' + _, eval_saved_model_path = ( + fixed_prediction_estimator.simple_fixed_prediction_estimator( + export_path=None, + eval_export_path=os.path.join(self.tmpdir, 'eval_export_dir'))) + eval_config = tfma.EvalConfig(model_specs=[tfma.ModelSpec()]) + eval_shared_model = self.createTestEvalSharedModel( + eval_saved_model_path=eval_saved_model_path, + add_metrics_callbacks=add_metrics_callbacks) + extractors = [ + tfma.extractors.legacy_predict_extractor.PredictExtractor( + eval_shared_model, eval_config=eval_config), + tfma.extractors.unbatch_extractor.UnbatchExtractor(), + tfma.extractors.slice_key_extractor.SliceKeyExtractor(), + ] + evaluators = [ + tfma.evaluators.legacy_metrics_and_plots_evaluator. + MetricsAndPlotsEvaluator(eval_shared_model) + ] + writers = [ + tfma.writers.MetricsPlotsAndValidationsWriter( + output_paths={ + 'metrics': os.path.join(tfma_path, 'metrics'), + 'plots': os.path.join(tfma_path, 'plots') + }, + output_file_format=output_file_format, + eval_config=eval_config, + add_metrics_callbacks=eval_shared_model.add_metrics_callbacks), + ] + + tfx_io = raw_tf_record.RawBeamRecordTFXIO( + physical_format='inmemory', + raw_record_column_name='__raw_record__', + telemetry_descriptors=['TFMATest']) + with beam.Pipeline() as pipeline: + example1 = self._makeExample(prediction=0.0, label=1.0) + example2 = self._makeExample(prediction=1.0, label=1.0) + _ = (pipeline + | 'Create' >> beam.Create([ + example1.SerializeToString(), + example2.SerializeToString(), + ]) + | 'BatchExamples' >> tfx_io.BeamSource() + | 'ExtractEvaluateAndWriteResults' >> + tfma.ExtractEvaluateAndWriteResults( + eval_config=eval_config, + eval_shared_model=eval_shared_model, + extractors=extractors, + evaluators=evaluators, + writers=writers)) + + if store: + self._put_artifact(store, _TFX_METRICS_TYPE, tfma_path) + + def _write_tfdv(self, + tfdv_path: str, + train_dataset_name: str, + train_features: List[str], + eval_dataset_name: str, + eval_features: List[str], + store: Optional[mlmd.MetadataStore] = None) -> None: + """Runs a sample TFDV job and stores output. + + For the training and evaluation datasets, for each feature, this creates a + trivial TFDV histogram with three buckets. It writes this output to the + specified path. + + Args: + tfdv_path: The path to save the TFDV output to. + train_dataset_name: The name to give the training dataset in the TFDV + analysis. + train_features: The names of the features in the training dataset. + eval_dataset_name: The name to give the evaluation dataset in the TFDV + analysis. + eval_features: The names of the features in the evaluation dataset. + store: The MLMD store to save the TFDV output artifact. 
+ """ + def _write(dataset_name: str, features: List[str], split_name: str): + stats = statistics_pb2.DatasetFeatureStatistics() + stats.name = dataset_name + for feature in features: + stat_feature = stats.features.add() + stat_feature.name = feature + stat_feature.string_stats.rank_histogram.buckets.extend([ + statistics_pb2.RankHistogram.Bucket(low_rank=0, + high_rank=0, + label='a', + sample_count=4.0), + statistics_pb2.RankHistogram.Bucket(low_rank=1, + high_rank=1, + label='b', + sample_count=3.0), + statistics_pb2.RankHistogram.Bucket(low_rank=2, + high_rank=2, + label='c', + sample_count=2.0) + ]) + stats_list = statistics_pb2.DatasetFeatureStatisticsList( + datasets=[stats]) + stats_file = os.path.join(tfdv_path, split_name, 'FeatureStats.pb') + os.makedirs(os.path.dirname(stats_file), exist_ok=True) + with open(stats_file, mode='wb') as f: + f.write(stats_list.SerializeToString()) + + _write(train_dataset_name, train_features, 'Split-train') + _write(eval_dataset_name, eval_features, 'Split-eval') + + if store: + self._put_artifact(store, _TFX_STATS_TYPE, tfdv_path) + + +if __name__ != '__main__': + # Manually pass and parse flags to prevent UnparsedFlagAccessError when using + # pytest or unittest as a runner. + flags.FLAGS(['--test_tmpdir']) diff --git a/tfx_addons/predictions_to_bigquery/__init__.py b/tfx_addons/predictions_to_bigquery/__init__.py new file mode 100644 index 00000000..804c6abb --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/tfx_addons/predictions_to_bigquery/component.py b/tfx_addons/predictions_to_bigquery/component.py new file mode 100644 index 00000000..c89c8ca9 --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/component.py @@ -0,0 +1,128 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# This code was originally written by Hannes Hapke (Digits Financial Inc.) +# on Feb. 6, 2023. +""" +Digits Prediction-to-BigQuery: Functionality to write prediction results usually + from a BulkInferrer to BigQuery. 
+""" + +from typing import Optional + +from tfx import types +from tfx.dsl.components.base import base_component, executor_spec +from tfx.types import standard_artifacts +from tfx.types.component_spec import ChannelParameter, ExecutionParameter + +from .executor import Executor as AnnotateUnlabeledCategoryDataExecutor + +_MIN_THRESHOLD = 0.8 +_VOCAB_FILE = "vocab_label_txt" + +# pylint: disable=missing-class-docstring + + +class AnnotateUnlabeledCategoryDataComponentSpec(types.ComponentSpec): + + PARAMETERS = { + # These are parameters that will be passed in the call to + # create an instance of this component. + "vocab_label_file": ExecutionParameter(type=str), + "bq_table_name": ExecutionParameter(type=str), + "filter_threshold": ExecutionParameter(type=float), + "table_suffix": ExecutionParameter(type=str), + "table_partitioning": ExecutionParameter(type=bool), + "expiration_time_delta": ExecutionParameter(type=int), + } + INPUTS = { + # This will be a dictionary with input artifacts, including URIs + "transform_graph": + ChannelParameter(type=standard_artifacts.TransformGraph), + "inference_results": + ChannelParameter(type=standard_artifacts.InferenceResult), + "schema": + ChannelParameter(type=standard_artifacts.Schema), + } + OUTPUTS = { + "bigquery_export": ChannelParameter(type=standard_artifacts.String), + } + + +class AnnotateUnlabeledCategoryDataComponent(base_component.BaseComponent): + """ + AnnotateUnlabeledCategoryData Component. + + The component takes the following input artifacts: + * Inference results: InferenceResult + * Transform graph: TransformGraph + * Schema: Schema (optional) if not present, the component will determine + the schema (only predtion supported at the moment) + + The component takes the following parameters: + * vocab_label_file: str - The file name of the file containing the + vocabulary labels (produced by TFT). + * bq_table_name: str - The name of the BigQuery table to write the results + to. + * filter_threshold: float - The minimum probability threshold for a + prediction to be considered a positive, thrustworthy prediction. + Default is 0.8. + * table_suffix: str (optional) - If provided, the generated datetime string + will be added the BigQuery table name as suffix. The default is %Y%m%d. + * table_partitioning: bool - Whether to partition the table by DAY. If True, + the generated BigQuery table will be partition by date. If False, no + partitioning will be applied. Default is True. + * expiration_time_delta: int (optional) - The number of seconds after which + the table will expire. + + The component produces the following output artifacts: + * bigquery_export: String - The URI of the BigQuery table containing the + results. 
+ """ + + SPEC_CLASS = AnnotateUnlabeledCategoryDataComponentSpec + EXECUTOR_SPEC = executor_spec.BeamExecutorSpec( + AnnotateUnlabeledCategoryDataExecutor) + + def __init__( + self, + inference_results: types.Channel = None, + transform_graph: types.Channel = None, + bq_table_name: str = None, + vocab_label_file: str = _VOCAB_FILE, + filter_threshold: float = _MIN_THRESHOLD, + table_suffix: str = "%Y%m%d", + table_partitioning: bool = True, + schema: Optional[types.Channel] = None, + expiration_time_delta: Optional[int] = 0, + bigquery_export: Optional[types.Channel] = None, + ): + + bigquery_export = bigquery_export or types.Channel( + type=standard_artifacts.String) + schema = schema or types.Channel(type=standard_artifacts.Schema()) + + spec = AnnotateUnlabeledCategoryDataComponentSpec( + inference_results=inference_results, + transform_graph=transform_graph, + schema=schema, + bq_table_name=bq_table_name, + vocab_label_file=vocab_label_file, + filter_threshold=filter_threshold, + table_suffix=table_suffix, + table_partitioning=table_partitioning, + expiration_time_delta=expiration_time_delta, + bigquery_export=bigquery_export, + ) + super().__init__(spec=spec) diff --git a/tfx_addons/predictions_to_bigquery/executor.py b/tfx_addons/predictions_to_bigquery/executor.py new file mode 100644 index 00000000..763e226f --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/executor.py @@ -0,0 +1,283 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# This code was originally written by Hannes Hapke (Digits Financial Inc.) +# on Feb. 6, 2023. +"""Implements executor to write BulkInferrer prediction results to BigQuery.""" + +import datetime +import os +import re +from collections.abc import Mapping, Sequence +from typing import Any, List, Optional, Tuple, Union + +import apache_beam as beam +import numpy as np +import numpy.typing as npt +import tensorflow as tf +import tensorflow_transform as tft +from absl import logging +from tensorflow_serving.apis import prediction_log_pb2 +from tfx import types +from tfx.dsl.components.base import base_beam_executor +from tfx.types import artifact_utils + +# TODO(cezequiel): Move relevant functions in utils module here. 
+from tfx_addons.predictions_to_bigquery import utils + +_SCHEMA_FILE_NAME = "schema.pbtxt" +_DECIMAL_PLACES = 6 +_DEFAULT_TIMESTRING_FORMAT = '%Y%m%d_%H%M%S' +_REQUIRED_EXEC_PROPERTIES = ( + 'bq_table_name', + 'bq_dataset', + 'filter_threshold', + 'gcp_project', + 'gcs_temp_dir', + 'vocab_label_file', +) +_REGEX_CHARS_TO_REPLACE = re.compile(r'[^a-zA-Z0-9_]') + + +def _check_exec_properties(exec_properties: Mapping[str, Any]) -> None: + for key in _REQUIRED_EXEC_PROPERTIES: + if exec_properties[key] is None: + raise ValueError(f'{key} must be set in exec_properties') + + +def _get_labels(transform_output_uri: str, vocab_file: str) -> Sequence[str]: + tf_transform_output = tft.TFTransformOutput(transform_output_uri) + tft_vocab = tf_transform_output.vocabulary_by_name(vocab_filename=vocab_file) + return [label.decode() for label in tft_vocab] + + +def _get_bq_table_name( + basename: str, + timestamp: Optional[datetime.datetime] = None, + timestring_format: Optional[str] = None, +) -> str: + if timestamp is not None: + timestring_format = timestring_format or _DEFAULT_TIMESTRING_FORMAT + return basename + '_' + timestamp.strftime(timestring_format) + return basename + + +def _get_additional_bq_parameters( + expiration_days: Optional[int] = None, + table_partitioning: bool = False, +) -> Mapping[str, Any]: + output = {} + if table_partitioning: + time_partitioning = {'type': 'DAY'} + logging.info('BigQuery table time partitioning set to DAY') + if expiration_days: + expiration_time_delta = datetime.timedelta(days=expiration_days) + expiration_milliseconds = expiration_time_delta.total_seconds() * 1000 + logging.info( + f'BigQuery table partition expiration time set to {expiration_days}' + ' days') + time_partitioning['expirationMs'] = expiration_milliseconds + output['timePartitioning'] = time_partitioning + return output + + +def _get_features( + *, + schema_uri: Optional[str] = None, + prediction_log_path: Optional[str] = None, +) -> Mapping[str, Any]: + if schema_uri: + schema_file = os.path.join(schema_uri, _SCHEMA_FILE_NAME) + return utils.load_schema(schema_file) + + if not prediction_log_path: + raise ValueError('Specify one of `schema_uri` or `prediction_log_path`.') + + return utils.parse_schema(prediction_log_path) + + +def _get_bq_field_name_from_key(key: str) -> str: + field_name = _REGEX_CHARS_TO_REPLACE.sub('_', key) + return re.sub('_+', '_', field_name).strip('_') + + +def _features_to_bq_schema(features: Mapping[str, Any], + required: bool = False): + bq_schema_fields_ = utils.feature_to_bq_schema(features, required=required) + bq_schema_fields = [] + for field in bq_schema_fields_: + field['name'] = _get_bq_field_name_from_key(field['name']) + bq_schema_fields.append(field) + bq_schema_fields.extend( + utils.create_annotation_fields(label_field_name="category_label", + score_field_name="score", + required=required, + add_datetime_field=True)) + return {"fields": bq_schema_fields} + + +def _tensor_to_native_python_value( + tensor: Union[tf.Tensor, tf.sparse.SparseTensor] +) -> Optional[Union[int, float, str]]: + """Converts a TF Tensor to a native Python value.""" + if isinstance(tensor, tf.sparse.SparseTensor): + values = tensor.values.numpy() + else: + values = tensor.numpy() + if not values: + return None + values = np.squeeze(values) # Removes extra dimension, e.g. shape (n, 1). 
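+  # `.item()` below assumes the squeezed array holds exactly one element and
+  # raises a ValueError otherwise.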
+ values = values.item() # Converts to native Python type + if isinstance(values, Sequence) and isinstance(values[0], bytes): + return [v.decode('utf-8') for v in values] + if isinstance(values, bytes): + return values.decode('utf-8') + return values + + +@beam.typehints.with_input_types(str) +@beam.typehints.with_output_types(beam.typehints.Iterable[Tuple[str, str, + Any]]) +class FilterPredictionToDictFn(beam.DoFn): + """Converts a PredictionLog proto to a dict.""" + def __init__( + self, + labels: List, + features: Any, + timestamp: datetime.datetime, + filter_threshold: float, + score_multiplier: float = 1., + ): + super().__init__() + self._labels = labels + self._features = features + self._filter_threshold = filter_threshold + self._score_multiplier = score_multiplier + self._timestamp = timestamp + + def _parse_prediction(self, predictions: npt.ArrayLike): + prediction_id = np.argmax(predictions) + logging.debug("Prediction id: %s", prediction_id) + logging.debug("Predictions: %s", predictions) + label = self._labels[prediction_id] + score = predictions[0][prediction_id] + return label, score + + def _parse_example(self, serialized: bytes) -> Mapping[str, Any]: + parsed_example = tf.io.parse_example(serialized, self._features) + output = {} + for key, tensor in parsed_example.items(): + field = _get_bq_field_name_from_key(key) + value = _tensor_to_native_python_value(tensor) + # To add a null value to BigQuery from JSON, omit the key,value pair + # with null value. + if value is None: + continue + output[field] = value + return output + + def process(self, element, *args, **kwargs): # pylint: disable=missing-function-docstring + del args, kwargs # unused + + parsed_prediction_scores = tf.make_ndarray( + element.predict_log.response.outputs["outputs"]) + label, score = self._parse_prediction(parsed_prediction_scores) + if score >= self._filter_threshold: + output = { + "category_label": label, + # Workaround to issue with the score value having additional non-zero values + # in higher decimal places. + # e.g. 0.8 -> 0.800000011920929 + "score": round(score * self._score_multiplier, _DECIMAL_PLACES), + "datetime": self._timestamp, + } + output.update( + self._parse_example( + element.predict_log.request.inputs['examples'].string_val)) + yield output + + +class Executor(base_beam_executor.BaseBeamExecutor): + """Implements predictions-to-bigquery component logic.""" + def Do( + self, + input_dict: Mapping[str, List[types.Artifact]], + output_dict: Mapping[str, List[types.Artifact]], + exec_properties: Mapping[str, Any], + ) -> None: + """Do function for predictions_to_bq executor.""" + + timestamp = datetime.datetime.now().replace(second=0, microsecond=0) + + # Check required keys set in exec_properties + _check_exec_properties(exec_properties) + + # get labels from tf transform generated vocab file + labels = _get_labels( + artifact_utils.get_single_uri(input_dict['transform_graph']), + exec_properties['vocab_label_file'], + ) + logging.info(f"found the following labels from TFT vocab: {labels}") + + # set BigQuery table name and timestamp suffix if specified. 
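+    # For illustration: a bq_table_name of 'predictions' with the default
+    # table_suffix '%Y%m%d' and a timestamp of 2023-02-06 yields the table
+    # name 'predictions_20230206'.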
+ bq_table_name = _get_bq_table_name(exec_properties['bq_table_name'], + timestamp, + exec_properties['table_suffix']) + + # set prediction result file path and decoder + inference_results_uri = artifact_utils.get_single_uri( + input_dict["inference_results"]) + prediction_log_path = f"{inference_results_uri}/*.gz" + prediction_log_decoder = beam.coders.ProtoCoder( + prediction_log_pb2.PredictionLog) + + # get schema features + features = _get_features(schema_uri=artifact_utils.get_single_uri( + input_dict["schema"]), + prediction_log_path=prediction_log_path) + + # generate bigquery schema from tf transform features + bq_schema = _features_to_bq_schema(features) + logging.info(f'generated bq_schema: {bq_schema}.') + + additional_bq_parameters = _get_additional_bq_parameters( + exec_properties.get('expiration_time_delta'), + exec_properties.get('table_partitioning')) + + # run the Beam pipeline to write the inference data to bigquery + with self._make_beam_pipeline() as pipeline: + _ = (pipeline + | 'Read Prediction Log' >> beam.io.ReadFromTFRecord( + prediction_log_path, coder=prediction_log_decoder) + | 'Filter and Convert to Dict' >> beam.ParDo( + FilterPredictionToDictFn( + labels=labels, + features=features, + timestamp=timestamp, + filter_threshold=exec_properties['filter_threshold'])) + | 'Write Dict to BQ' >> beam.io.gcp.bigquery.WriteToBigQuery( + table=bq_table_name, + dataset=exec_properties['bq_dataset'], + project=exec_properties['gcp_project'], + schema=bq_schema, + additional_bq_parameters=additional_bq_parameters, + create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED, + write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE, + custom_gcs_temp_location=exec_properties['gcs_temp_dir'])) + + bigquery_export = artifact_utils.get_single_instance( + output_dict['bigquery_export']) + bigquery_export.set_string_custom_property('generated_bq_table_name', + bq_table_name) + logging.info(f'Annotated data exported to {bq_table_name}') diff --git a/tfx_addons/predictions_to_bigquery/executor_test.py b/tfx_addons/predictions_to_bigquery/executor_test.py new file mode 100644 index 00000000..38447fb4 --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/executor_test.py @@ -0,0 +1,343 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for executor.py.""" + +import datetime +from typing import Mapping, Sequence, Union +from unittest import mock + +import apache_beam as beam +import tensorflow as tf +import tensorflow_transform as tft +from absl import logging +from absl.testing import absltest, parameterized +from ml_metadata.proto import metadata_store_pb2 +from tensorflow_serving.apis import model_pb2, predict_pb2, prediction_log_pb2 +from tfx import types + +from tfx_addons.predictions_to_bigquery import executor, utils + +logging.set_verbosity(logging.WARNING) + +_TIMESTAMP = datetime.datetime.now() + + +def _create_tf_example( + features: Mapping[str, Union[bytes, float, int]]) -> tf.train.Example: + tf_features = {} + for key, value in features.items(): + if isinstance(value, bytes): + tf_feature = tf.train.Feature(bytes_list=tf.train.BytesList( + value=[value])) + elif isinstance(value, float): + tf_feature = tf.train.Feature(float_list=tf.train.FloatList( + value=[value])) + elif isinstance(value, int): + tf_feature = tf.train.Feature(int64_list=tf.train.Int64List( + value=[value])) + else: + raise ValueError(f'Unsupported feature type for key {key}:' + f' {type(value)} .') + tf_features[key] = tf_feature + return tf.train.Example(features=tf.train.Features(feature=tf_features)) + + +def _create_model_spec() -> model_pb2.ModelSpec: + return model_pb2.ModelSpec(signature_name='serving_default') + + +def _create_predict_request( + features: Mapping[str, Union[bytes, float, int]] +) -> predict_pb2.PredictRequest: + tf_example = _create_tf_example(features) + request_tensor_proto = tf.make_tensor_proto( + values=tf_example.SerializeToString(), dtype=tf.string, shape=(1, )) + return predict_pb2.PredictRequest( + model_spec=_create_model_spec(), + inputs={ + 'examples': request_tensor_proto, + }, + ) + + +def _create_predict_response( + values: Sequence[float]) -> predict_pb2.PredictResponse: + response_tensor_proto = tf.make_tensor_proto(values=values, + dtype=tf.float32, + shape=(1, len(values))) + return predict_pb2.PredictResponse(model_spec=_create_model_spec(), + outputs={ + 'outputs': response_tensor_proto, + }) + + +def _create_prediction_log( + request: predict_pb2.PredictRequest, + response: predict_pb2.PredictResponse) -> prediction_log_pb2.PredictionLog: + predict_log = prediction_log_pb2.PredictLog(request=request, + response=response) + return prediction_log_pb2.PredictionLog(predict_log=predict_log) + + +class FilterPredictionToDictFnTest(absltest.TestCase): + """Tests for FilterPredictionToDictFn class.""" + def setUp(self): + self.labels = ['l1', 'l2', 'l3'] + self.features = { + 'bytes_feature': tf.io.FixedLenFeature([], dtype=tf.string), + 'float_feature': tf.io.FixedLenFeature([], dtype=tf.float32), + } + self.timestamp = datetime.datetime.now() + self.filter_threshold = 0.5 + + self.dofn = executor.FilterPredictionToDictFn( + labels=self.labels, + features=self.features, + timestamp=self.timestamp, + filter_threshold=self.filter_threshold, + ) + + def test_process(self): + element = _create_prediction_log( + request=_create_predict_request(features={ + 'bytes_feature': b'a', + 'float_feature': 0.5, + }), + response=_create_predict_response([0.1, 0.8, 0.1]), + ) + output = next(self.dofn.process(element)) + expected = { + 'bytes_feature': 'a', + 'float_feature': 0.5, + 'category_label': 'l2', + 'score': 0.8, + 'datetime': mock.ANY, + } + self.assertEqual(expected, output) + 
self.assertIsInstance(output['datetime'], datetime.datetime) + + def test_process_below_threshold(self): + element = _create_prediction_log( + request=_create_predict_request(features={ + 'bytes_feature': b'a', + }), + response=_create_predict_response([1 / 3, 1 / 3, 1 / 3]), + ) + with self.assertRaises(StopIteration): + _ = next(self.dofn.process(element)) + + +def _make_artifact(uri) -> types.Artifact: + artifact = types.Artifact(metadata_store_pb2.ArtifactType()) + artifact.uri = uri + return artifact + + +def _make_artifact_mapping( + data_dict: Mapping[str, str]) -> Mapping[str, Sequence[types.Artifact]]: + return {k: [_make_artifact(v)] for k, v in data_dict.items()} + + +class ExecutorTest(absltest.TestCase): + """Tests for Executor class.""" + def setUp(self): + super().setUp() + self.input_dict = _make_artifact_mapping({ + 'transform_graph': '/path/to/transform_output', + 'inference_results': '/path/to/BulkInferrer/inference_results', + 'schema': '/path/to/schema', + }) + self.output_dict = _make_artifact_mapping( + {'bigquery_export': '/path/to/bigquery_export'}) + self.exec_properties = { + 'bq_table_name': 'table', + 'bq_dataset': 'dataset', + 'gcp_project': 'project', + 'gcs_temp_dir': 'gs://bucket/temp-dir', + 'expiration_time_delta': 1, + 'filter_threshold': 0.5, + 'table_suffix': '%Y%m%d', + 'table_partitioning': True, + 'vocab_label_file': 'vocab_file', + } + + self.executor = executor.Executor() + + self.enter_context( + mock.patch.object(executor, '_get_labels', autospec=True)) + self.enter_context( + mock.patch.object(executor, '_get_bq_table_name', autospec=True)) + self.enter_context( + mock.patch.object(executor, + '_get_additional_bq_parameters', + autospec=True)) + self.enter_context( + mock.patch.object(executor, '_get_features', autospec=True)) + self.enter_context( + mock.patch.object(executor, '_features_to_bq_schema', autospec=True)) + + self.mock_read_from_tfrecord = self.enter_context( + mock.patch.object(beam.io, 'ReadFromTFRecord', autospec=True)) + self.mock_pardo = self.enter_context( + mock.patch.object(beam, 'ParDo', autospec=True)) + self.mock_write_to_bigquery = self.enter_context( + mock.patch.object(beam.io.gcp.bigquery, + 'WriteToBigQuery', + autospec=True)) + + self.enter_context( + mock.patch.object(types.Artifact, + 'set_string_custom_property', + autospec=True)) + + def test_Do(self): + self.executor.Do(self.input_dict, self.output_dict, self.exec_properties) + + self.mock_read_from_tfrecord.assert_called_once() + self.mock_pardo.assert_called_once() + self.mock_write_to_bigquery.assert_called_once() + + +# pylint: disable=protected-access + + +class ExecutorModuleTest(parameterized.TestCase): + """Tests for executor module-level functions.""" + def test_get_labels(self): + mock_tftransform_output = self.enter_context( + mock.patch.object(tft, 'TFTransformOutput', autospec=True)) + mock_vocabulary_by_name = ( + mock_tftransform_output.return_value.vocabulary_by_name) + mock_vocabulary_by_name.return_value = [b'a', b'b'] + + transform_output_uri = '/path/to/transform_output' + vocab_file = 'vocab' + + output = executor._get_labels(transform_output_uri, vocab_file) + + self.assertEqual(['a', 'b'], output) + mock_tftransform_output.assert_called_once_with(transform_output_uri) + mock_vocabulary_by_name.assert_called_once_with(vocab_file) + + @parameterized.named_parameters([('no_timestamp', None, None), + ('timestamp_no_format', _TIMESTAMP, None), + ('timestamp_format', _TIMESTAMP, '%Y%m%d')]) + def test_get_bq_table_name(self, timestamp, 
timestring_format): + basename = 'bq_table' + + output = executor._get_bq_table_name(basename, timestamp, + timestring_format) + + if timestamp is None: + expected = basename + self.assertEqual(expected, output) + elif timestring_format is None: + expected = ( + f'bq_table_{timestamp.strftime(executor._DEFAULT_TIMESTRING_FORMAT)}' + ) + self.assertEqual(expected, output) + else: + expected = f'bq_table_{timestamp.strftime(timestring_format)}' + self.assertEqual(expected, output) + + @parameterized.named_parameters([ + ('no_additional', None, None), + ('expiration_days_only', 1, None), + ('table_partitioning_only', None, True), + ('expiration_table_partitioning', 2, True), + ]) + def test_get_additiona_bq_parameters(self, expiration_days, + table_partitioning): + output = executor._get_additional_bq_parameters(expiration_days, + table_partitioning) + + if table_partitioning is None: + self.assertEqual({}, output) + if expiration_days is None and table_partitioning is not None: + expected = {'timePartitioning': {'type': 'DAY'}} + self.assertEqual(expected, output) + if expiration_days is not None and table_partitioning is not None: + # TODO(cezequiel): Use freezegun to set the time a specific value + expected = { + 'timePartitioning': { + 'type': 'DAY', + 'expirationMs': mock.ANY, + }, + } + self.assertEqual(expected, output) + + @parameterized.named_parameters([ + ('error_no_input', None, None), + ('schema_uri_only', 'uri', None), + ('prediction_log_path', None, 'path'), + ('schema_uri_prediction_log_path', 'uri', 'path'), + ]) + def test_get_features(self, schema_uri, prediction_log_path): + schema = { + 'feature': tf.io.FixedLenFeature([], dtype=tf.int64), + } + mock_load_schema = self.enter_context( + mock.patch.object(utils, + 'load_schema', + autospec=True, + return_value=schema)) + mock_parse_schema = self.enter_context( + mock.patch.object(utils, + 'parse_schema', + autopspec=True, + return_value=schema)) + + if schema_uri is None and prediction_log_path is None: + with self.assertRaises(ValueError): + _ = executor._get_features(schema_uri=schema_uri, + prediction_log_path=prediction_log_path) + + else: + output = executor._get_features(schema_uri=schema_uri, + prediction_log_path=prediction_log_path) + + if schema_uri: + mock_load_schema.assert_called_once_with(mock.ANY) + mock_parse_schema.assert_not_called() + elif prediction_log_path: + mock_load_schema.assert_not_called() + mock_parse_schema.assert_called_once_with(prediction_log_path) + + self.assertEqual(schema, output) + + def test_features_to_bq_schema(self): + mock_feature_to_bq_schema = self.enter_context( + mock.patch.object(utils, 'feature_to_bq_schema', autospec=True)) + mock_create_annotation_fields = self.enter_context( + mock.patch.object(utils, + 'create_annotation_fields', + autospec=True, + return_value={})) + + features = { + 'feature': tf.io.FixedLenFeature([], dtype=tf.int64), + } + required = True + + output = executor._features_to_bq_schema(features, required) + + self.assertIn('fields', output) + mock_feature_to_bq_schema.assert_called_once_with(features, + required=required) + mock_create_annotation_fields.assert_called_once() + + +if __name__ == '__main__': + absltest.main() diff --git a/tfx_addons/predictions_to_bigquery/test_component.py b/tfx_addons/predictions_to_bigquery/test_component.py new file mode 100644 index 00000000..f07bae40 --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/test_component.py @@ -0,0 +1,52 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# This code was originally written by Hannes Hapke (Digits Financial Inc.) +# on Feb. 6, 2023. +""" +Tests around Digits Prediction-to-BigQuery component. +""" + +import tensorflow as tf +from tfx.types import channel_utils, standard_artifacts + +from . import component + + +class ComponentTest(tf.test.TestCase): + def setUp(self): + super(ComponentTest, self).setUp() + self._transform_graph = channel_utils.as_channel( + [standard_artifacts.TransformGraph()]) + self._inference_results = channel_utils.as_channel( + [standard_artifacts.InferenceResult()]) + self._schema = channel_utils.as_channel([standard_artifacts.Schema()]) + + def testConstruct(self): + # not a real test, just checking if if the component can be + # instantiated + _ = component.AnnotateUnlabeledCategoryDataComponent( + transform_graph=self._transform_graph, + inference_results=self._inference_results, + schema=self._schema, + bq_table_name="gcp_project:bq_database.table", + vocab_label_file="vocab_txt", + filter_threshold=0.1, + table_suffix="%Y", + table_partitioning=False, + ) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tfx_addons/predictions_to_bigquery/utils.py b/tfx_addons/predictions_to_bigquery/utils.py new file mode 100644 index 00000000..ee79b126 --- /dev/null +++ b/tfx_addons/predictions_to_bigquery/utils.py @@ -0,0 +1,247 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# This code was originally written by Hannes Hapke (Digits Financial Inc.) +# on Feb. 6, 2023. +""" +Util functions for the Digits Prediction-to-BigQuery component. +""" + +import glob +from typing import Any, Dict, List + +import numpy as np +import tensorflow as tf +import tensorflow_transform as tft +from absl import logging +from google.protobuf import text_format +from tensorflow.python.lib.io import file_io +from tensorflow_metadata.proto.v0 import schema_pb2 + + +def load_schema(input_path: str) -> Dict: + """ + Loads a TFX schema from a file and returns schema object. + + Args: + input_path: Path to the file containing the schema. + + Returns: + A schema object. 
+ """ + + schema = schema_pb2.Schema() + schema_text = file_io.read_file_to_string(input_path) + text_format.Parse(schema_text, schema) + return tft.tf_metadata.schema_utils.schema_as_feature_spec( + schema).feature_spec + + +def _get_compress_type(file_path): + magic_bytes = { + b'x\x01': 'ZLIB', + b'x^': 'ZLIB', + b'x\x9c': 'ZLIB', + b'x\xda': 'ZLIB', + b'\x1f\x8b': 'GZIP' + } + + two_bytes = open(file_path, 'rb').read(2) + return magic_bytes.get(two_bytes) + + +def _get_feature_type(feature=None, type_=None): + + if type_: + return { + int: tf.int64, + bool: tf.int64, + float: tf.float32, + str: tf.string, + bytes: tf.string, + }[type_] + + if feature: + if feature.HasField('int64_list'): + return tf.int64 + if feature.HasField('float_list'): + return tf.float32 + if feature.HasField('bytes_list'): + return tf.string + + return None + + +def parse_schema(prediction_log_path: str, + compression_type: str = 'auto') -> Dict: + """Parses feature schema from predictions.""" + + features = {} + + file_paths = glob.glob(prediction_log_path) + if compression_type == 'auto': + compression_type = _get_compress_type(file_paths[0]) + + dataset = tf.data.TFRecordDataset(file_paths, + compression_type=compression_type) + + serialized = next(iter(dataset.map(lambda serialized: serialized))) + seq_ex = tf.train.SequenceExample.FromString(serialized.numpy()) + + if seq_ex.feature_lists.feature_list: + raise NotImplementedError("FeatureLists aren't supported at the moment.") + + for key, feature in seq_ex.context.feature.items(): + features[key] = tf.io.FixedLenFeature((), + _get_feature_type(feature=feature)) + return features + + +def convert_python_numpy_to_bq_type(python_type: Any) -> str: + """ + Converts a python type to a BigQuery type. + + Args: + python_type: A python type. + + Returns: + A BigQuery type. + """ + if isinstance(python_type, (int, np.int64)): + return "INTEGER" + elif isinstance(python_type, (float, np.float32)): + return "FLOAT" + elif isinstance(python_type, (str, bytes)): + return "STRING" + elif isinstance(python_type, (bool, np.bool)): + return "BOOLEAN" + else: + raise ValueError("Unsupported type: {python_type}") + + +def convert_single_value_to_native_py_value(tensor: Any) -> str: + """ + Converts a Python value to a native Python value. + + Args: + value: A value. + + Returns: + Value casted to native Python type. + """ + + if isinstance(tensor, tf.sparse.SparseTensor): + value = tensor.values.numpy()[0] + logging.debug(f"sparse value: {value}") + else: + value = tensor.numpy()[0] + logging.debug(f"dense value: {value}") + + if isinstance(value, (int, np.int64, np.int32)): + return int(value) + elif isinstance(value, (float, np.float32, np.float64)): + return float(value) + elif isinstance(value, str): + return value + elif isinstance(value, bytes): + return value.decode("utf-8") + elif isinstance(value, (bool, np.bool)): + return bool(value) + else: + raise ValueError(f"Unsupported value type: {value} of type {type(value)}") + + +def convert_tensorflow_dtype_to_bq_type(tf_dtype: tf.dtypes.DType) -> str: + """ + Converts a tensorflow dtype to a BigQuery type string. + + Args: + tf_dtype: A tensorflow dtype. + + Returns: + A BigQuery type string. 
+ """ + if tf_dtype in (tf.int64, tf.int64): + return "INTEGER" + elif tf_dtype in (tf.float32, tf.float64): + return "FLOAT" + elif tf_dtype == tf.string: + return "STRING" + elif tf_dtype == tf.bool: + return "BOOLEAN" + else: + raise ValueError(f"Unsupported type: {tf_dtype}") + + +def feature_to_bq_schema(features: Dict[str, Any], + required: bool = True) -> List[Dict]: + """ + Convert a list of features to a list of BigQuery schema fields. + + Args: + features: A list of features. + required: Whether the field is required. + + Returns: + A list of BigQuery schema fields. + """ + return [{ + "name": feature_name, + "type": convert_tensorflow_dtype_to_bq_type(feature_def.dtype), + "mode": "REQUIRED" if required else "NULLABLE", + } for feature_name, feature_def in features.items()] + + +def create_annotation_fields( + label_field_name: str = "category_label", + score_field_name: str = "score", + required: bool = True, + add_datetime_field: bool = True, +) -> List[Dict]: + """ + Create a list of BigQuery schema fields for the annotation fields. + + Args: + label_field_name: The name of the label field. + score_field_name: The name of the score field. + required: Whether the fields are required. + add_datetime_field: Whether to add a datetime field. + + Returns: + A list of BigQuery schema fields. + """ + + label_field = { + "name": label_field_name, + "type": "STRING", + "mode": "REQUIRED" if required else "NULLABLE", + } + + score_field = { + "name": score_field_name, + "type": "FLOAT", + "mode": "REQUIRED" if required else "NULLABLE", + } + + fields = [label_field, score_field] + + if add_datetime_field: + datetime_field = { + "name": "datetime", + "type": "TIMESTAMP", + "mode": "REQUIRED" if required else "NULLABLE", + } + fields.append(datetime_field) + + return fields diff --git a/tfx_addons/sampling/README.md b/tfx_addons/sampling/README.md index e0a32be4..390a6483 100644 --- a/tfx_addons/sampling/README.md +++ b/tfx_addons/sampling/README.md @@ -35,4 +35,4 @@ Component `outputs` contains: materialized sampled examples, based on the input splits, which includes copied splits unless otherwise specified by copy_others. -[Initial Proposal](../proposals/20210721-sampling_component.md) +[Initial Proposal](https://github.com/tensorflow/tfx-addons/blob/main/proposals/20210721-sampling_component.md) diff --git a/tfx_addons/version.py b/tfx_addons/version.py index d47b38e4..d80db7be 100644 --- a/tfx_addons/version.py +++ b/tfx_addons/version.py @@ -16,7 +16,7 @@ # We follow Semantic Versioning (https://semver.org/) _MAJOR_VERSION = "0" -_MINOR_VERSION = "5" +_MINOR_VERSION = "6" _PATCH_VERSION = "0" # When building releases, we can update this value on the release branch to @@ -24,7 +24,7 @@ # stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a # release branch, the current version is by default assumed to be a # 'development' version, labeled 'dev'. -_VERSION_SUFFIX = "dev" +_VERSION_SUFFIX = "" # Example, '0.1.0-dev' __version__ = ".".join([_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION]) @@ -77,5 +77,7 @@ "firebase_publisher": [f"tfx{_TFXVERSION_CONSTRAINT}", "firebase-admin>=5.0.0,<6.0.0"], "huggingface_pusher": - [f"tfx{_TFXVERSION_CONSTRAINT}", "huggingface-hub>=0.10.0,<1.0.0"] + [f"tfx{_TFXVERSION_CONSTRAINT}", "huggingface-hub>=0.10.0,<1.0.0"], + "model_card_generator": + [f"tfx{_TFXVERSION_CONSTRAINT}", "model-card-toolkit>=2.0.0,<3.0.0"] }