1
votes

I have a TensorFlow model on GCP ML Engine, however I have a problem with the JSON string below:

# Build a Cloud ML Engine API client and request an online prediction.
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json

credentials = GoogleCredentials.get_application_default()
api = discovery.build(
    'ml', 'v1', credentials=credentials,
    discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json')

# NOTE(review): a model exported with build_parsing_serving_input_receiver_fn
# expects 'inputs' to be a serialized tf.Example *string* per instance, not a
# feature dict — presumably why the service answers "Expected string, got {...}".
request_data = {
    'instances': [{
        'inputs': {
            'clump_thickness': 2,
            'size_uniformity': 1,
            'shape_uniformity': 1,
            'marginal_adhesion': 1,
            'epithelial_size': 2,
            'bland_chromatin': 1,
            'bare_nucleoli': 2,
            'normal_nucleoli': 1,
            'mitoses': 1,
        }
    }]
}

# PROJECT must be defined before this point (not shown in the snippet).
parent = 'projects/%s/models/%s/versions/%s' % (
    PROJECT, 'breastCancer_optimized_06152018_2_2_a', 'v1')
response = api.projects().predict(body=request_data, name=parent).execute()
print(response)

I get the following error: {'error': "Prediction failed: Error processing input: Expected string, got {u'epithelial_size': 2, u'marginal_adhesion': 1, u'clump_thickness': 2, u'size_uniformity': 1, u'shape_uniformity': 1, u'normal_nucleoli': 1, u'mitoses': 1, u'bland_chromatin': 1, u'bare_nucleoli': 2} of type 'dict' instead."}

I can't seem to format request_data properly. Does anyone see what is wrong?

original serving function:

# Original serving function: builds a parse spec from the feature columns, so
# the exported model's single serving input tensor ('inputs') takes serialized
# tf.Example strings. (Trailing semicolons removed — not idiomatic Python.)
clump_thickness = tf.feature_column.numeric_column("clump_thickness")
size_uniformity = tf.feature_column.numeric_column("size_uniformity")
shape_uniformity = tf.feature_column.numeric_column("shape_uniformity")
marginal_adhesion = tf.feature_column.numeric_column("marginal_adhesion")
epithelial_size = tf.feature_column.numeric_column("epithelial_size")
bare_nucleoli = tf.feature_column.numeric_column("bare_nucleoli")
bland_chromatin = tf.feature_column.numeric_column("bland_chromatin")
normal_nucleoli = tf.feature_column.numeric_column("normal_nucleoli")
mitoses = tf.feature_column.numeric_column("mitoses")
feature_columns = [clump_thickness, size_uniformity, shape_uniformity,
                   marginal_adhesion, epithelial_size, bare_nucleoli,
                   bland_chromatin, normal_nucleoli, mitoses]
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
export_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
estimator.export_savedmodel(output_dir, export_input_fn, as_text=False)

Then I tried:

  def serving_input_fn():
      """Raw-tensor serving input fn: one float32 placeholder per feature.

      NOTE(review): in the pasted original the `features = ...` and `return`
      lines were dedented out of the function body (the function would have
      returned None); they are restored inside the function here.
      """
      feature_placeholders = {
          'clump_thickness': tf.placeholder(tf.float32, [None]),
          'size_uniformity': tf.placeholder(tf.float32, [None]),
          'shape_uniformity': tf.placeholder(tf.float32, [None]),
          'marginal_adhesion': tf.placeholder(tf.float32, [None]),
          'epithelial_size': tf.placeholder(tf.float32, [None]),
          'bare_nucleoli': tf.placeholder(tf.float32, [None]),
          'bland_chromatin': tf.placeholder(tf.float32, [None]),
          'normal_nucleoli': tf.placeholder(tf.float32, [None]),
          'mitoses': tf.placeholder(tf.float32, [None]),
      }
      features = feature_placeholders  # no transformation needed
      return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)

And in the train_and_eval function:

estimator.export_savedmodel(output_dir, serving_input_fn, as_text=False)

But now I get the following error:

{'error': "Prediction failed: Expected tensor name: inputs, got tensor name: [u'epithelial_size', u'marginal_adhesion', u'clump_thickness', u'size_uniformity', u'shape_uniformity', u'normal_nucleoli', u'mitoses', u'bland_chromatin', u'bare_nucleoli']."}

The estimator.export_savedmodel call appears to create a model which requires a tensor input (in the request_data line).

When I use the model created with either serving function the following works fine:

# Load the exported SavedModel locally (works with either serving function).
# NOTE(review): the GCS path was split across two lines in the paste; rejoined.
predict_fn = tf.contrib.predictor.from_saved_model(
    "gs://test-203900/breastCancer_optimized_06182018/9/1529432417")

# Test inputs represented by Pandas DataFrame.
inputs = pd.DataFrame({
    'clump_thickness': [2, 5, 4],
    'size_uniformity': [1, 10, 8],
    'shape_uniformity': [1, 10, 6],
    'marginal_adhesion': [1, 3, 4],
    'epithelial_size': [2, 7, 3],
    'bland_chromatin': [1, 3, 4],
    'bare_nucleoli': [2, 8, 10],
    'normal_nucleoli': [1, 10, 6],
    'mitoses': [1, 2, 1],
})

# Convert input data into serialized tf.Example strings — one per row.
# NOTE(review): in the pasted original, `examples.append(...)` was dedented out
# of the row loop, which would serialize only the final row; restored inside.
examples = []
for index, row in inputs.iterrows():
    feature = {}
    for col, value in row.iteritems():
        feature[col] = tf.train.Feature(
            float_list=tf.train.FloatList(value=[value]))
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    examples.append(example.SerializeToString())

# Make predictions: the parsing-exported model takes the serialized strings
# under the single 'inputs' key.
predictions = predict_fn({'inputs': examples})
1

1 Answer

0
votes

It depends on what your serving input function is. It appears from the error message that 'inputs' needs to be a single string per instance — most likely a serialized tf.Example, since the model was exported with a parsing serving input function — rather than a dict of features.

Try this:

saved_model_cli show --dir $MODEL_LOCATION --tag_set serve --signature_def serving_default

It will tell you what your serving input function is set to.

I suspect that what you want is for your serving input function to be:

def serving_input_fn():
    """Serving input fn that accepts raw float features per instance.

    Exposes one float32 placeholder (batch-sized, rank 1) per feature name,
    so prediction requests send named scalars instead of a serialized
    tf.Example string.
    """
    feature_names = ['size_uniformity', 'shape_uniformity']
    feature_placeholders = {
        name: tf.placeholder(tf.float32, [None]) for name in feature_names
    }
    # Features are passed through untransformed, so the same mapping serves
    # as both the receiver tensors and the model features.
    return tf.estimator.export.ServingInputReceiver(feature_placeholders,
                                                    feature_placeholders)

and for your input format to be:

# Instance format matching the raw-tensor serving input fn above: each
# instance is a flat dict of named feature values — no 'inputs' wrapper.
request_data = {
    'instances': [
        {'size_uniformity': 1, 'shape_uniformity': 1},
    ],
}