I exported my Keras model to TensorFlow Serving and the export itself works fine. What I'm trying to do is accept a base64-encoded image string from the client and output a True/False value. My Keras model outputs 3 values; the first indicates the degree predicted by the model, which I compare against a fixed value, and I want to export the whole algorithm, from taking in the image string to outputting the True/False value, to TensorFlow Serving behind the RESTful API. However, I do not get correct output from my client program. Long story short, let me show the code.
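To make the intent concrete, the post-processing I want the served graph to perform is roughly the following (a minimal sketch in plain NumPy; postprocess is just an illustrative name, and 100.0 is the fixed comparison value used in the export code below):

import numpy as np

def postprocess(model_outputs, degree_threshold=100.0):
    # model_outputs: array of shape (1, 3) holding [degree, x, y]
    degree = model_outputs[0, 0]
    flag = float(degree > degree_threshold)   # the True/False value to return
    return np.append(model_outputs[0], flag)  # [degree, x, y, flag]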
My program to export the saved model:
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import load_model
from keras.layers import Input
import os

tf.app.flags.DEFINE_string('model_dir', './keras_models',
                           '''Directory which contains keras models''')
tf.app.flags.DEFINE_string('output_dir', './model_output',
                           '''Directory where to export the model''')
tf.app.flags.DEFINE_string('model_version', '1',
                           '''version number of the model''')
tf.app.flags.DEFINE_string('model_file', 'pointer_model.json',
                           '''json file which contains model architecture''')
tf.app.flags.DEFINE_string('weights_file', 'pointer_model.h5',
                           '''h5 file that contains model weights''')

FLAGS = tf.app.flags.FLAGS


def preprocess_image(image_buffer):
    '''
    Preprocess JPEG-encoded bytes to a 3D float tensor.
    :param image_buffer: scalar string tensor holding the JPEG bytes
    :return: 3D image tensor (height, width, channels)
    '''
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image


def main(_):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='input_image')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        images = tf.squeeze(images, [0])
        images = tf.expand_dims(images, axis=0)
        # now the image shape is [1, ?, ?, 3]
        images = tf.image.resize_images(images, tf.constant([224, 224]))

        model = load_model('./keras_models/my_model.h5')
        x = Input(tensor=images)
        y = model(x)
        model.summary()

        compare_value = tf.Variable(100.0)
        bool_out = tf.math.greater(y, compare_value)
        bool_out = bool_out[:, 0]
        bool_out = tf.cast(bool_out, tf.float32)
        bool_out = tf.expand_dims(bool_out, axis=0)
        final_out = tf.concat([tf.transpose(y), bool_out], axis=0)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)
            # predict_tensor_input_info = tf.saved_model.utils.build_tensor_info(jpegs)
            # predict_tensor_score_info = tf.saved_model.utils.build_tensor_info(bool_out)
            prediction_signature = (
                tf.saved_model.signature_def_utils.predict_signature_def(
                    inputs={'images': jpegs},
                    outputs={'scores': final_out}
                )
            )

            export_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(FLAGS.model_version)
            )
            builder = saved_model_builder.SavedModelBuilder(export_path)
            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,
                },
                legacy_init_op=legacy_init_op
            )
            builder.save()


if __name__ == "__main__":
    tf.app.run()
And this is my client program:
import base64
import requests
import json
import argparse
import time
from glob import glob

image_path = glob('./segmented_image/*.jpg')

for i in range(len(image_path)):
    input_image = open(image_path[i], 'rb').read()
    encoded_input_string = base64.b64encode(input_image)
    input_string = encoded_input_string.decode('utf-8')

    # input_image_recover = base64.b64decode(input_string)
    # with open('recovered_image.jpg', 'wb') as output_file:
    #     output_file.write(input_image_recover)
    #
    # print('Base64 encoded string: ' + input_string[:10] + '...' + input_string[-10:])

    instance = [{"b64": input_string}]
    data = json.dumps({"instances": instance})
    print(data[:30] + '...' + data[-10:])

    json_response = requests.post('http://localhost:8501/v1/models/pointer_model:predict',
                                  data=data)
    print(json_response.text)

end_time = time.time()
The output from json_response.text looks like this:
{"instances": [{"b64": "/9j/4A...Y//9k="}]}
{
"predictions": [[-0.00015692], [-0.000967527], [0.000567942], [0.0]
]
}
{"instances": [{"b64": "/9j/4A...if/9k="}]}
{
"predictions": [[-0.000157582], [-0.000998327], [0.000598866], [0.0]
]
}
......
The first 3 values under the predictions key are supposed to be the degree and the x, y coordinates in the image, which should be values in the hundreds... The last value is the True/False result of the comparison with 100.0, cast to float32.
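Given those semantics, a correct response should look something like this (the numbers are made up purely to illustrate the expected magnitudes):

{"predictions": [[153.7], [412.0], [268.4], [1.0]]}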
And lastly, I have also tested my model locally using model.predict, and it gives the correct answers...
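That check looked roughly like this (a sketch; the exact test file and preprocessing here are stand-ins):

import numpy as np
from keras.models import load_model
from keras.preprocessing import image as keras_image

model = load_model('./keras_models/my_model.h5')
# hypothetical test file from the same folder the client reads
img = keras_image.load_img('./segmented_image/test.jpg', target_size=(224, 224))
arr = keras_image.img_to_array(img)  # float32 array, values in [0, 255]
pred = model.predict(np.expand_dims(arr, axis=0))
print(pred)  # degree and x, y come out in the hundreds, as expected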
Now I'm completely confused. Can someone tell me what is wrong with my code?