GitHub blog link
CSDN blog link
└── keras-and-tensorflow-serving
    ├── README.md
    ├── my_image_classifier
    │   └── 1
    │       ├── saved_model.pb                  # exported model
    │       └── variables                       # variables folder of the exported model
    │           ├── variables.data-00000-of-00001
    │           └── variables.index
    ├── test_images                             # test data
    │   ├── car.jpg
    │   └── car.png
    └── scripts                                 # our own scripts and model
        ├── download_inceptionv3_model.py
        ├── inception.h5
        ├── auto_cmd.py
        ├── export_saved_model.py
        ├── imagenet_class_index.json
        └── serving_sample_request.py
# Example:
from keras.applications.inception_v3 import InceptionV3  # import the InceptionV3 architecture
from keras.layers import Input                            # import the Input layer

inception_model = InceptionV3(weights='imagenet', input_tensor=Input(shape=(224, 224, 3)))  # build the model and load the ImageNet weights
inception_model.save('inception.h5')  # save the model as inception.h5
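Before exporting, it can be worth a quick local sanity check that the saved inception.h5 loads and predicts. The snippet below is a minimal sketch, assuming a test image exists at ../test_images/car.png (as in the tree above) and mirroring the 224x224 resize and /255 scaling used by the request script later in this post:

# Optional local sanity check of inception.h5 (assumes ../test_images/car.png exists)
import numpy as np
from keras.applications import inception_v3
from keras.models import load_model
from keras.preprocessing import image

model = load_model('inception.h5')  # load the model saved above

# Same preprocessing as serving_sample_request.py: resize to 224x224, scale to [0, 1]
img = image.img_to_array(image.load_img('../test_images/car.png', target_size=(224, 224))) / 255.
preds = model.predict(np.expand_dims(img, axis=0))

# Map the 1000-way ImageNet probabilities back to human-readable labels
print(inception_v3.decode_predictions(preds, top=5)[0])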
# Code:
import tensorflow as tf

# The export path contains the model name and version
tf.keras.backend.set_learning_phase(0)
model = tf.keras.models.load_model('./inception.h5')  # path of the model to load
export_path = '../my_image_classifier/1'               # path where the model will be exported

# Fetch the Keras session and save the model
# The signature definition is defined by the input and output tensors
with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': model.input},
        outputs={t.name: t for t in model.outputs})
# Directory structure
├── my_image_classifier
│   └── 1
│       ├── saved_model.pb
│       └── variables
│           ├── variables.data-00000-of-00001
│           └── variables.index
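To confirm that the export worked and to see which signature the server will expose, you can load the SavedModel back in a fresh session. This is a minimal sketch assuming TF 1.x (the same API family as the export code above); the 'serve' tag and the 'serving_default' signature key are what tf.saved_model.simple_save writes:

# Inspect the exported SavedModel (run from the scripts/ directory)
import tensorflow as tf

export_path = '../my_image_classifier/1'

with tf.Session(graph=tf.Graph()) as sess:
    # simple_save tags the graph with 'serve' and registers a 'serving_default' signature
    meta_graph = tf.saved_model.loader.load(sess, ['serve'], export_path)
    print(meta_graph.signature_def['serving_default'])

The saved_model_cli tool that ships with TensorFlow can show the same information from the command line.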
Open a terminal (an environment where you can execute commands) and enter:
tensorflow_model_server --model_base_path=/home/******/PycharmProjects/tensorflow/deployment_testing/my_image_classifier --rest_api_port=9000 --model_name=detection
### --model_base_path: this must be an absolute path, otherwise you will get an error.
### --rest_api_port: TensorFlow Serving starts a gRPC ModelServer on port 8500, and the REST API becomes available on port 9000.
### --model_name: the name of the serving server that you will use when sending POST requests. You can type any name here, but it must match the model name in the request URL (the sample request below uses ImageClassifier).
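The repository tree above lists an auto_cmd.py that is not shown in this post. As a rough, hypothetical sketch of what such a helper might look like, the following starts and stops tensorflow_model_server from Python with subprocess; the model name ImageClassifier and REST port 9000 are chosen here to match the sample request below:

# auto_cmd.py -- hypothetical sketch: start/stop tensorflow_model_server from Python
import os
import signal
import subprocess

MODEL_BASE_PATH = os.path.abspath('../my_image_classifier')  # --model_base_path must be absolute


def start_server():
    cmd = [
        'tensorflow_model_server',
        '--model_base_path={}'.format(MODEL_BASE_PATH),
        '--rest_api_port=9000',
        '--model_name=ImageClassifier',
    ]
    return subprocess.Popen(cmd)


def stop_server(proc):
    proc.send_signal(signal.SIGTERM)  # ask the server to shut down cleanly


if __name__ == '__main__':
    server = start_server()
    try:
        server.wait()  # keep serving until interrupted
    except KeyboardInterrupt:
        stop_server(server)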
# Filename: serving_sample_request.py
import argparse
import json

import numpy as np
import requests
from keras.applications import inception_v3
from keras.preprocessing import image

# Argument parser for giving input image_path from command line
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path of the image")
args = vars(ap.parse_args())

image_path = args['image']

# Preprocessing our input image
img = image.img_to_array(image.load_img(image_path, target_size=(224, 224))) / 255.

# this line is added because of a bug in tf_serving(1.10.0-dev)
img = img.astype('float16')

payload = {
    "instances": [{'input_image': img.tolist()}]
}

# sending post request to TensorFlow Serving server
r = requests.post('http://localhost:9000/v1/models/ImageClassifier:predict', json=payload)
pred = json.loads(r.content.decode('utf-8'))

# Decoding the response
# decode_predictions(preds, top=5) by default gives top 5 results
# You can pass "top=10" to get top 10 predictions
print(json.dumps(inception_v3.decode_predictions(np.array(pred['predictions']))[0]))
python serving_sample_request.py -i ../test_images/car.png
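If the request fails, first confirm that the server actually loaded the model. TensorFlow Serving's REST API exposes a model status endpoint at GET /v1/models/<model_name>; here is a minimal check with requests, assuming the server was started with --model_name=ImageClassifier and --rest_api_port=9000:

# Check that TensorFlow Serving has loaded the model before sending predictions
import requests

r = requests.get('http://localhost:9000/v1/models/ImageClassifier')
print(r.status_code)
print(r.json())  # reports the model version and its availability state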
print_r('Give it a like!');
var_dump('Give it a like!');
NSLog(@"Give it a like!")
System.out.println("Give it a like!");
console.log("Give it a like!");
print("Give it a like!");
printf("Give it a like!\n");
cout << "Give it a like!" << endl;
Console.WriteLine("Give it a like!");
fmt.Println("Give it a like!")
Response.Write("Give it a like!");
alert('Give it a like!')