In [0]:
# We use the following lines because we are running on Google Colab.
# If you are running this notebook on a local computer, you don't need this cell.
from google.colab import drive
import os

# Drive must be mounted before chdir-ing into a path that lives on it;
# without drive.mount the chdir below fails with FileNotFoundError.
drive.mount('/content/gdrive')
os.chdir('/content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese/main')
In [0]:
!pip install -q requests

import numpy as np
import requests
import json
import os
In [3]:
!echo "deb stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \
curl | apt-key add -
!apt update
!apt-get install tensorflow-model-server
deb stable tensorflow-model-server tensorflow-model-server-universal
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  2943  100  2943    0     0   9909      0 --:--:-- --:--:-- --:--:--  9909
Get:1 stable InRelease [3,012 B]
Ign:2  InRelease
Get:3 bionic InRelease [21.3 kB]
Hit:4 bionic InRelease
Get:5 bionic-security InRelease [88.7 kB]
Ign:6  InRelease
Get:7  Release [564 B]
Get:8  Release [564 B]
Get:9  Release.gpg [819 B]
Get:10  Release.gpg [833 B]
Get:11 bionic-updates InRelease [88.7 kB]
Get:12 stable/tensorflow-model-server amd64 Packages [357 B]
Get:13 bionic-cran35/ InRelease [3,626 B]
Get:14 bionic InRelease [15.4 kB]
Get:15 stable/tensorflow-model-server-universal amd64 Packages [365 B]
Get:16  Packages [119 kB]
Get:17 bionic-backports InRelease [74.6 kB]
Get:18 bionic/main amd64 Packages [31.7 kB]
Get:19  Packages [20.6 kB]
Get:20 bionic-cran35/ Packages [72.2 kB]
Get:21 bionic-security/main amd64 Packages [680 kB]
Get:22 bionic/main Sources [1,710 kB]
Get:23 bionic-updates/restricted amd64 Packages [21.9 kB]
Get:24 bionic-updates/universe amd64 Packages [1,294 kB]
Get:25 bionic-security/universe amd64 Packages [774 kB]
Get:26 bionic/main amd64 Packages [823 kB]
Get:27 bionic-updates/multiverse amd64 Packages [8,734 B]
Get:28 bionic-security/restricted amd64 Packages [11.3 kB]
Get:29 bionic-updates/main amd64 Packages [975 kB]
Get:30 bionic-security/multiverse amd64 Packages [5,391 B]
Get:31 bionic-backports/universe amd64 Packages [4,227 B]
Fetched 6,849 kB in 4s (1,906 kB/s)
Reading package lists... Done
Building dependency tree       
Reading state information... Done
146 packages can be upgraded. Run 'apt list --upgradable' to see them.
Reading package lists... Done
Building dependency tree       
Reading state information... Done
The following NEW packages will be installed:
0 upgraded, 1 newly installed, 0 to remove and 146 not upgraded.
Need to get 151 MB of archives.
After this operation, 0 B of additional disk space will be used.
Get:1 stable/tensorflow-model-server amd64 tensorflow-model-server all 1.14.0 [151 MB]
Fetched 151 MB in 2s (65.6 MB/s)
Selecting previously unselected package tensorflow-model-server.
(Reading database ... 131183 files and directories currently installed.)
Preparing to unpack .../tensorflow-model-server_1.14.0_all.deb ...
Unpacking tensorflow-model-server (1.14.0) ...
Setting up tensorflow-model-server (1.14.0) ...
In [0]:
# Absolute path (on the mounted Drive) to the SavedModel export directory.
# tensorflow_model_server reads it via ${MODEL_DIR} in the launch cell below.
os.environ["MODEL_DIR"] = '/content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese/model/gru_seq2seq_export'
In [5]:
%%bash --bg 
# Launch TensorFlow Model Server in the background (--bg keeps this cell from
# blocking the notebook) serving a REST API on port 8508. stdout/stderr are
# redirected to server.log so startup progress can be inspected with `tail`.
nohup tensorflow_model_server \
  --rest_api_port=8508 \
  --model_name=free_chat_model \
  --model_base_path="${MODEL_DIR}" >server.log 2>&1
Starting job # 0 in a separate thread.
In [6]:
# Inspect the serving log to confirm the SavedModel loaded successfully.
!tail server.log
2019-10-11 00:33:16.940878: I tensorflow_serving/model_servers/] Adding/updating models.
2019-10-11 00:33:16.941008: I tensorflow_serving/model_servers/]  (Re-)adding model: free_chat_model
2019-10-11 00:33:17.369398: I tensorflow_serving/core/] Successfully reserved resources to load servable {name: free_chat_model version: 1569460583}
2019-10-11 00:33:17.369583: I tensorflow_serving/core/] Approving load for servable version {name: free_chat_model version: 1569460583}
2019-10-11 00:33:17.369704: I tensorflow_serving/core/] Loading servable version {name: free_chat_model version: 1569460583}
2019-10-11 00:33:17.370153: I external/org_tensorflow/tensorflow/contrib/session_bundle/] Attempting to load native SavedModelBundle in bundle-shim from: /content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese/model/gru_seq2seq_export/1569460583
2019-10-11 00:33:17.370279: I external/org_tensorflow/tensorflow/cc/saved_model/] Reading SavedModel from: /content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese/model/gru_seq2seq_export/1569460583
2019-10-11 00:33:17.694151: I external/org_tensorflow/tensorflow/cc/saved_model/] Reading meta graph with tags { serve }
2019-10-11 00:33:17.699337: I external/org_tensorflow/tensorflow/core/platform/] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2019-10-11 00:33:17.726292: I external/org_tensorflow/tensorflow/cc/saved_model/] Restoring SavedModel bundle.
In [0]:
def get_vocab(f_path):
  """Read a vocabulary file (one token per line) into a token -> index dict.

  Args:
    f_path: path to a text file whose i-th line holds the i-th token.

  Returns:
    dict mapping each token (str) to its 0-based line number (int).
  """
  k2v = {}
  # The vocab holds Chinese characters, so force UTF-8 instead of relying on
  # the platform's locale-dependent default encoding (open() without an
  # explicit encoding can break outside Linux/Colab).
  with open(f_path, encoding='utf-8') as f:
    for i, line in enumerate(f):
      k2v[line.rstrip('\n')] = i
  return k2v

def parse_fn(text):
  """Convert a string into a single-instance batch of character ids.

  Each character is looked up in the module-level CHAR2IDX table; unknown
  characters map to len(CHAR2IDX) (the out-of-vocabulary id). Returns a list
  containing one list of ids — the [[...]] shape the serving REST API
  expects for "instances".
  """
  oov_id = len(CHAR2IDX)
  return [[CHAR2IDX.get(ch, oov_id) for ch in text]]
In [8]:
# Load the character vocabulary, then serialize a prediction request for
# the TensorFlow Serving REST API.
CHAR2IDX = get_vocab('../vocab/char.txt')
request_body = {"signature_name": "serving_default", "instances": parse_fn('你是谁')}
data = json.dumps(request_body)
{"signature_name": "serving_default", "instances": [[10, 13, 119]]}
In [9]:
headers = {"content-type": "application/json"}
# POST the request to the locally running model server. (The original export
# had `requests.post(` stripped from this line, leaving invalid syntax.)
# timeout keeps the notebook from hanging forever if the server never came up;
# raise_for_status surfaces HTTP errors instead of a confusing KeyError when
# an error response carries no 'predictions' field.
json_response = requests.post(
    'http://localhost:8508/v1/models/free_chat_model:predict',
    data=data, headers=headers, timeout=60)
json_response.raise_for_status()
predictions = json.loads(json_response.text)['predictions']
<Response [200]>
In [10]:
# predictions: presumably (batch, time, beam) token ids — indexing below
# fixes rank 3; decode the 5 beams of the first (only) batch element.
predictions = np.asarray(predictions)
# Invert the vocabulary for id -> character decoding.
IDX2CHAR = {idx: char for char, idx in CHAR2IDX.items()}
for j in range(5):
  # Fall back to the STRING '<unk>' for out-of-vocabulary ids: the original
  # int fallback len(IDX2CHAR) would crash str.join with a TypeError.
  # int(idx) guards against JSON-decoded ids arriving as floats, which would
  # never match the int keys of IDX2CHAR.
  decoded = ' '.join(IDX2CHAR.get(int(idx), '<unk>') for idx in predictions[0, :, j])
  print('A{}:'.format(j + 1), decoded.replace('<end>', ''))
A1: 我 是 小 通                  
A2: 我 是 小 黄 鹰                 
A3: 不 是 她 们 俩 吗 ?               
A4: 我 是 小 通 ~                 
A5: 我 是 宇 宙 无 敌 可 爱 小 通