Page MenuHomePhabricator (Chris)

No OneTemporary

Size
36 KB
Referenced Files
None
Subscribers
None
diff --git a/classify_nsfw.py b/classify_nsfw.py
index e5c4c7b..39458ab 100644
--- a/classify_nsfw.py
+++ b/classify_nsfw.py
@@ -1,69 +1,73 @@
#!/usr/bin/env python
import sys
import argparse
import tensorflow as tf
from model import OpenNsfwModel, InputType
from image_utils import create_tensorflow_image_loader
from image_utils import create_yahoo_image_loader
import numpy as np
# Let TensorFlow grow GPU memory on demand instead of reserving all of it
# up front (plays nicer with other processes sharing the GPU).
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpu_devices:
    tf.config.experimental.set_memory_growth(gpu, True)

IMAGE_LOADER_TENSORFLOW = "tensorflow"
IMAGE_LOADER_YAHOO = "yahoo"
def main(argv):
    """Classify a single JPEG image with the Open NSFW model.

    Parses the command line, builds the model from the given weights file
    and prints the SFW/NSFW scores for the input image.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-i", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()
    model = OpenNsfwModel()

    # The model is built from tf.compat.v1 placeholders/sessions, which
    # are unusable while eager execution (the TF2 default) is active.
    tf.compat.v1.disable_eager_execution()

    with tf.compat.v1.Session() as sess:
        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                # Image decoding runs in its own graph/session, separate
                # from the classification graph.
                fn_load_image = create_tensorflow_image_loader(
                    tf.compat.v1.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.compat.v1.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))


if __name__ == "__main__":
    main(sys.argv)
diff --git a/eval/batch_classify.py b/eval/batch_classify.py
index b768dda..e9c5249 100644
--- a/eval/batch_classify.py
+++ b/eval/batch_classify.py
@@ -1,112 +1,112 @@
import os
import sys
sys.path.append((os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..'))))
import argparse
import glob
import tensorflow as tf
from tqdm import tqdm
from model import OpenNsfwModel, InputType
from image_utils import create_tensorflow_image_loader
from image_utils import create_yahoo_image_loader
IMAGE_LOADER_TENSORFLOW = "tensorflow"
IMAGE_LOADER_YAHOO = "yahoo"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-tf.logging.set_verbosity(tf.logging.ERROR)
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def create_batch_iterator(filenames, batch_size, fn_load_image):
    """Yield successive batches of loaded images.

    Applies *fn_load_image* to each filename and yields the results in
    lists of at most *batch_size* entries; the final batch may be shorter.
    """
    for start in range(0, len(filenames), batch_size):
        chunk = filenames[start:start + batch_size]
        yield [fn_load_image(name) for name in chunk]
def create_tf_batch_iterator(filenames, batch_size):
    """Yield batches of images decoded with the TensorFlow image loader.

    A fresh graph and session is created for every batch so the decoding
    graph does not grow without bound across batches.
    """
    for start in range(0, len(filenames), batch_size):
        chunk = filenames[start:start + batch_size]
        with tf.compat.v1.Session(graph=tf.Graph()) as session:
            fn_load_image = create_tensorflow_image_loader(session,
                                                           expand_dims=False)
            yield [fn_load_image(name) for name in chunk]
def main(argv):
    """Classify every *.jpg in a folder and write a TSV score report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", required=True,
                        help="Folder containing the images to classify")
    parser.add_argument("-o", "--output_file", required=True,
                        help="Output file path")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-b", "--batch_size", help="Number of images to \
                        classify simultaneously.", type=int, default=64)
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    args = parser.parse_args()
    batch_size = args.batch_size
    output_file = args.output_file

    input_type = InputType.TENSOR
    model = OpenNsfwModel()

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)

    # Ceiling division: a trailing partial batch counts as a batch too
    # (int(num_files / batch_size) under-reported it).
    num_batches = -(-num_files // batch_size)

    print("Found", num_files, " files")
    print("Split into", num_batches, " batches")

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True

    batch_iterator = None

    if args.image_loader == IMAGE_LOADER_TENSORFLOW:
        batch_iterator = create_tf_batch_iterator(filenames, batch_size)
    else:
        fn_load_image = create_yahoo_image_loader(expand_dims=False)
        batch_iterator = create_batch_iterator(filenames, batch_size,
                                               fn_load_image)

    # tf.compat.v1 placeholders/sessions require graph mode; eager
    # execution (the TF2 default) must be disabled first.
    tf.compat.v1.disable_eager_execution()

    with tf.compat.v1.Session(graph=tf.Graph(), config=config) as session:
        model.build(weights_path=args.model_weights,
                    input_type=input_type)

        session.run(tf.compat.v1.global_variables_initializer())

        with tqdm(total=num_files) as progress_bar:
            with open(output_file, 'w') as o:
                o.write('File\tSFW Score\tNSFW Score\n')

                for batch_num, images in enumerate(batch_iterator):
                    predictions = \
                        session.run(model.predictions,
                                    feed_dict={model.input: images})

                    fi = (batch_num * batch_size)
                    for i, prediction in enumerate(predictions):
                        filename = os.path.basename(filenames[fi + i])
                        o.write('{}\t{}\t{}\n'.format(filename,
                                                      prediction[0],
                                                      prediction[1]))

                    progress_bar.update(len(images))


if __name__ == "__main__":
    main(sys.argv)
diff --git a/image_utils.py b/image_utils.py
index d2cb9a3..7ca5ceb 100644
--- a/image_utils.py
+++ b/image_utils.py
@@ -1,143 +1,142 @@
# Per-channel mean pixel values (BGR order) subtracted from images during
# preprocessing; matches the original Caffe open_nsfw pipeline.
VGG_MEAN = [104, 117, 123]
def create_yahoo_image_loader(expand_dims=True):
    """Yahoo open_nsfw image loading mechanism

    Approximation of the image loading mechanism defined in
    https://github.com/yahoo/open_nsfw/blob/79f77bcd45076b000df71742a59d726aa4a36ad1/classify_nsfw.py#L40
    """
    import numpy as np
    import skimage
    import skimage.io
    from PIL import Image
    from io import BytesIO

    def load_image(image_path):
        """Load, resize, re-encode, center-crop and mean-normalize a jpeg.

        Returns a float32 BGR array of shape (224, 224, 3), or
        (1, 224, 224, 3) when expand_dims is enabled.
        """
        # 'with' guarantees the file handle is closed (the previous code
        # left it open until garbage collection).
        with open(image_path, 'rb') as f:
            img_data = f.read()

        im = Image.open(BytesIO(img_data))

        if im.mode != "RGB":
            im = im.convert('RGB')

        imr = im.resize((256, 256), resample=Image.BILINEAR)

        # Re-encode the resized image as JPEG: the original Caffe pipeline
        # decoded a JPEG buffer, not raw resized pixels.
        fh_im = BytesIO()
        imr.save(fh_im, format='JPEG')
        fh_im.seek(0)

        image = (skimage.img_as_float(skimage.io.imread(fh_im, as_gray=False))
                 .astype(np.float32))

        # Center crop to 224x224.
        H, W, _ = image.shape
        h, w = (224, 224)

        h_off = max((H - h) // 2, 0)
        w_off = max((W - w) // 2, 0)
        image = image[h_off:h_off + h, w_off:w_off + w, :]

        # RGB to BGR
        image = image[:, :, :: -1]

        # Scale back to 0..255 and subtract the VGG channel means.
        image = image.astype(np.float32, copy=False)
        image = image * 255.0
        image -= np.array(VGG_MEAN, dtype=np.float32)

        if expand_dims:
            image = np.expand_dims(image, axis=0)

        return image

    return load_image
def create_tensorflow_image_loader(session, expand_dims=True,
                                   options=None,
                                   run_metadata=None):
    """Tensorflow image loader

    Results seem to deviate quite a bit from yahoo image loader due to
    different jpeg encoders/decoders and different image resize
    implementations between PIL, skimage and tensorflow

    Only supports jpeg images.

    Relevant tensorflow issues:
        * https://github.com/tensorflow/tensorflow/issues/6720
        * https://github.com/tensorflow/tensorflow/issues/12753
    """
    import tensorflow as tf

    def load_image(image_path):
        # Build the decode/preprocess graph for this file, then evaluate
        # it in the caller-supplied session.
        result = __tf_jpeg_process(tf.io.read_file(image_path))

        if expand_dims:
            result = tf.expand_dims(result, axis=0)

        return session.run(result,
                           options=options,
                           run_metadata=run_metadata)

    return load_image
def load_base64_tensor(_input):
    """Decode a batch of urlsafe-base64 jpeg strings into image tensors."""
    import tensorflow as tf

    def _decode_and_process(b64):
        return __tf_jpeg_process(tf.io.decode_base64(b64))

    # Preprocessing has to go through map_fn: ops like decode_*,
    # resize and crop_to_bounding_box do not support batched input.
    # NOTE(review): the back_prop/dtype arguments of tf.map_fn are
    # deprecated in newer TF releases (tf.stop_gradient /
    # fn_output_signature) — confirm against the targeted TF version.
    return tf.map_fn(_decode_and_process, _input,
                     back_prop=False, dtype=tf.float32)
def __tf_jpeg_process(data):
    """Reproduce the Caffe jpeg preprocessing pipeline as TF ops.

    Decodes a jpeg buffer, resizes to 256x256, re-encodes and re-decodes
    as jpeg, center-crops to 224x224, converts RGB->BGR and subtracts the
    VGG channel means.
    """
    import tensorflow as tf

    # The whole jpeg encode/decode dance is necessary to generate a result
    # that matches the original model's (caffe) preprocessing
    # (as good as possible).
    decoded = tf.image.decode_jpeg(data, channels=3,
                                   fancy_upscaling=True,
                                   dct_method="INTEGER_FAST")
    as_float = tf.image.convert_image_dtype(decoded, tf.float32,
                                            saturate=True)
    # NOTE(review): tf.image.resize has no align_corners argument; results
    # may differ slightly from the old resize_images(align_corners=True).
    resized = tf.image.resize(as_float, (256, 256),
                              method=tf.image.ResizeMethod.BILINEAR)
    as_uint8 = tf.image.convert_image_dtype(resized, tf.uint8,
                                            saturate=True)

    reencoded = tf.image.encode_jpeg(as_uint8, format='', quality=75,
                                     progressive=False, optimize_size=False,
                                     chroma_downsampling=True,
                                     density_unit=None,
                                     x_density=None, y_density=None,
                                     xmp_metadata=None)
    redecoded = tf.image.decode_jpeg(reencoded, channels=3,
                                     fancy_upscaling=False,
                                     dct_method="INTEGER_ACCURATE")

    image = tf.cast(redecoded, dtype=tf.float32)
    # Center crop 256x256 -> 224x224.
    image = tf.image.crop_to_bounding_box(image, 16, 16, 224, 224)
    # RGB -> BGR, then subtract the VGG channel means.
    image = tf.reverse(image, axis=[2])
    image -= VGG_MEAN

    return image
diff --git a/model.py b/model.py
index fda4af0..f817d8c 100644
--- a/model.py
+++ b/model.py
@@ -1,252 +1,252 @@
import math
import numpy as np
import tensorflow as tf
from enum import Enum, unique
@unique
class InputType(Enum):
    """Supported model input formats."""
    # Raw image tensor of shape (batch, 224, 224, 3).
    TENSOR = 1
    # Batch of urlsafe-base64-encoded jpeg strings.
    BASE64_JPEG = 2
class OpenNsfwModel:
    """Tensorflow implementation of Yahoo's Open NSFW Model

    Original implementation:
    https://github.com/yahoo/open_nsfw

    Weights have been converted using caffe-tensorflow:
    https://github.com/ethereon/caffe-tensorflow
    """

    def __init__(self):
        # Mapping: layer name -> {field name -> pretrained value}.
        self.weights = {}
        self.bn_epsilon = 1e-5  # Default used by Caffe

    def build(self, weights_path="open_nsfw-weights.npy",
              input_type=InputType.TENSOR):
        """Build the ResNet-style graph and load the pretrained weights.

        Creates self.input (feedable placeholder), self.logits and
        self.predictions (softmax over [SFW, NSFW]).

        Raises:
            ValueError: if input_type is not a known InputType.
        """
        self.weights = np.load(weights_path, encoding="latin1",
                               allow_pickle=True).item()
        self.input_tensor = None

        if input_type == InputType.TENSOR:
            self.input = tf.compat.v1.placeholder(tf.float32,
                                                  shape=[None, 224, 224, 3],
                                                  name="input")
            self.input_tensor = self.input
        elif input_type == InputType.BASE64_JPEG:
            from image_utils import load_base64_tensor

            self.input = tf.compat.v1.placeholder(tf.string, shape=(None,),
                                                  name="input")
            self.input_tensor = load_base64_tensor(self.input)
        else:
            raise ValueError("invalid input type '{}'".format(input_type))

        x = self.input_tensor

        # Stem: Caffe-style explicit padding, then 7x7/2 conv.
        x = tf.pad(tensor=x, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]],
                   mode='CONSTANT')
        x = self.__conv2d("conv_1", x, filter_depth=64,
                          kernel_size=7, stride=2, padding='valid')
        x = self.__batch_norm("bn_1", x)
        x = tf.nn.relu(x)
        x = tf.compat.v1.layers.max_pooling2d(x, pool_size=3, strides=2,
                                              padding='same')

        # Stage 0: one projection block + two identity blocks.
        x = self.__conv_block(stage=0, block=0, inputs=x,
                              filter_depths=[32, 32, 128],
                              kernel_size=3, stride=1)
        x = self.__identity_block(stage=0, block=1, inputs=x,
                                  filter_depths=[32, 32, 128], kernel_size=3)
        x = self.__identity_block(stage=0, block=2, inputs=x,
                                  filter_depths=[32, 32, 128], kernel_size=3)

        # Stage 1.
        x = self.__conv_block(stage=1, block=0, inputs=x,
                              filter_depths=[64, 64, 256],
                              kernel_size=3, stride=2)
        x = self.__identity_block(stage=1, block=1, inputs=x,
                                  filter_depths=[64, 64, 256], kernel_size=3)
        x = self.__identity_block(stage=1, block=2, inputs=x,
                                  filter_depths=[64, 64, 256], kernel_size=3)
        x = self.__identity_block(stage=1, block=3, inputs=x,
                                  filter_depths=[64, 64, 256], kernel_size=3)

        # Stage 2.
        x = self.__conv_block(stage=2, block=0, inputs=x,
                              filter_depths=[128, 128, 512],
                              kernel_size=3, stride=2)
        x = self.__identity_block(stage=2, block=1, inputs=x,
                                  filter_depths=[128, 128, 512],
                                  kernel_size=3)
        x = self.__identity_block(stage=2, block=2, inputs=x,
                                  filter_depths=[128, 128, 512],
                                  kernel_size=3)
        x = self.__identity_block(stage=2, block=3, inputs=x,
                                  filter_depths=[128, 128, 512],
                                  kernel_size=3)
        x = self.__identity_block(stage=2, block=4, inputs=x,
                                  filter_depths=[128, 128, 512],
                                  kernel_size=3)
        x = self.__identity_block(stage=2, block=5, inputs=x,
                                  filter_depths=[128, 128, 512],
                                  kernel_size=3)

        # Stage 3.
        x = self.__conv_block(stage=3, block=0, inputs=x,
                              filter_depths=[256, 256, 1024], kernel_size=3,
                              stride=2)
        x = self.__identity_block(stage=3, block=1, inputs=x,
                                  filter_depths=[256, 256, 1024],
                                  kernel_size=3)
        x = self.__identity_block(stage=3, block=2, inputs=x,
                                  filter_depths=[256, 256, 1024],
                                  kernel_size=3)

        # Head: global 7x7 average pool, flatten, 2-way dense + softmax.
        x = tf.compat.v1.layers.average_pooling2d(x, pool_size=7, strides=1,
                                                  padding="valid",
                                                  name="pool")

        x = tf.reshape(x, shape=(-1, 1024))

        self.logits = self.__fully_connected(name="fc_nsfw",
                                             inputs=x, num_outputs=2)
        self.predictions = tf.nn.softmax(self.logits, name="predictions")

    def __get_weights(self, layer_name, field_name):
        """Return the pretrained value for *field_name* of *layer_name*.

        Raises:
            ValueError: if the layer or the field is missing from the
                loaded weights file.
        """
        if layer_name not in self.weights:
            raise ValueError("No weights for layer named '{}' found"
                             .format(layer_name))

        w = self.weights[layer_name]
        if field_name not in w:
            raise (ValueError("No entry for field '{}' in layer named '{}'"
                              .format(field_name, layer_name)))

        return w[field_name]

    def __fully_connected(self, name, inputs, num_outputs):
        """Dense layer initialized from the pretrained weights/biases."""
        return tf.compat.v1.layers.dense(
            inputs=inputs, units=num_outputs, name=name,
            kernel_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "weights"), dtype=tf.float32),
            bias_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "biases"), dtype=tf.float32))

    def __conv2d(self, name, inputs, filter_depth, kernel_size, stride=1,
                 padding="same", trainable=False):
        """2D convolution initialized from the pretrained weights.

        'same' padding is applied manually (Caffe-style symmetric zero
        padding) so the conv layer itself always runs with 'valid'
        padding.
        """
        if padding.lower() == 'same' and kernel_size > 1:
            # All operands are ints, so // already floors; the previous
            # int(math.floor(... // 2)) double-floor and the duplicate
            # shape lookup (oh == h) were redundant, as was an inner
            # kernel_size check that repeated the condition above.
            h = inputs.get_shape().as_list()[1]
            p = ((h - 1) * stride + kernel_size - h) // 2
            inputs = tf.pad(tensor=inputs,
                            paddings=[[0, 0], [p, p], [p, p], [0, 0]],
                            mode='CONSTANT')

        return tf.compat.v1.layers.conv2d(
            inputs, filter_depth,
            kernel_size=(kernel_size, kernel_size),
            strides=(stride, stride), padding='valid',
            activation=None, trainable=trainable, name=name,
            kernel_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "weights"), dtype=tf.float32),
            bias_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "biases"), dtype=tf.float32))

    def __batch_norm(self, name, inputs, training=False):
        """Batch normalization using the pretrained moments/scale/offset."""
        return tf.compat.v1.layers.batch_normalization(
            inputs, training=training, epsilon=self.bn_epsilon,
            gamma_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "scale"), dtype=tf.float32),
            beta_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "offset"), dtype=tf.float32),
            moving_mean_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "mean"), dtype=tf.float32),
            moving_variance_initializer=tf.compat.v1.constant_initializer(
                self.__get_weights(name, "variance"), dtype=tf.float32),
            name=name)

    def __conv_block(self, stage, block, inputs, filter_depths,
                     kernel_size=3, stride=2):
        """ResNet bottleneck block with a projection shortcut."""
        filter_depth1, filter_depth2, filter_depth3 = filter_depths

        conv_name_base = "conv_stage{}_block{}_branch".format(stage, block)
        bn_name_base = "bn_stage{}_block{}_branch".format(stage, block)
        shortcut_name_post = "_stage{}_block{}_proj_shortcut" \
                             .format(stage, block)

        # 1x1 projection so the shortcut matches the main path's depth.
        shortcut = self.__conv2d(
            name="conv{}".format(shortcut_name_post), stride=stride,
            inputs=inputs, filter_depth=filter_depth3, kernel_size=1,
            padding="same"
        )

        shortcut = self.__batch_norm("bn{}".format(shortcut_name_post),
                                     shortcut)

        x = self.__conv2d(
            name="{}2a".format(conv_name_base),
            inputs=inputs, filter_depth=filter_depth1, kernel_size=1,
            stride=stride, padding="same",
        )
        x = self.__batch_norm("{}2a".format(bn_name_base), x)
        x = tf.nn.relu(x)

        x = self.__conv2d(
            name="{}2b".format(conv_name_base),
            inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size,
            padding="same", stride=1
        )
        x = self.__batch_norm("{}2b".format(bn_name_base), x)
        x = tf.nn.relu(x)

        x = self.__conv2d(
            name="{}2c".format(conv_name_base),
            inputs=x, filter_depth=filter_depth3, kernel_size=1,
            padding="same", stride=1
        )
        x = self.__batch_norm("{}2c".format(bn_name_base), x)

        x = tf.add(x, shortcut)

        return tf.nn.relu(x)

    def __identity_block(self, stage, block, inputs,
                         filter_depths, kernel_size):
        """ResNet bottleneck block with an identity shortcut."""
        filter_depth1, filter_depth2, filter_depth3 = filter_depths
        conv_name_base = "conv_stage{}_block{}_branch".format(stage, block)
        bn_name_base = "bn_stage{}_block{}_branch".format(stage, block)

        x = self.__conv2d(
            name="{}2a".format(conv_name_base),
            inputs=inputs, filter_depth=filter_depth1, kernel_size=1,
            stride=1, padding="same",
        )
        x = self.__batch_norm("{}2a".format(bn_name_base), x)
        x = tf.nn.relu(x)

        x = self.__conv2d(
            name="{}2b".format(conv_name_base),
            inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size,
            padding="same", stride=1
        )
        x = self.__batch_norm("{}2b".format(bn_name_base), x)
        x = tf.nn.relu(x)

        x = self.__conv2d(
            name="{}2c".format(conv_name_base),
            inputs=x, filter_depth=filter_depth3, kernel_size=1,
            padding="same", stride=1
        )
        x = self.__batch_norm("{}2c".format(bn_name_base), x)

        x = tf.add(x, inputs)

        return tf.nn.relu(x)
diff --git a/tools/create_predict_request.py b/tools/create_predict_request.py
index f1b4f0b..627048e 100644
--- a/tools/create_predict_request.py
+++ b/tools/create_predict_request.py
@@ -1,76 +1,76 @@
import base64
import json
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model.signature_constants import PREDICT_INPUTS
import os
import sys
sys.path.append((os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..'))))
from image_utils import create_tensorflow_image_loader
from image_utils import create_yahoo_image_loader
from model import InputType
IMAGE_LOADER_TENSORFLOW = "tensorflow"
IMAGE_LOADER_YAHOO = "yahoo"
# Thanks to https://stackoverflow.com/a/47626762
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain lists."""

    def default(self, obj):
        # Numpy arrays are not JSON serializable out of the box.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
"""Generates a json prediction request suitable for consumption by a model
generated with 'export-model.py' and deployed on either ml-engine or tensorflow-serving
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="Path to the input image file")
parser.add_argument("-i", "--input_type", required=True,
default=InputType.TENSOR.name.lower(),
help="Input type",
choices=[InputType.TENSOR.name.lower(),
InputType.BASE64_JPEG.name.lower()])
parser.add_argument("-l", "--image_loader", required=False,
default=IMAGE_LOADER_YAHOO,
help="Image loading mechanism. Only relevant when using input_type 'tensor'",
choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
parser.add_argument("-t", "--target", required=True,
choices=['ml-engine', 'tf-serving'],
help="Create json request for ml-engine or tensorflow-serving")
args = parser.parse_args()
target = args.target
input_type = InputType[args.input_type.upper()]
image_data = None
if input_type == InputType.TENSOR:
fn_load_image = None
if args.image_loader == IMAGE_LOADER_TENSORFLOW:
- with tf.Session() as sess:
+ with tf.compat.v1.Session() as sess:
fn_load_image = create_tensorflow_image_loader(sess)
- sess.run(tf.global_variables_initializer())
+ sess.run(tf.compat.v1.global_variables_initializer())
image_data = fn_load_image(args.input_file)[0]
else:
- image_data = create_yahoo_image_loader(tf.Session(graph=tf.Graph()))(args.input_file)[0]
+ image_data = create_yahoo_image_loader(tf.compat.v1.Session(graph=tf.Graph()))(args.input_file)[0]
elif input_type == InputType.BASE64_JPEG:
import base64
image_data = base64.urlsafe_b64encode(open(args.input_file, "rb").read()).decode("ascii")
if target == "ml-engine":
print(json.dumps({PREDICT_INPUTS: image_data}, cls=NumpyEncoder))
elif target == "tf-serving":
print(json.dumps({"instances": [image_data]}, cls=NumpyEncoder))
diff --git a/tools/export_graph.py b/tools/export_graph.py
index 5b5f7cc..9c6411d 100644
--- a/tools/export_graph.py
+++ b/tools/export_graph.py
@@ -1,123 +1,123 @@
import os
import sys
import argparse
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
sys.path.append((os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..'))))
from model import OpenNsfwModel, InputType
"""Exports the graph so it can be imported via import_graph_def
The exported model takes an base64 encoded string tensor as input
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("target", help="output directory")
parser.add_argument("-m", "--model_weights", required=True,
help="Path to trained model weights file")
parser.add_argument("-i", "--input_type", required=True,
default=InputType.TENSOR.name.lower(),
help="Input type",
choices=[InputType.TENSOR.name.lower(),
InputType.BASE64_JPEG.name.lower()])
parser.add_argument("-o", "--optimize", action='store_true',
default=False,
help="Optimize graph for inference")
parser.add_argument("-f", "--freeze", action='store_true',
required=False, default=False,
help="Freeze graph: convert variables to ops")
parser.add_argument("-t", "--text", action='store_true',
required=False, default=False,
help="Write graph as binary (.pb) or text (pbtext)")
args = parser.parse_args()
model = OpenNsfwModel()
export_base_path = args.target
do_freeze = args.freeze
do_optimize = args.optimize
as_binary = not args.text
input_type = InputType[args.input_type.upper()]
input_node_name = 'input'
output_node_name = 'predictions'
base_name = 'open_nsfw'
checkpoint_path = os.path.join(export_base_path, base_name + '.ckpt')
if as_binary:
graph_name = base_name + '.pb'
else:
graph_name = base_name + '.pbtxt'
graph_path = os.path.join(export_base_path, graph_name)
frozen_graph_path = os.path.join(export_base_path,
'frozen_' + graph_name)
optimized_graph_path = os.path.join(export_base_path,
'optimized_' + graph_name)
- with tf.Session() as sess:
+ with tf.compat.v1.Session() as sess:
model.build(weights_path=args.model_weights,
input_type=input_type)
- sess.run(tf.global_variables_initializer())
+ sess.run(tf.compat.v1.global_variables_initializer())
- saver = tf.train.Saver()
+ saver = tf.compat.v1.train.Saver()
saver.save(sess, save_path=checkpoint_path)
print('Checkpoint exported to {}'.format(checkpoint_path))
- tf.train.write_graph(sess.graph_def, export_base_path, graph_name,
+ tf.io.write_graph(sess.graph_def, export_base_path, graph_name,
as_text=not as_binary)
print('Graph exported to {}'.format(graph_path))
if do_freeze:
print('Freezing graph...')
freeze_graph.freeze_graph(
input_graph=graph_path, input_saver='',
input_binary=as_binary, input_checkpoint=checkpoint_path,
output_node_names=output_node_name,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph=frozen_graph_path, clear_devices=True,
initializer_nodes='')
print('Frozen graph exported to {}'.format(frozen_graph_path))
graph_path = frozen_graph_path
if do_optimize:
print('Optimizing graph...')
- input_graph_def = tf.GraphDef()
+ input_graph_def = tf.compat.v1.GraphDef()
- with tf.gfile.Open(graph_path, 'rb') as f:
+ with tf.io.gfile.GFile(graph_path, 'rb') as f:
data = f.read()
input_graph_def.ParseFromString(data)
output_graph_def =\
optimize_for_inference_lib.optimize_for_inference(
input_graph_def,
[input_node_name],
[output_node_name],
tf.float32.as_datatype_enum)
- f = tf.gfile.FastGFile(optimized_graph_path, 'wb')
+ f = tf.compat.v1.gfile.FastGFile(optimized_graph_path, 'wb')
f.write(output_graph_def.SerializeToString())
print('Optimized graph exported to {}'
.format(optimized_graph_path))
diff --git a/tools/export_savedmodel.py b/tools/export_savedmodel.py
index 2579bb1..29e2a2c 100644
--- a/tools/export_savedmodel.py
+++ b/tools/export_savedmodel.py
@@ -1,72 +1,72 @@
import os
import sys
import argparse
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model.signature_def_utils\
import predict_signature_def
from tensorflow.python.saved_model.tag_constants import SERVING
from tensorflow.python.saved_model.signature_constants\
import DEFAULT_SERVING_SIGNATURE_DEF_KEY
from tensorflow.python.saved_model.signature_constants import PREDICT_INPUTS
from tensorflow.python.saved_model.signature_constants import PREDICT_OUTPUTS
sys.path.append((os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..'))))
from model import OpenNsfwModel, InputType
"""Builds a SavedModel which can be used for deployment with
gcloud ml-engine, tensorflow-serving, ...
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("target", help="output directory")
parser.add_argument("-i", "--input_type", required=True,
default=InputType.TENSOR.name.lower(),
help="Input type",
choices=[InputType.TENSOR.name.lower(),
InputType.BASE64_JPEG.name.lower()])
parser.add_argument("-v", "--export_version",
help="export model version",
default="1")
parser.add_argument("-m", "--model_weights", required=True,
help="Path to trained model weights file")
args = parser.parse_args()
model = OpenNsfwModel()
export_base_path = args.target
export_version = args.export_version
input_type = InputType[args.input_type.upper()]
export_path = os.path.join(export_base_path, export_version)
- with tf.Session() as sess:
+ with tf.compat.v1.Session() as sess:
model.build(weights_path=args.model_weights,
input_type=input_type)
- sess.run(tf.global_variables_initializer())
+ sess.run(tf.compat.v1.global_variables_initializer())
builder = saved_model_builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
sess, [SERVING],
signature_def_map={
DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def(
inputs={PREDICT_INPUTS: model.input},
outputs={PREDICT_OUTPUTS: model.predictions}
)
}
)
builder.save()
diff --git a/tools/export_tflite.py b/tools/export_tflite.py
index 30f098d..6c39ee3 100644
--- a/tools/export_tflite.py
+++ b/tools/export_tflite.py
@@ -1,49 +1,49 @@
import os
import sys
import argparse
import tensorflow as tf
sys.path.append((os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..'))))
from model import OpenNsfwModel, InputType
"""Exports a tflite version of tensorflow-open_nsfw
Note: The standard TFLite runtime does not support all required ops when using the base64_jpeg input type.
You will have to implement the missing ones by yourself.
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("target", help="output filename, e.g. 'open_nsfw.tflite'")
parser.add_argument("-i", "--input_type", required=True,
default=InputType.TENSOR.name.lower(),
help="Input type. Warning: base64_jpeg does not work with the standard TFLite runtime since a lot of operations are not supported",
choices=[InputType.TENSOR.name.lower(),
InputType.BASE64_JPEG.name.lower()])
parser.add_argument("-m", "--model_weights", required=True,
help="Path to trained model weights file")
args = parser.parse_args()
model = OpenNsfwModel()
export_path = args.target
input_type = InputType[args.input_type.upper()]
- with tf.Session() as sess:
+ with tf.compat.v1.Session() as sess:
model.build(weights_path=args.model_weights,
input_type=input_type)
- sess.run(tf.global_variables_initializer())
+ sess.run(tf.compat.v1.global_variables_initializer())
converter = tf.compat.v1.lite.TFLiteConverter.from_session(sess, [model.input], [model.predictions])
tflite_model = converter.convert()
with open(export_path, "wb") as f:
f.write(tflite_model)

File Metadata

Mime Type
text/x-diff
Expires
Wed, Sep 10, 11:40 AM (5 h, 30 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
42594
Default Alt Text
(36 KB)

Event Timeline