How to Open a Byte Image Using Keras

import gunicorn
from urllib.parse import unquote_plus
import json
import cgi
import sys
import traceback
from backdoria import predict_backdoria_web
from femgen import predict_femgen_web
from penialized import predict_penialized_web
from genyolo import predict_genyolo_web
import numpy as np
from PIL import Image
import io
from timeit import default_timer as timer

# Eagerly load all four model graphs at module import time so the first
# request doesn't pay the model-loading cost (each worker process loads
# every model once, up front).
predict_backdoria_web.load_model()
predict_femgen_web.load_model()
predict_penialized_web.load_model()
predict_genyolo_web.load_model()

def app(environ, start_response):
    """WSGI entry point: run every loaded model over the uploaded images.

    Expects a multipart/form-data POST where each form field holds one
    image file. Responds with JSON: model name -> that model's prediction
    dict, plus 'total_time_taken' (seconds, as a string). A zero-length
    request or any processing failure yields a 400 with a JSON string body.
    """
    try:
        request_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:  # CONTENT_LENGTH present but not numeric
        request_size = 0

    if request_size == 0:
        return _respond(start_response, '400 Bad Request',
                        json.dumps('Zero sized request received').encode('utf-8'))

    try:
        start = timer()
        # NOTE(review): the cgi module is deprecated since Python 3.11 and
        # removed in 3.13 — plan a migration to another multipart parser.
        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
        # Read every uploaded file's raw bytes, keyed by its form field name.
        images = {name: form[name].file.read() for name in form}
        predictions = {
            'backdoria': predict_backdoria_web.predict(images),
            'femgen': predict_femgen_web.predict(images),
            'penialized': predict_penialized_web.predict(images),
            'genyolo': predict_genyolo_web.predict(images),
        }
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        # SECURITY NOTE(review): this echoes the full traceback to the
        # client — consider logging server-side and returning a generic
        # message instead.
        error = traceback.format_exc()
        return _respond(start_response, '400 Bad Request',
                        json.dumps(error).encode('utf-8'))

    predictions['total_time_taken'] = str(timer() - start)
    return _respond(start_response, '200 OK',
                    json.dumps(predictions).encode('utf-8'))


def _respond(start_response, status, body):
    """Emit WSGI status/headers for *body* and return the response iterable."""
    start_response(status, [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body)))
    ])
    return iter([body])
import os
import tensorflow as tf
import sys
import warnings
import argparse
import traceback
from io import StringIO
import json
from penialized import settings
from timeit import default_timer as timer

warnings.filterwarnings("ignore", category=UserWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

def load_model():
    """Load the frozen 'penialized' model into the default TensorFlow graph.

    Reads the serialized GraphDef from settings.MODEL_PATH and imports it
    under the 'graphdef_penialized' name scope so predict() can look its
    tensors up by name.

    NOTE(review): tf.gfile / tf.GraphDef are TensorFlow 1.x APIs
    (tf.compat.v1 in TF2) — this module assumes a TF1-style runtime.
    """
    with tf.gfile.GFile(settings.MODEL_PATH, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Import under a fixed prefix; predict() depends on this exact name.
        _ = tf.import_graph_def(graph_def, name='graphdef_penialized')

def predict(images):
    """Classify each uploaded image with the penialized graph.

    Args:
        images: mapping of image name -> raw JPEG bytes (fed directly to
            the graph's DecodeJpeg input tensor).

    Returns:
        dict with keys 'status' ('success'/'error'), 'msg' (traceback text
        on error, else ''), 'detections' (one entry per image with all
        classes sorted by descending score) and, on success, 'time_taken'
        (seconds, as a string). Never raises: failures are reported via
        'status'/'msg' (best-effort contract expected by the web layer).
    """
    predictions = {
        'status': 'success',
        'msg': '',
        'detections': []
    }

    try:
        start = timer()
        label_lines = [line.rstrip()
                       for line in tf.gfile.GFile(settings.CLASSES_PATH)]

        with tf.Session() as sess:
            # The tensor lookup is loop-invariant — resolve it once,
            # not once per image.
            softmax_tensor = sess.graph.get_tensor_by_name(
                'graphdef_penialized/final_result:0')

            for image_name, image_data in images.items():
                sub_predictions = sess.run(
                    softmax_tensor,
                    {'graphdef_penialized/DecodeJpeg/contents:0': image_data})

                scores = sub_predictions[0]
                # Indices of every class, ordered by descending confidence.
                top_k = scores.argsort()[-len(scores):][::-1]

                detected_objects = [{
                    'class': label_lines[node_id],
                    'score': float(scores[node_id])
                } for node_id in top_k]

                predictions['detections'].append({
                    'image_name': image_name,
                    'detected_objects': detected_objects
                })
        predictions['time_taken'] = str(timer() - start)
    except Exception:
        # Was a bare `except:`; keep the best-effort contract but stop
        # swallowing SystemExit/KeyboardInterrupt.
        predictions['status'] = 'error'
        predictions['msg'] = traceback.format_exc()
        traceback.print_exc()

    return predictions
# NOTE(review): orphaned snippet from the blog post — this is the interior
# of a method (it references `self.model_image_size` and `letterbox_image`,
# neither defined here); the enclosing definition is not visible, so only
# comments are added.
image = Image.open(io.BytesIO(image))  # decode raw bytes into a PIL image

if self.model_image_size != (None, None):
    # Fixed network input size: both dims must be divisible by 32
    # (presumably a YOLO-style stride constraint — TODO confirm).
    assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
    assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
    # model_image_size appears to be (height, width); reversed -> (width, height).
    boxed_image = letterbox_image(
        image, tuple(reversed(self.model_image_size)))
else:
    # Dynamic input: round each dimension down to the nearest multiple of 32.
    new_image_size = (image.width - (image.width % 32),
                        image.height - (image.height % 32))
    boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')

image_data /= 255.  # scale pixel values into [0, 1]
image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

Leave a Reply

Your email address will not be published. Required fields are marked *

This site uses Akismet to reduce spam. Learn how your comment data is processed.

Back To Top