From 1c614c730f081f9769a4e3b2b3a9989b1ed0f1c2 Mon Sep 17 00:00:00 2001
From: merschie <famer101@hhu.de>
Date: Wed, 5 Jul 2023 16:10:57 +0200
Subject: [PATCH] add webserver

---
 src/main.py      |  2 ++
 src/webserver.py | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)
 create mode 100644 src/webserver.py

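Notes for reviewers (this text sits before the first diff header, so git am ignores it): a minimal client sketch for the new /predict endpoint, assuming the `requests` package is installed and the default port 8000 from webserver.py. The pixel values plus image height and width are packed as uint64 values, mirroring what predict() reads back with np.frombuffer; the random image below is only a placeholder.

    import numpy as np
    import requests

    # Placeholder grayscale image; any 2-D array of pixel values works.
    img = np.random.randint(0, 256, size=(64, 256)).astype(np.uint64)
    h, w = img.shape

    # Flatten the pixels and append height and width as the last two uint64
    # values, matching how predict() unpacks the request body.
    payload = np.concatenate([img.ravel(), np.array([h, w], dtype=np.uint64)]).tobytes()

    response = requests.post('http://localhost:8000/predict', data=payload)
    print(response.json()['recognized'])
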
diff --git a/src/main.py b/src/main.py
index 1ee11d2..98f385d 100644
--- a/src/main.py
+++ b/src/main.py
@@ -10,6 +10,8 @@ from dataloader_iam import DataLoaderIAM, Batch
 from model import Model, DecoderType
 from preprocessor import Preprocessor
 
+import os
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs from TensorFlow; run inference on CPU only
 
 class FilePaths:
     """Filenames and paths to data."""
diff --git a/src/webserver.py b/src/webserver.py
new file mode 100644
index 0000000..b8c43b4
--- /dev/null
+++ b/src/webserver.py
@@ -0,0 +1,40 @@
+from flask import Flask, request, jsonify
+
+import main as htr
+import model as htr_model
+import dataloader_iam as htr_data_loader
+import preprocessor as htr_preprocessor
+import numpy as np
+
+app = Flask(__name__)
+
+image_size = 32
+
+# Create the recognition model once at startup and restore the trained weights; decoding uses word beam search.
+model = htr_model.Model(htr.char_list_from_file(), htr_model.DecoderType.WordBeamSearch, must_restore=True)
+
+
+@app.route('/predict', methods=['POST'])
+def predict():
+    # The request body is a flat uint64 buffer: the pixel values,
+    # followed by the image height and width as the last two elements.
+    image_array = np.frombuffer(request.data, dtype=np.uint64)
+    h = int(image_array[-2])
+    w = int(image_array[-1])
+    image_array = image_array[:-2].reshape((h, w))
+    print(image_array)
+
+    # Scale the image to the model's input size (fixed height, dynamic width) and infer on a batch of one.
+    preprocessor = htr_preprocessor.Preprocessor(htr.get_img_size(), dynamic_width=True, padding=16)
+    processed_image = preprocessor.process_img(image_array)
+    batch = htr_data_loader.Batch([processed_image], None, 1)
+    recognized, probability = model.infer_batch(batch)
+
+    result = {
+        'recognized': recognized[0],
+    }
+    return jsonify(result)
+
+
+if __name__ == '__main__':
+    app.run(debug=True, port=8000)
-- 
GitLab