diff --git a/src/main.py b/src/main.py
index 1ee11d29dc923b7958ed9ed7cbb80a527c74019a..98f385d967d6e7910007b5bb468d28a31b482ba4 100644
--- a/src/main.py
+++ b/src/main.py
@@ -10,6 +10,10 @@ from dataloader_iam import DataLoaderIAM, Batch
 from model import Model, DecoderType
 from preprocessor import Preprocessor
 
+import os
+
+# Hide all CUDA devices ('-1') so TensorFlow falls back to CPU-only execution.
+# NOTE(review): this should ideally run before the model/TF imports above —
+# confirm TF only reads CUDA_VISIBLE_DEVICES at device initialization time.
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
 
 class FilePaths:
     """Filenames and paths to data."""
diff --git a/src/webserver.py b/src/webserver.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8c43b4e28182df6d26ba6c29515a782f2cacab2
--- /dev/null
+++ b/src/webserver.py
@@ -0,0 +1,51 @@
+"""Minimal Flask wrapper exposing the HTR model as a POST /predict endpoint.
+
+The request body is a raw buffer of uint64 values: the grayscale pixel data
+in row-major order, followed by two trailing values (height, width).
+"""
+from flask import Flask, request, jsonify
+
+import main as htr
+import model as htr_model
+import dataloader_iam as htr_data_loader
+import preprocessor as htr_preprocessor
+import numpy as np
+
+
+app = Flask(__name__)
+
+# Load the trained model once at startup instead of per request;
+# must_restore=True fails fast if no checkpoint is available.
+model_name = htr_model.Model(htr.char_list_from_file(), htr_model.DecoderType.WordBeamSearch, must_restore=True)
+
+
+@app.route('/predict', methods=['POST'])
+def predict():
+    """Decode the posted image buffer and return the recognized text."""
+    image_array = np.frombuffer(request.data, dtype=np.uint64)
+    # The sender appends image height and width as the last two values;
+    # reject malformed payloads instead of letting reshape raise a 500.
+    if image_array.size < 2:
+        return jsonify({'error': 'payload too small'}), 400
+    h = int(image_array[-2])
+    w = int(image_array[-1])
+    image_array = image_array[:-2]
+    if image_array.size != h * w:
+        return jsonify({'error': 'pixel count does not match height*width'}), 400
+    image_array = image_array.reshape((h, w))
+
+    preprocessor = htr_preprocessor.Preprocessor(htr.get_img_size(), dynamic_width=True, padding=16)
+    processed_image = preprocessor.process_img(image_array)
+    batch = htr_data_loader.Batch([processed_image], None, 1)
+    recognized, probability = htr_model.Model.infer_batch(model_name, batch)
+
+    result = {
+        'recognized': recognized[0],
+        # Backward-compatible addition: decoding confidence; float() makes
+        # the (presumably numpy) value JSON-serializable.
+        'probability': float(probability[0]),
+    }
+    return jsonify(result)
+
+
+if __name__ == '__main__':
+    # debug=True is for local development only; do not expose publicly.
+    app.run(debug=True, port=8000)