webserver.py
    from flask import Flask, request, jsonify
    
    import main as htr
    import model as htr_model
    import dataloader_iam as htr_data_loader
    import preprocessor as htr_preprocessor
    import numpy as np
    from word_beam_search import WordBeamSearch
    import base64
    import tensorflow as tf
    
    app = Flask(__name__)
    
    image_size = 32
    
    # Restore the pre-trained HTR model with the lexicon-search decoder.
    model_name = htr_model.Model(htr.char_list_from_file(), htr_model.DecoderType.LexiconSearch, must_restore=True)
    model_name.setup_ctc()  # set up the CTC decoding operation
    csv_path = '../tns.csv'
    
    # Character set of the model and the characters allowed inside words
    # (only referenced by the commented-out WordBeamSearch decoder below).
    char_list = htr.char_list_from_file()
    chars = ''.join(char_list)
    word_chars = open('../model/wordCharList.txt').read().splitlines()[0]
    # Matrikelnummern read from the CSV; filled in at startup.
    matrikel_numbers = []
    
    @app.route('/getNames', methods=['GET'])
    def getNames():
        # Return the Matrikelnummern read from the CSV at startup.
        return jsonify(matrikel_numbers)
    
    
    @app.route('/predictNachname', methods=['POST'])
    def predictNach():
        # The request body is a flattened uint64 grayscale image with its
        # height and width appended as the last two elements.
        image_array = np.frombuffer(request.data, dtype=np.uint64)
        h = image_array[-2]
        w = image_array[-1]
        image_array = image_array[:-2]
        image_array = image_array.reshape((h, w))
        # Scale/pad the image to the model's input size and build a batch of one.
        preprocessor = htr_preprocessor.Preprocessor(htr.get_img_size(), dynamic_width=True, padding=16)
        processed_image = preprocessor.process_img(image_array)
        batch = htr_data_loader.Batch([processed_image], None, 1)
    
        # Swap in the last-name corpus for the lexicon search.
        model_name.corpus = open('../data/Nachname.txt').read().split()
        #model_name.decoder = WordBeamSearch(50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'),word_chars.encode('utf8'))
        recognized, probability = htr_model.Model.infer_batch(model_name, batch)
    
        """     #convert corpus to list, split at space
        corpus = model_name.corpus
        result_list=[]
        print(recognized)
        for name in recognized:
            indecies = []
            for i in range(len(corpus)):
                if name == corpus[i]:
                    indecies.append(i)
            if len(indecies) == 0:
                indecies.append(-1)
            else:
                result_list.append((name, indecies))
        if len(result_list) == 0:
            result_list.append((-1, -1)) """
    
    
    
        # Undo the preprocessor's normalization (shift by 0.5, scale to 0..255).
        processed_image = processed_image + 0.5
        processed_image = processed_image * 255
        # Rotate 90 degrees clockwise and mirror so the client receives the image upright.
        processed_image = np.rot90(processed_image, 3)
        processed_image = np.fliplr(processed_image)
        # Flatten, append height/width (mirroring the request format) and base64-encode.
        height, width = processed_image.shape
        image = np.reshape(processed_image, (height * width))
        image = np.append(image, height)
        image = np.append(image, width)
        image = image.astype(np.uint64)
        array_bytes = image.tobytes()
        image_base64 = base64.b64encode(array_bytes).decode('utf-8')
    
        result = {
            'recognized': recognized,
            'image': image_base64
        }
        return jsonify(result)
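    
    
    # --- Illustrative client sketch (not part of the server) ------------------
    # A hedged example of how a client could call the predict endpoints: the raw
    # request body is the flattened uint64 image with height and width appended,
    # and the response carries the recognized text plus the preprocessed image in
    # the same base64-encoded layout. The function name, the endpoint URL and the
    # use of the `requests` library are assumptions for illustration only.
    def _example_client(image_array, url='http://localhost:8000/predictNachname'):
        import requests  # assumed to be available on the client side
        h, w = image_array.shape
        flat = image_array.astype(np.uint64).ravel()
        payload = np.concatenate([flat, np.array([h, w], dtype=np.uint64)]).tobytes()
        response = requests.post(url, data=payload)
        result = response.json()
        # Decode the returned preprocessed image back into a 2-D array.
        returned = np.frombuffer(base64.b64decode(result['image']), dtype=np.uint64)
        rh, rw = int(returned[-2]), int(returned[-1])
        returned_image = returned[:-2].reshape((rh, rw))
        return result['recognized'], returned_image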
    
    
    
    @app.route('/predictVorname', methods=['POST'])
    def predictVor():
        # Same request format as /predictNachname: flattened uint64 pixels with
        # height and width appended as the last two elements.
        image_array = np.frombuffer(request.data, dtype=np.uint64)
        h = image_array[-2]
        w = image_array[-1]
        image_array = image_array[:-2]
        image_array = image_array.reshape((h, w))
        preprocessor = htr_preprocessor.Preprocessor(htr.get_img_size(), dynamic_width=True, padding=16)
        processed_image = preprocessor.process_img(image_array)
        batch = htr_data_loader.Batch([processed_image], None, 1)
    
    
        # Swap in the first-name corpus for the lexicon search.
        model_name.corpus = open('../data/Vorname.txt').read().split()
        #model_name.decoder = WordBeamSearch(50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'),word_chars.encode('utf8'))
        recognized, probability = htr_model.Model.infer_batch(model_name, batch)
    
    
        """     corpus = model_name.corpus
        result_list=[]
        for name in recognized:
            indecies = []
            for i in range(len(corpus)):
                if name == corpus[i]:
                    indecies.append(i)
            if len(indecies) == 0:
                indecies.append(-1)
            else:
                result_list.append((name, indecies))
        
        if len(result_list) == 0:
            result_list.append(('KeinName', -1)) """
    
    
    
        # Re-encode the preprocessed image exactly as in /predictNachname:
        # rescale to 0..255, rotate 90 degrees clockwise, mirror, then flatten
        # with height/width appended and base64-encode.
        processed_image = processed_image + 0.5
        processed_image = processed_image * 255
        processed_image = np.rot90(processed_image, 3)
        processed_image = np.fliplr(processed_image)
        height, width = processed_image.shape
        image = np.reshape(processed_image, (height * width))
        image = np.append(image, height)
        image = np.append(image, width)
        image = image.astype(np.uint64)
        array_bytes = image.tobytes()
        image_base64 = base64.b64encode(array_bytes).decode('utf-8')
    
        result = {
            'recognized': recognized,
            'image': image_base64
        }
        return jsonify(result)
    
    
    def split_Student_Names():
        # The CSV is read as comma-separated with a header row and the column
        # order Matrikelnummer,Nachname,Vorname; each column is written to its
        # own corpus file in ../data/ so the predict endpoints can load it.
        numbers = []
        with open('../data/Vorname.txt', 'w') as vorname_file, \
             open('../data/Nachname.txt', 'w') as nachname_file, \
             open('../data/Matrikelnummer.txt', 'w') as matrikelnummer_file, \
             open(csv_path, 'r') as csv_file:
            lines = csv_file.readlines()
            for line in lines[1:]:  # skip the header row
                line = line.split(',')
                matrikelnummer = line[0]
                nachname = line[1]
                vorname = line[2].strip()  # drop the trailing newline
                print(vorname, nachname, matrikelnummer)
                vorname_file.write(vorname + " ")
                nachname_file.write(nachname + " ")
                matrikelnummer_file.write(matrikelnummer + " ")
                numbers.append(matrikelnummer)
        return numbers
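    
    # For illustration only (hypothetical data, not from the repository): with a
    # CSV such as
    #     Matrikelnummer,Nachname,Vorname
    #     1234567,Mustermann,Max
    #     7654321,Musterfrau,Erika
    # the function writes "Max Erika " to ../data/Vorname.txt,
    # "Mustermann Musterfrau " to ../data/Nachname.txt, and returns
    # ['1234567', '7654321'].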
    
    
    
    if __name__ == '__main__':
        # Split the CSV into Vorname, Nachname and Matrikelnummer corpus files
        # before serving requests.
        matrikel_numbers = split_Student_Names()
    
        app.run(debug=True, port=8000)
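    
    # Usage sketch (assumption: the script is started from the directory that
    # contains it, so the relative paths '../tns.csv', '../data/' and
    # '../model/wordCharList.txt' resolve):
    #
    #     python webserver.py
    #
    # The Flask development server then listens on http://localhost:8000 and
    # exposes GET /getNames, POST /predictNachname and POST /predictVorname.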