File size: 1,531 Bytes
9ef53fc
 
18b26de
f8d0620
 
 
 
 
 
3a26397
f8d0620
 
 
b676e30
f8d0620
336ed91
f8d0620
73aee21
b7686db
73aee21
f007dbf
 
 
 
 
 
 
 
f8d0620
 
 
 
 
 
 
bf7581f
d0d1d93
ce69f2b
d0d1d93
 
 
de25959
 
 
 
 
 
 
 
f8d0620
924de23
de25959
f007dbf
d0d1d93
f007dbf
d0d1d93
 
 
3babb37
 
f8d0620
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# import os
# os.environ['HF_HOME'] = '/transf/cache/'
import sys
import pandas as pd
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
import torch
import numpy as np
from flask import Flask, render_template, request, jsonify
import transformers

app = Flask(__name__)

# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Fine-tuned sequence-classification checkpoint loaded from the local
# ./model/ directory and moved to the selected device once, at startup.
# NOTE(review): assumes ./model/ holds a saved transformers checkpoint
# (config + weights) — the app fails at import time otherwise; confirm
# it ships alongside this file in deployment.
new_model = AutoModelForSequenceClassification.from_pretrained('./model/').to(device)


# Tokenizer for the base checkpoint; model_max_length=128 truncates/pads
# inputs to 128 tokens, matching the pipeline's max_length below.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased',model_max_length=128)

# Map the model's raw output labels to human-readable disease categories.
# NOTE(review): a 5-class transformers model emits LABEL_0..LABEL_4 by
# default — confirm the fine-tuned checkpoint's id2label really starts at
# LABEL_1, otherwise lookups on LABEL_0 will raise KeyError.
label_info = dict(
    LABEL_1='digestive system diseases',
    LABEL_2='cardiovascular diseases',
    LABEL_3='neoplasms',
    LABEL_4='nervous system diseases',
    LABEL_5='general pathological conditions',
)


@app.route('/')
def index():
    """Serve the single-page UI (templates/index.html) for the classifier."""
    page = render_template('index.html')
    return page

@app.route('/predict', methods=['POST','GET'])
def predict():
    """Classify the posted text and return its disease category as JSON.

    Expects a JSON body of the form ``{"input": "<text>"}`` and responds
    with ``{"prediction": "<category name>"}``. A missing ``input`` key or
    an unparseable body raises and surfaces as an HTTP 500.

    NOTE(review): 'GET' is listed in methods, but GET requests carry no
    body, so get_json(force=True) will fail for them — confirm GET support
    is actually needed.
    """
    input_text = request.get_json(force=True)['input']
    print(input_text, file=sys.stderr)

    # Build the classification pipeline once and cache it on the function
    # object: the original rebuilt it on every request, which is expensive
    # and unnecessary since the model and tokenizer never change. The model
    # is already on `device` from startup, so no extra .to(device) here.
    classifier = getattr(predict, '_pipeline', None)
    if classifier is None:
        classifier = transformers.pipeline(
            "sentiment-analysis",
            model=new_model,
            tokenizer=tokenizer,
            max_length=128,
            device=device,
            function_to_apply='softmax',
        )
        predict._pipeline = classifier

    y_pred = classifier(input_text)
    top_label = y_pred[0]['label']

    # NOTE(review): raises KeyError (HTTP 500) if the model emits a label
    # not present in label_info (e.g. LABEL_0) — verify the checkpoint's
    # id2label mapping matches label_info's keys.
    return jsonify({'prediction': (label_info[top_label])})


if __name__ == '__main__':
    # Listen on all interfaces; port 7860 is the Hugging Face Spaces
    # convention, which suggests this app is deployed as a Space.
    app.run(host="0.0.0.0",port=7860)