from autooffer.offer import datadeplay
from autooffer.offer import datadealwith
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.utils import np_utils
import numpy as np
import os

def creatModel(epochs=500, batch_size=50, weights_path="../data/text.kModel"):
    """Build and train a two-layer LSTM classifier, then save its weights.

    Args:
        epochs: number of training epochs (default 500, as before).
        batch_size: training batch size (default 50, as before).
        weights_path: destination file for the trained weights
            (default matches the path testmethods() loads from).
    """
    # readdata() returns (data, vocab_dict, dict_str, Y); only the
    # training tensors are needed here, so the middle values are discarded
    # (the original bound one of them to `dict`, shadowing the builtin).
    data, _, _, Y = datadeplay.readdata()

    # Two stacked LSTM layers, each followed by Dropout(0.2)
    # (i.e. a 0.8 keep rate), then a softmax over Y.shape[1] classes.
    # NOTE(review): this architecture must stay in sync with the one
    # rebuilt in testmethods(), since only weights are persisted.
    model = Sequential()
    model.add(LSTM(700, input_shape=(data.shape[1], data.shape[2]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(700))
    model.add(Dropout(0.2))
    model.add(Dense(Y.shape[1], activation='softmax'))
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    model.fit(data, Y, epochs=epochs, batch_size=batch_size)
    model.save_weights(weights_path)


def testmethods():
    """Prompt for a test string, rebuild the LSTM model, load the saved
    weights, and print the predicted class label.

    The architecture rebuilt here must match creatModel(), because only
    the weights (not the model topology) are stored on disk.
    """
    testStr = input("请输入测试字符串:")
    print(testStr)

    # Single readdata() call instead of the original two: it returns
    # (data, vocab_dict, dict_str, Y) and all four parts are used below.
    # `vocab` avoids shadowing the builtin `dict`.
    data, vocab, dicstr, Y = datadeplay.readdata()
    X = datadeplay.teststrconversionint(testStr, dicstr, vocab)
    print(X)

    model = Sequential()
    model.add(LSTM(700, input_shape=(data.shape[1], data.shape[2]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(700))
    model.add(Dropout(0.2))
    model.add(Dense(Y.shape[1], activation='softmax'))
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    model.load_weights("../data/text.kModel")

    prediction = model.predict(X, verbose=0)
    print(prediction)
    # argmax over the softmax output is the predicted class index,
    # mapped back to its label via the vocabulary mapping.
    index = np.argmax(prediction)
    print(index)
    result = vocab[index]
    print(result)

def euclidean(p, q):
    """Return a similarity score in (0, 1] based on Euclidean distance.

    The two sequences are compared only over their overlapping prefix
    (the shorter of the two lengths). A distance of zero yields 1.0,
    and the score decays toward 0 as the distance grows.
    """
    overlap = min(len(p), len(q))
    squared = sum((p[i] - q[i]) ** 2 for i in range(overlap))
    return 1 / (1 + squared ** 0.5)

def text():
    """Prompt for a test string and print the stored text whose vector is
    most similar (by euclidean()) to the input string's vector.

    Fixes over the original:
    - no longer overwrites data.dataArr_vectors values with a loop counter
      (that destroyed the vectors just to recover the key later);
    - drops the dead length branch (euclidean() already truncates to the
      shorter sequence and squared differences are symmetric);
    - an empty dataArr_vectors prints None instead of crashing on max([]).
    """
    testStr = input("请输入测试字符串:")
    print(testStr)
    # presumably dataArr_vectors maps text -> vector and input_vectors[0]
    # is the vectorized input string — TODO confirm against datadealwith
    data = datadealwith.datacontent(testStr)
    print(data.dataArr_vectors)

    target = data.input_vectors[0]
    best_key = None
    best_score = None
    for key, value in data.dataArr_vectors.items():
        print(value)
        print(target)
        score = euclidean(value, target)
        print(score)
        # strict > keeps the first maximum, matching the original
        # results.index(max(results)) first-occurrence semantics
        if best_score is None or score > best_score:
            best_key = key
            best_score = score
    print(best_key)






if __name__ == "__main__":
    # Entry point: run the Euclidean-similarity matcher.
    text()
    # ## Check whether the model weights file exists
    # model = Sequential()
    # if os.path.exists("../data/text.kModel"):
    #     print("11111")
    #     testmethods()
    #     # creatModel()
    # else:
    #     print("22222")
    #     creatModel()

