# -*-coding:utf-8-*-
from train_lstm import bi_classify_model, multi_classify_model
# from core.data_help import VegDB, load_filter_words, TextFilter, filterDataFrame
from core.data_help import *
import pandas as pd
from tensorflow import keras

   


class LstmPredictModel:
    '''LSTM prediction wrapper.

    Takes a ``vegdb`` instance for data preprocessing, builds the
    classifier(s) with :meth:`load_model`, then runs predictions with
    :meth:`SentiPredict` (binary sentiment) and :meth:`IndPredict`
    (multi-class indicator).
    '''

    def __init__(self, vegdb):
        # vegdb supplies the vocabulary size and text-to-sequence conversion
        self.vegdb = vegdb
        # sequences are truncated / padded to this length before prediction
        self.maxlen = 50
        self.vocab_size = self.vegdb.vocab_size

    def load_model(self, name='all'):
        '''Build the requested classifier(s).

        name: 'bi', 'multi' or 'all'.
        Returns 1 if at least one model was built, otherwise 0.
        '''
        ret = 0
        if name in ('bi', 'all'):
            self.bi_model = bi_classify_model(self.vocab_size)
            ret = 1
        if name in ('multi', 'all'):
            self.multi_model = multi_classify_model(self.vocab_size)
            ret = 1
        return ret

    def _prepare_data(self, df, data_field):
        '''Convert the text column of *df* into a padded integer matrix.'''
        datas = [seq[:self.maxlen]
                 for seq in self.vegdb.get_predict_data_from_dataframe(df, data_field=data_field)]
        return keras.preprocessing.sequence.pad_sequences(datas,
                                                          value=0,
                                                          padding='post',
                                                          maxlen=self.maxlen)

    @staticmethod
    def _senti_label(p):
        '''Map a binary-classifier probability to a label index.

        Because the training set is small the model is not accurate
        enough, so probabilities between the 0.2 / 0.8 thresholds are
        reported as "unknown" (index 2).  With a better model — or with
        "unknown" trained as its own class — this step could be removed.

        BUG FIX: the original compared against 0.5 in both branches,
        which made the "unknown" branch unreachable and contradicted the
        documented 0.8 / 0.2 thresholds.
        '''
        if p >= 0.8:
            return 1
        elif p <= 0.2:
            return 0
        else:
            return 2

    @staticmethod
    def _ind_label(probs, threshold=0.5):
        '''Return the index of the max probability, or 3 ("unknown")
        when no class reaches *threshold*.'''
        probs = list(probs)
        maxx = max(probs)
        if maxx >= threshold:
            return probs.index(maxx)
        return 3

    def SentiPredict(self, df, data_field='Text'):
        '''Binary sentiment prediction.

        Input: a DataFrame whose text column is named *data_field*
        (default ``'Text'``).  Adds a ``'PriceSenti'`` column in place
        and returns the same DataFrame.
        '''
        train_data = self._prepare_data(df, data_field)
        # Restore the weights
        model = self.bi_model
        model.load_weights('model/bi/veg_lstm')
        labels = model.predict(train_data)
        label = ['正向', '负向', '未知']
        # Assign a plain list so values align positionally.  The original
        # wrapped this in pd.Series, which aligns by index and fills NaN
        # whenever df has a non-default index (e.g. after filtering).
        df['PriceSenti'] = [label[self._senti_label(lb)] for lb in labels]
        return df

    def IndPredict(self, df, data_field='Text'):
        '''Multi-class indicator prediction; same contract as
        :meth:`SentiPredict` but fills the ``'Indicator'`` column.'''
        train_data = self._prepare_data(df, data_field)
        model = self.multi_model
        model.load_weights('model/multi/veg_lstm')
        labels = model.predict(train_data)
        label = ['供给','需求','自然环境','未知']
        # positional assignment — see note in SentiPredict
        df['Indicator'] = [label[self._ind_label(lb)] for lb in labels]
        return df




# Test / demo code
def predictArticle(df, text_filter, lpm, data_field='文本', out_path='Test.xlsx'):
    '''Run sentiment and indicator prediction over a DataFrame and save it.

    df: input DataFrame containing a text column named *data_field*.
    text_filter: project TextFilter used to keep only rows mentioning
        a region and a vegetable.
    lpm: an LstmPredictModel instance.
    data_field: name of the text column (default '文本').
    out_path: Excel file the annotated frame is written to
        (default 'Test.xlsx', matching the original behaviour).
    Returns the annotated DataFrame.
    '''
    # BUG FIX: the column list was hard-coded to ['文本'], silently
    # ignoring the data_field argument.
    df = text_filter.vegCityFilterDataFrame(df, [data_field])
    lpm.load_model('bi')
    lpm.SentiPredict(df, data_field)
    lpm.load_model('multi')
    lpm.IndPredict(df, data_field)
    df.to_excel(out_path)
    return df
    

if __name__ == "__main__":

    # Build the tokenizer and the data helper around it.
    stop_words = load_stop_words()
    cutter = SentenceCut(stop_words)
    db = VegDB(cutter)

    # Text filter and prediction wrapper share the same tokenizer.
    word_filter = TextFilter(cutter)
    predictor = LstmPredictModel(db)

    # Batch smoke test over the sample corpus.
    corpus = pd.read_excel('corpus/corpus.xlsx')
    print(corpus.columns)
    print(corpus.head(1))
    predictArticle(corpus, word_filter, predictor)


