import numpy as np
from datetime import datetime
import string
from tensorflow import keras

import re
import json

from nltk.tokenize import word_tokenize

from nltk.stem import WordNetLemmatizer


# Inlined stopword lists (avoids requiring the nltk 'stopwords' corpus download).
# English stopwords — mirrors nltk's English stopword list.
english_stopwords = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "you're", "you've", "you'll", "you'd", "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "she's", "her", "hers", "herself", "it", "it's", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "that'll", "these", "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "don't", "should", "should've", "now", "d", "ll", "m", "o", "re", "ve", "y", "ain", "aren", "aren't", "couldn", "couldn't", "didn", "didn't", "doesn", "doesn't", "hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "isn", "isn't", "ma", "mightn", "mightn't", "mustn", "mustn't", "needn", "needn't", "shan", "shan't", "shouldn", "shouldn't", "wasn", "wasn't", "weren", "weren't", "won", "won't", "wouldn", "wouldn't"]
# Spanish stopwords — mirrors nltk's Spanish stopword list.
spanish_stopwords = ["de", "la", "que", "el", "en", "y", "a", "los", "del", "se", "las", "por", "un", "para",
                     "con", "no", "una", "su", "al", "lo", "como", "más", "pero", "sus", "le", "ya", "o",
                     "este", "sí", "porque", "esta", "entre", "cuando", "muy", "sin", "sobre", "también", "me",
                     "hasta", "hay", "donde", "quien", "desde", "todo", "nos", "durante", "todos", "uno", "les",
                     "ni", "contra", "otros", "ese", "eso", "ante", "ellos", "e", "esto", "mí", "antes",
                     "algunos", "qué", "unos", "yo", "otro", "otras", "otra", "él", "tanto", "esa", "estos",
                     "mucho", "quienes", "nada", "muchos", "cual", "poco", "ella", "estar", "estas", "algunas",
                     "algo", "nosotros", "mi", "mis", "tú", "te", "ti", "tu", "tus", "ellas", "nosotras",
                     "vosotros", "vosotras", "os", "mío", "mía", "míos", "mías", "tuyo", "tuya", "tuyos",
                     "tuyas", "suyo", "suya", "suyos", "suyas", "nuestro", "nuestra", "nuestros", "nuestras",
                     "vuestro", "vuestra", "vuestros", "vuestras", "esos", "esas", "estoy", "estás", "está",
                     "estamos", "estáis", "están", "esté", "estés", "estemos", "estéis", "estén", "estaré",
                     "estarás", "estará", "estaremos", "estaréis", "estarán", "estaría", "estarías",
                     "estaríamos", "estaríais", "estarían", "estaba", "estabas", "estábamos", "estabais",
                     "estaban", "estuve", "estuviste", "estuvo", "estuvimos", "estuvisteis", "estuvieron",
                     "estuviera", "estuvieras", "estuviéramos", "estuvierais", "estuvieran", "estuviese",
                     "estuvieses", "estuviésemos", "estuvieseis", "estuviesen", "estando", "estado", "estada",
                     "estados", "estadas", "estad", "he", "has", "ha", "hemos", "habéis", "han", "haya",
                     "hayas", "hayamos", "hayáis", "hayan", "habré", "habrás", "habrá", "habremos", "habréis",
                     "habrán", "habría", "habrías", "habríamos", "habríais", "habrían", "había", "habías",
                     "habíamos", "habíais", "habían", "hube", "hubiste", "hubo", "hubimos", "hubisteis",
                     "hubieron", "hubiera", "hubieras", "hubiéramos", "hubierais", "hubieran", "hubiese",
                     "hubieses", "hubiésemos", "hubieseis", "hubiesen", "habiendo", "habido", "habida",
                     "habidos", "habidas", "soy", "eres", "es", "somos", "sois", "son", "sea", "seas", "seamos",
                     "seáis", "sean", "seré", "serás", "será", "seremos", "seréis", "serán", "sería", "serías",
                     "seríamos", "seríais", "serían", "era", "eras", "éramos", "erais", "eran", "fui", "fuiste",
                     "fue", "fuimos", "fuisteis", "fueron", "fuera", "fueras", "fuéramos", "fuerais", "fueran",
                     "fuese", "fueses", "fuésemos", "fueseis", "fuesen", "sintiendo", "sentido", "sentida",
                     "sentidos", "sentidas", "siente", "sentid", "tengo", "tienes", "tiene", "tenemos",
                     "tenéis", "tienen", "tenga", "tengas", "tengamos", "tengáis", "tengan", "tendré",
                     "tendrás", "tendrá", "tendremos", "tendréis", "tendrán", "tendría", "tendrías",
                     "tendríamos", "tendríais", "tendrían", "tenía", "tenías", "teníamos", "teníais", "tenían",
                     "tuve", "tuviste", "tuvo", "tuvimos", "tuvisteis", "tuvieron", "tuviera", "tuvieras",
                     "tuviéramos", "tuvierais", "tuvieran", "tuviese", "tuvieses", "tuviésemos", "tuvieseis",
                     "tuviesen", "teniendo", "tenido", "tenida", "tenidos", "tenidas", "tened"]

# Feature vocabulary: each keyword is one channel of the model's input tensor.
# NOTE(review): despite the "num_" prefix, this is the keyword *list*, not a count.
# Presumably these were selected during model training — keep in sync with the model.
num_keywords = ['ahora', 'ample', 'app', 'aprobada', 'aprobado', 'aqui', 'baja',
       'bajo', 'beneficios', 'bit', 'buen', 'cantidad', 'cash', 'clic',
       'click', 'cliente', 'codigo', 'com', 'comparta', 'comparte',
       'credito', 'cuenta', 'código', 'da', 'deposito', 'descarga',
       'descuento', 'dia', 'dinero', 'efectivo', 'enlace', 'estimado',
       'favor', 'fecha', 'felicidades', 'gracias', 'haga', 'haz', 'hola',
       'hora', 'hoy', 'info', 'informacion', 'inmediato', 'interes',
       'jskw', 'limite', 'linea', 'llamada', 'ly', 'mayor', 'mil', 'min',
       'minimo', 'minutos', 'mismo', 'monto', 'mx', 'nadie', 'numero',
       'obten', 'obtener', 'ofrecemos', 'onelink', 'osjw', 'paga',
       'pagar', 'pago', 'peso', 'prestamo', 'prestamos', 'préstamo',
       'puede', 'puedes', 'rapido', 'recarga', 'recibir', 'recuerda',
       'saldo', 'seguridad', 'si', 'sido', 'simple', 'sm', 'solicita',
       'solicitud', 'solo', 'sr', 'tarjeta', 'telcel', 'tiempo', 'unotv',
       'usted', 'usuario', 'valido', 'vence', 'verificacion', 'video',
       'vip', 'wbzd']

# Number of hour-of-day buckets per day (axis 1 of the input tensor).
num_time_intervals = 24

# Number of days of SMS history considered before submit time (axis 0).
num_time_diffs = 90


# Reverse lookup: keyword string -> channel index (axis 2 of the tensor).
keyword_to_index = {keyword: index for index, keyword in enumerate(num_keywords)}
# WordNet lemmatizer; its default rules are English — Spanish words pass through mostly unchanged.
lemmatizer = WordNetLemmatizer()


# Load the saved Keras model once at import time (shared by all predictions).
loaded_model = keras.models.load_model("static/model_04")

def word_preprocessing(txt):
    """Normalize raw SMS text into lemmatized, stopword-free tokens.

    Pipeline: strip URLs, strip digits, lowercase, flatten newlines,
    replace punctuation with spaces, tokenize, lemmatize (WordNet's
    default English rules), then drop English and Spanish stopwords.

    Args:
        txt: Raw message text.

    Returns:
        list[str]: Filtered token list (order preserved, duplicates kept).
    """
    # Remove URLs FIRST. The original stripped digits before this step,
    # which broke apart any URL containing a digit (e.g. "bit.ly/2abc")
    # so the URL regex could no longer match it and fragments survived.
    txt = re.sub(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', ' ', txt)

    # Replace digits with spaces (raw string avoids invalid-escape warnings).
    txt = re.sub(r'[0-9]', ' ', txt)

    # Lowercase and flatten newlines.
    txt = txt.lower().replace('\n', ' ')

    # Map every punctuation character to a space in a single C-level pass.
    punctuation_to_spaces = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    txt = txt.translate(punctuation_to_spaces)

    # Tokenize, then reduce each token to its base form.
    lemmas = [lemmatizer.lemmatize(word) for word in word_tokenize(txt)]

    # Single combined stopword set (O(1) membership per token); the
    # original rebuilt this through a needless language if/elif loop.
    stop_words = set(english_stopwords) | set(spanish_stopwords)

    return [word for word in lemmas if word not in stop_words]


def process_model(data):
    """Build a (day, hour, keyword) SMS count tensor and score it with the model.

    Args:
        data: JSON string with keys:
            - 'submit_time': "%Y-%m-%d %H:%M:%S" application submission time.
            - 'f_sms_data': a JSON string encoding a list of SMS dicts, each
              with 'type', 'smsTime', and optionally 'content'.

    Returns:
        The model's scalar prediction for this sample (first output of the
        first batch element).
    """
    # Counts tensor: [days before submit] x [hour bucket] x [keyword channel].
    data_tensor = np.zeros((num_time_diffs, num_time_intervals, len(num_keywords)))

    data = json.loads(data)
    submit_time = data['submit_time']
    f_sms_data = data['f_sms_data']

    try:
        submit_time = datetime.strptime(submit_time, "%Y-%m-%d %H:%M:%S")

        # f_sms_data is doubly encoded: a JSON string inside the outer JSON.
        f_sms_data = json.loads(f_sms_data)

        for sms in f_sms_data:
            # Only messages of type '1' are counted — TODO confirm this
            # means "received" in the upstream SMS schema.
            if sms["type"] != '1':
                continue

            try:
                sms_time = datetime.strptime(sms["smsTime"], "%Y-%m-%d %H:%M:%S")
                time_diff = (submit_time - sms_time).days

                # Keep only messages within the modeled history window.
                if not 0 <= time_diff < num_time_diffs:
                    continue

                if 'content' in sms:
                    # Set lookup makes each keyword membership test O(1)
                    # instead of scanning the token list per keyword.
                    tokens = set(word_preprocessing(sms["content"]))
                    hour = sms_time.hour
                    for keyword in num_keywords:
                        if keyword in tokens:
                            # Hours are stored reversed (23 - hour) to match
                            # the layout the model was trained on.
                            data_tensor[
                                time_diff, num_time_intervals - hour - 1,
                                keyword_to_index[keyword]] += 1
                else:
                    print('没有短信。。。')

            except ValueError:
                # Unparseable per-message timestamp: skip this SMS only.
                print("日期处理时出现错误:", sms["smsTime"])
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the best-effort
        # behavior (a bad submit_time yields an all-zero tensor).
        print("进件时间出现错误:", submit_time)

    # Add the batch dimension expected by the model and predict.
    input_data = np.array([data_tensor])
    return loaded_model.predict(input_data)[0][0]

