import pandas as pd
import json
import re
from datetime import datetime, timedelta
import torch
from transformers import XLNetForSequenceClassification, XLNetTokenizer, XLNetModel
from transformers import XLNetConfig

import os
import numpy as np

print(f"开始!")
# Hard-coded sample SMS records that feed the prediction pipeline in __main__.
# Each record has: content (message body), oppositePhone (sender id/shortcode),
# smsTime (timestamp in "%Y-%m-%d %H:%M:%S", parsed by process_data), and
# type ("1" — presumably "received message"; TODO confirm with the data source).
text = [
    {
        "content": "Bienvenido a mi ATT, su código de seguridad es: 589646. No comparta su código de seguridad con nadie.",
        "oppositePhone": "+321",
        "smsTime": "2023-08-13 10:46:06",
        "type": "1"
    },
    {
        "content": "Bienvenido a mi ATT, su código de seguridad es: 650928. No comparta su código de seguridad con nadie.",
        "oppositePhone": "+321",
        "smsTime": "2023-08-12 00:31:57",
        "type": "1"
    },
    {
        "content": "Gracias por la compra de tu Paquete Internet 100 MB. Vigencia al 12/08/2023 01:29:53",
        "oppositePhone": "10090",
        "smsTime": "2023-08-12 01:31:59",
        "type": "1"
    },
    {
        "content": "Te informamos que tu consumo ha llegado al 100% de tu internet. Para información sobre tus opciones de navegación llama al *611 o al 018001010288.",
        "oppositePhone": "10090",
        "smsTime": "2023-08-12 14:40:22",
        "type": "1"
    },
    {
        "content": "[Superprestamos efectivo] Su contraseña de verificación de inicio de sesión en Superprestamos efectivo es 5862.",
        "oppositePhone": "2201711354",
        "smsTime": "2023-08-10 21:52:56",
        "type": "1"
    },
    {
        "content": "Tu saldo está por agotarse. Recarga con tu tarjeta bancaria en padnet.telcel.com/cc/001 or vía SMS en www.telcel.com/0/t2pdpc",
        "oppositePhone": "Telcel",
        "smsTime": "2023-08-12 14:17:23",
        "type": "1"
    }
]


class YourCustomXLNetModel(torch.nn.Module):
    """XLNet encoder topped with a binary (2-class) linear classification head."""

    def __init__(self, config):
        super().__init__()
        # Backbone weights come from the local path stored on the config object.
        self.xlnet = XLNetModel.from_pretrained(config.xlnet_bath)
        # 768 is the base model's hidden size; 2 output logits for binary classes.
        self.classifier = torch.nn.Linear(768, 2)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        """Encode the batch and project the first position's hidden state to logits."""
        encoded = self.xlnet(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        first_position = encoded.last_hidden_state[:, 0, :]
        return self.classifier(first_position)



# Load the Spanish stop-word list (one word per line). The original opened the
# file without closing it; `with` guarantees the handle is released.
with open(r'C:\Users\12706\PycharmProjects\mxg_1017\re_data\stop_words_all.txt', 'r',
          encoding='utf-8') as stopword_file:
    stopwords_es = [line.strip() for line in stopword_file]

# Load the lower-cased keyword list used to select relevant SMS sentences.
with open(r'C:\Users\12706\PycharmProjects\mxg_1017\re_data\mxg_jian_low.txt', 'r', encoding='utf-8') as word_file:
    nrly_daik_words = set(word_file.read().lower().split())


def process_data(text, submit_time_str, max_days_difference=7, *, keywords=None, stopwords=None):
    """Filter, clean, and concatenate recent SMS messages that contain keywords.

    Parameters
    ----------
    text : list[dict]
        SMS records with 'content', 'smsTime' ("%Y-%m-%d %H:%M:%S") and 'type' keys.
    submit_time_str : str
        Reference timestamp ("%Y-%m-%d %H:%M:%S"); only messages whose smsTime is
        within ``max_days_difference`` days of it (either direction) are kept.
    max_days_difference : int
        Maximum allowed |smsTime - submit_time| in days.
    keywords : collection of str, optional
        Lower-case trigger words; defaults to the module-level ``nrly_daik_words``.
    stopwords : collection of str, optional
        Words dropped from matched sentences; defaults to module-level ``stopwords_es``.

    Returns
    -------
    str
        Space-joined cleaned sentences, "" when nothing matched, or "-1" on
        unexpected error (legacy best-effort contract preserved).
    """
    if keywords is None:
        keywords = nrly_daik_words
    if stopwords is None:
        stopwords = stopwords_es

    print(f"text===>{text}")
    print(f"开始清理 ")

    bracket_pattern = r"\(.*?\)|\{.*?\}|\[.*?\]"      # (...) / {...} / [...] spans
    url_pattern = r'(https?://[a-zA-Z0-9.?/&=:]*)'    # http(s) URLs

    try:
        submit_time = datetime.strptime(submit_time_str, "%Y-%m-%d %H:%M:%S")
        recent_contents = []

        for sms_data in text:
            content = sms_data.get('content', '')
            sms_time_str = sms_data.get('smsTime', '')

            try:
                sms_time = datetime.strptime(sms_time_str, "%Y-%m-%d %H:%M:%S")
            except ValueError:
                continue  # skip records with malformed timestamps

            if sms_data.get('type') != '1':
                continue
            if abs(sms_time - submit_time) > timedelta(days=max_days_difference):
                continue  # outside the allowed time window

            cleaned = re.sub(bracket_pattern, '', content)
            cleaned = re.sub(url_pattern, '', cleaned).lower()

            for sentence in cleaned.split("."):
                # Keep a sentence as soon as ONE keyword matches. The original
                # code appended the sentence once per matching keyword, which
                # duplicated text in the combined output — fixed here.
                if any(word in sentence for word in keywords):
                    sentence = re.sub(r"[@#%\"&<>,;\\*/:\-?!]", "", sentence)
                    sentence = re.sub(r"[@#$%&<>'_=+,/*:\-?!\n]|x{3,}", "", sentence)
                    words = sentence.split(" ")
                    recent_contents.append(" ".join(w for w in words if w not in stopwords))

        if not recent_contents:
            return ""

        combined_recent_content = ' '.join(recent_contents)
        print(f"combined_recent_content===>{combined_recent_content}")
        return combined_recent_content
    except Exception as e:
        # Preserve the legacy "-1" error sentinel, but surface the error
        # instead of swallowing it silently.
        print(f"process_data failed: {e}")
        return "-1"

def remove_numbers(text):
    """Return *text* with every digit removed and whitespace normalized.

    Digits are deleted outright; any remaining whitespace runs collapse to a
    single space, and leading/trailing whitespace is stripped.
    """
    without_digits = re.sub(r"\d", "", text)
    return " ".join(without_digits.split())

class Config(object):
    """Runtime settings for the XLNet inference pipeline."""

    def __init__(self):
        # NOTE(review): "xlnet_bath" looks like a typo for "xlnet_path", but the
        # attribute name is read by YourCustomXLNetModel, so it is kept as-is.
        self.xlnet_bath = r"./xlnet-base-cased"  # local pretrained-model directory
        self.max_length = 350                    # tokenizer padding/truncation length
        self.use_cuda = True
        # Prefer GPU when one is available, otherwise fall back to CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"


if __name__ == "__main__":
    print("Step 1: 读取数据")
    config = Config()

    # Reference timestamp: only SMS within 7 days of this moment are kept.
    submit_time_str = "2023-08-12 00:00:00"
    result = process_data(text, submit_time_str)

    print("完成数据预处理")

    # Strip digits (codes, phone numbers) before tokenization.
    result_without_numbers = remove_numbers(result)

    # Build the tokenizer from the local pretrained-model directory.
    print("Step 2: 使用本地tokenizer")
    tokenizer_xlnet = XLNetTokenizer.from_pretrained(r"./xlnet-base-cased")

    # Load the locally fine-tuned XLNet classification model.
    print("Step 3: 加载自己训练的 XLNet 模型")

    # Path to the fine-tuned weights (a state_dict saved as .bin).
    model_path_xlnet = "C:/Users/12706/PycharmProjects/mxg_1017/xl_model/directory/my_model_xlnet_20.bin"

    model_xlnet = YourCustomXLNetModel(config)
    # NOTE(review): strict=False silently ignores missing/unexpected keys, so a
    # checkpoint that does not match YourCustomXLNetModel would load without
    # error — confirm the saved state_dict really matches this architecture.
    # Also note the model is never moved to config.device and .eval() is not
    # called, so dropout stays in training mode during inference — verify.
    model_xlnet.load_state_dict(torch.load(model_path_xlnet, map_location=config.device), strict=False)

    # Prepare the cleaned text for prediction.
    print("Step 4: 准备数据进行预测")


    input_texts = [result_without_numbers]

    # NOTE(review): add_special_tokens=False means no <sep>/<cls> tokens are
    # appended, yet the model classifies position 0 of the sequence — confirm
    # this matches how the checkpoint was trained.
    input_tokenizer = tokenizer_xlnet.batch_encode_plus(input_texts,
                                                     add_special_tokens=False,
                                                     padding='max_length',
                                                     truncation=True,
                                                     max_length=config.max_length,
                                                     return_tensors='pt')

    input_ids = input_tokenizer['input_ids']
    attention_mask = input_tokenizer['attention_mask']
    # token_type_ids is extracted here but NOT passed to the model call below.
    token_type_ids = input_tokenizer['token_type_ids']

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model_xlnet(input_ids=input_ids, attention_mask=attention_mask)

    # The model returns raw logits of shape (1, 2); take the arg-max class id.
    predicted_class = torch.argmax(outputs, dim=-1).item()

    print("Step 5: 进行预测")
    print("Predicted Class:", predicted_class)

    # Collect the input text and predicted class into a one-row DataFrame.
    data = {'Text': [result_without_numbers], 'Predicted_Class': [predicted_class]}
    df = pd.DataFrame(data)

    # Persist the prediction to CSV (the ./data_info directory must exist).
    df.to_csv('./data_info/predicted_results.csv', index=False)

    print("预测结果已保存到 predicted_results.csv 文件")



