import pandas as pd
import torch
import shutil
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertModel, BertPreTrainedModel, BertTokenizer, get_linear_schedule_with_warmup
from torch.optim import AdamW
import numpy as np
import time
import os
import torch.nn as nn
import sys


global EPOCHS, BATCH_SIZE_RATIO, SEQUENCE_LEN, LEARNING_RATE, TOKENIZER, MODEL_NAME
import torch.nn.functional as F
from openpyxl import load_workbook

# ---- module-level configuration (runs at import time) ----

projectnum = 2  # NOTE(review): not read anywhere in this file — confirm it is still needed

RESULT_FILE_PATH = r'model/open.xlsx'

# Open the results workbook eagerly; `sheet` is its active worksheet.
wb = load_workbook(RESULT_FILE_PATH)
sheet = wb.active
EPOCHS = 40                # training epoch count (unused in this prediction-only script)
BATCH_SIZE_RATIO = 0.3     # fraction of the dataset used as the dynamic batch size
SEQUENCE_LEN = 20          # max token length for BERT inputs (see tokenization())
LEARNING_RATE = 5e-4       # unused here; presumably kept for parity with the training script
# define device
# NOTE(review): a `global` statement at module level is a no-op; kept as-is.
global DEVICE
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# define files to be used
global DATA_PATH
DATA_PATH = r'model/open/'
ROW_MAE, ROW_MMRE, ROW_PRED = 3, 4, 5  # presumably spreadsheet row indices for metrics — not used here

# All dataset files in DATA_PATH, case-insensitively sorted for a deterministic order.
TRAIN_TEST_FILE_PAIRS = os.listdir(DATA_PATH)
TRAIN_TEST_FILE_PAIRS.sort(key=str.lower)

OUTPUT = ''
MODEL = None
DYNAMIC_BATCH = True   # when True, data_processing() derives BATCH_SIZE from BATCH_SIZE_RATIO
BATCH_SIZE = None      # assigned by data_processing(); read by prepare_dataloader()
WITHIN_PROJECT = None
MAE_RECORDS = []
MDAE_RECORDS = []

def data_processing(file_pair):
    """Load one CSV, tokenize its summaries, and build a training DataLoader.

    Parameters
    ----------
    file_pair : str
        File name (relative to DATA_PATH) of the CSV to load.

    Returns
    -------
    tuple[str, DataLoader]
        The file name and a DataLoader whose batches pair the combined
        [3 numeric features + token ids] tensor with the story-point labels.
    """
    global BATCH_SIZE, BATCH_SIZE_RATIO, DATA_PATH, DYNAMIC_BATCH

    # Read the whole dataset; no train/test split is performed here.
    fname = DATA_PATH + file_pair
    train_data = prepare_dataframe(fname)

    train_ex = train_data.iloc[:, 0:3]  # first 3 columns: numeric features
    train_text = train_data['Summary']  # free-text issue summaries
    train_labels = train_data['Custom field (Story Points)']  # regression targets

    # Dynamically size the batch from the dataset size.
    # BUGFIX: clamp to at least 1 — int(len * 0.3) is 0 for datasets of
    # fewer than 4 rows, and DataLoader rejects batch_size=0.
    if DYNAMIC_BATCH:
        BATCH_SIZE = max(1, int(len(train_text) * BATCH_SIZE_RATIO))

    # Tokenize summaries (padded/truncated to SEQUENCE_LEN).
    tokens_train = tokenization(train_text.tolist())

    # Convert everything to tensors.
    train_seq = torch.tensor(tokens_train['input_ids'])
    train_ex = torch.tensor(np.array(train_ex))
    train_labels = torch.tensor(train_labels.tolist()).type(torch.FloatTensor)

    # Prepend the numeric features to the token ids; the model splits them
    # apart again in forward() (columns 0:3 vs 3:).
    train_seq = torch.cat((train_ex, train_seq), dim=1)

    # Build the shuffled dataloader.
    train_dataloader = prepare_dataloader(train_seq, train_labels, sampler_type='random')

    return file_pair, train_dataloader

def tokenization(text_list):
    """Tokenize a list of texts with the bert-base-cased tokenizer.

    The tokenizer is loaded once and cached on the function object:
    the original re-ran ``BertTokenizer.from_pretrained`` on every call,
    which re-reads (or re-downloads) the vocabulary each time.
    """
    tokenizer = getattr(tokenization, "_tokenizer", None)
    if tokenizer is None:
        tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        tokenization._tokenizer = tokenizer
    return tokenizer(text_list, truncation=True, max_length=SEQUENCE_LEN, padding='max_length')


def prepare_dataframe(file_name):
    """Read a CSV and return its columns reordered to the model's layout.

    Column order: the 3 numeric features, the Summary text, then the
    story-point label. All missing values are filled with 0 (note this
    also puts a numeric 0 into empty Summary cells, matching the
    original behaviour).

    Raises
    ------
    KeyError
        If any of the expected columns is missing from the CSV.
    """
    order = ['Assignee_count', 'Reporter_count', 'Creator_count', 'Summary', 'Custom field (Story Points)']
    data = pd.read_csv(file_name)
    # data[order] is already a DataFrame — the original's extra
    # pd.DataFrame(data=data) wrapper was redundant.
    return data[order].fillna(0)


def prepare_dataloader(seq, y, sampler_type, batch_size=None):
    """Wrap (seq, y) tensors in a DataLoader with the requested sampler.

    Parameters
    ----------
    seq, y : torch.Tensor
        Input features and labels with matching first dimension.
    sampler_type : str
        Either 'random' or 'sequential'.
    batch_size : int, optional
        Explicit batch size; defaults to the module-level BATCH_SIZE
        (preserves the original behaviour for existing callers).

    Raises
    ------
    ValueError
        For an unknown sampler_type. The original left `sampler`
        unbound in that case and crashed later with a NameError.
    """
    global BATCH_SIZE
    if batch_size is None:
        batch_size = BATCH_SIZE
    tensor_dataset = TensorDataset(seq, y)
    if sampler_type == 'random':
        sampler = RandomSampler(tensor_dataset)
    elif sampler_type == 'sequential':
        sampler = SequentialSampler(tensor_dataset)
    else:
        raise ValueError(f"unknown sampler_type: {sampler_type!r}")
    return DataLoader(tensor_dataset, sampler=sampler, batch_size=batch_size)

class BertForSequence(nn.Module):
    """Regression head over a frozen bert-base-cased encoder.

    The input tensor packs 3 numeric features in columns 0:3 and the
    BERT token ids in columns 3:. The [CLS] embedding is projected down
    to 3 dims, concatenated with the numeric features, and mapped to a
    single score (the predicted story points).
    """

    def __init__(self):
        super(BertForSequence, self).__init__()
        self.bert = BertModel.from_pretrained("bert-base-cased")
        # Freeze the encoder: only the small head below is trained.
        for name, param in self.bert.named_parameters():
            param.requires_grad = False
        self.hidden1 = nn.Linear(768, 3)  # project [CLS] embedding to 3 dims
        self.hidden2 = nn.Linear(6, 50)   # 3 numeric features + 3 projected dims
        self.score = nn.Linear(50, 1)     # final regression output

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # BUGFIX: pass attention_mask/token_type_ids by keyword.
        # BertModel.forward's second positional parameter is
        # attention_mask, so the original positional call fed
        # token_type_ids into the attention_mask slot (harmless only
        # while both happen to be None).
        outputs_bert = self.bert(
            input_ids[:, 3:].long(),
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        outputs = outputs_bert.last_hidden_state[:, 0, :]  # [CLS] token embedding
        outputs = self.hidden1(outputs)
        # Re-attach the 3 numeric features that were packed before the token ids.
        outputs = torch.cat((input_ids[:, 0:3], outputs), dim=1)
        outputs = torch.relu(self.hidden2(outputs.float()))
        logit = self.score(outputs)
        return logit


def predict_with_model(file_pair, model_path):
    """Load a trained checkpoint and predict story points for every row.

    Parameters
    ----------
    file_pair : str
        CSV file name (relative to DATA_PATH).
    model_path : str
        Path to a .pt state_dict matching BertForSequence.

    Returns
    -------
    list[dict]
        One dict per CSV row with 'Summary', 'True_Workload' and
        'Predicted_Workload' keys, aligned row-by-row with the file.
    """
    global DEVICE

    # Rebuild the architecture and load the trained weights safely.
    model = BertForSequence()
    model.load_state_dict(torch.load(model_path, map_location=DEVICE, weights_only=True))
    model.to(DEVICE)
    model.eval()

    # Build inputs. BUGFIX: data_processing returns a RandomSampler
    # loader, so its iteration order is shuffled — predictions would not
    # line up with summaries/true labels below. Re-wrap the same dataset
    # with a SequentialSampler to predict in CSV row order.
    file_pair, train_dataloader = data_processing(file_pair)
    dataset = train_dataloader.dataset
    eval_dataloader = DataLoader(dataset, sampler=SequentialSampler(dataset),
                                 batch_size=train_dataloader.batch_size)

    train_data = prepare_dataframe(DATA_PATH + file_pair)
    summaries = train_data['Summary'].tolist()
    true_labels_all = train_data['Custom field (Story Points)'].tolist()

    predictions = []
    with torch.no_grad():
        for batch in eval_dataloader:
            b_input_ids = batch[0].to(DEVICE)
            logits = model(b_input_ids)
            # BUGFIX: logits has shape (batch, 1); flatten so each
            # prediction is a plain float instead of a 1-element list.
            predictions.extend(logits.view(-1).cpu().tolist())

    # Pair each row with its prediction ("N/A" if one is missing).
    all_predictions = []
    for i in range(len(summaries)):
        pred_value = predictions[i] if i < len(predictions) else "N/A"
        all_predictions.append({
            'Summary': summaries[i],
            'True_Workload': true_labels_all[i],
            'Predicted_Workload': pred_value,
        })
    return all_predictions

def clear_folders():
    """Empty the prediction/result output folders (the folders themselves remain)."""
    targets = ("./model/pred_output", "./model/pred_results", "./model/result_bert")

    for target in targets:
        if not os.path.exists(target):
            continue
        for entry in os.listdir(target):
            entry_path = os.path.join(target, entry)
            try:
                if os.path.isfile(entry_path) or os.path.islink(entry_path):
                    os.remove(entry_path)  # plain file or symlink
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path)  # remove sub-folder recursively
            except Exception as e:
                print(f"删除 {entry_path} 时出错: {e}")


# Overrides the earlier None default; presumably flags within-project
# estimation mode — not read anywhere in this file, confirm before removing.
WITHIN_PROJECT = True

def main():
    """Run the FineSE prediction pipeline end-to-end.

    Expects exactly one .csv in model/open/ and exactly one .pt in
    model/model/; writes per-row predictions to model/pred_output/.
    Prints a diagnostic and returns early if either count is wrong.
    """
    print('进入 FineSE 预测流程')

    clear_folders()

    data_dir = 'model/open/'
    model_dir = 'model/model/'
    output_dir = 'model/pred_output/'

    # Locate the single CSV data file.
    csv_files = [f for f in os.listdir(data_dir) if f.endswith('.csv')]
    if len(csv_files) != 1:
        print(f"数据目录中应仅包含一个 .csv 文件，当前找到：{len(csv_files)}")
        return
    data_file = csv_files[0]

    # Locate the single model checkpoint.
    model_files = [f for f in os.listdir(model_dir) if f.endswith('.pt')]
    if len(model_files) != 1:
        print(f"模型目录中应仅包含一个 .pt 文件，当前找到：{len(model_files)}")
        return
    model_file = model_files[0]
    model_path = os.path.join(model_dir, model_file)

    print(f'使用模型: {model_file} 处理数据集: {data_file}')

    # Run prediction over the whole dataset.
    predictions = predict_with_model(data_file, model_path)

    # Save results. BUGFIX: write as UTF-8 explicitly — summaries may
    # contain non-ASCII text, and the original 'w+' open with the
    # platform default encoding could raise UnicodeEncodeError.
    os.makedirs(output_dir, exist_ok=True)
    result_file_path = os.path.join(output_dir, f'{os.path.splitext(data_file)[0]}.txt')
    with open(result_file_path, 'w', encoding='utf-8') as f:
        for pred in predictions:
            f.write(f"{pred['Summary']}\n")
            f.write(f"Predicted Workload: {pred['Predicted_Workload']}\n\n")

    print(f'预测结果已保存到 {result_file_path}')

    torch.cuda.empty_cache()
    print('预测流程完成！')

if __name__ == '__main__':
    main()
