import torch
import torch.nn as nn
from torch import Tensor
from argparse import ArgumentParser
from utils import Normalize
from transformers import AutoModel, AutoTokenizer
from core import PatchReprogram, OutputProjection, SpecificResNet


# 预测 TEC 所用的 TimeLLM 模型
# Time-LLM-style model for TEC (total electron content) forecasting.
class TimeLLMForTEC(nn.Module):
    """Forecast TEC by reprogramming time-series patches into the token
    space of a frozen pretrained LLM and prefixing a natural-language
    prompt (Prompt-as-Prefix)."""

    def __init__(self, configs: ArgumentParser):
        """Build all sub-modules.

        Args:
            configs: parsed command-line arguments; must provide
                llm_model, llm_model_root, num_features, seq_len,
                pred_len, patch_len, patch_stride, d_ff, d_merge,
                domain_description and top_k.
        """
        super(TimeLLMForTEC, self).__init__()
        '''
        Load and freeze the pretrained LLM
        '''
        # Prefer the local cache; fall back to downloading from the hub.
        try:
            self.llm_model = AutoModel.from_pretrained(
                configs.llm_model, trust_remote_code=True,
                local_files_only=True, cache_dir=configs.llm_model_root)
            self.llm_tokenizer = AutoTokenizer.from_pretrained(
                configs.llm_model, trust_remote_code=True,
                local_files_only=True, cache_dir=configs.llm_model_root)
        except EnvironmentError:  # model has not been downloaded yet
            print("Local model files not found. Attempting to download...")
            self.llm_model = AutoModel.from_pretrained(
                configs.llm_model, trust_remote_code=True,
                local_files_only=False, cache_dir=configs.llm_model_root)
            self.llm_tokenizer = AutoTokenizer.from_pretrained(
                configs.llm_model, trust_remote_code=True,
                local_files_only=False, cache_dir=configs.llm_model_root)
        # forward() tokenizes with padding=True, which fails for tokenizers
        # that define no pad token (e.g. GPT-2); reuse EOS as PAD, or add
        # an explicit [PAD] token when there is no EOS either.
        if self.llm_tokenizer.pad_token is None:
            if self.llm_tokenizer.eos_token is not None:
                self.llm_tokenizer.pad_token = self.llm_tokenizer.eos_token
            else:
                self.llm_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        # Freeze every LLM parameter: only the adapter modules are trained.
        for param in self.llm_model.parameters():
            param.requires_grad = False
        '''
        Patch and reprogramming modules
        '''
        # Token-embedding table of the LLM (shared with the reprogrammer).
        self.word_embedding = self.llm_model.get_input_embeddings()
        # Vocabulary size and hidden width of the LLM.
        vocab_size, d_llm = self.word_embedding.weight.shape
        # Patches the series and reprograms the patches into token space.
        self.patch_reprogram = PatchReprogram(configs, vocab_size=vocab_size, llm_dim=d_llm)
        '''
        Input/output processing and auxiliary modules
        '''
        # Instance normalization, with separate statistics for the TEC
        # channel and the remaining space-environment (SEP) channels.
        self.nomalizer_tec = Normalize(num_features=1, affine=True, subtract_last=False)
        self.nomalizer_sep = Normalize(num_features=configs.num_features - 1, affine=True, subtract_last=False)
        # Number of patches produced by a strided sliding window.
        patch_num = (configs.seq_len - configs.patch_len) // configs.patch_stride + 1
        # Maps flattened LLM hidden states onto the forecast horizon.
        self.output_projection = OutputProjection(
            patch_num=patch_num, d_llm=d_llm, d_ff=configs.d_ff, pred_len=configs.pred_len, dropout=0.15
        )
        '''
        Front residual block fusing TEC and space-environment features
        '''
        self.in_resnet = SpecificResNet(num_features=configs.num_features, d_merge=configs.d_merge)
        '''
        Bookkeeping
        '''
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len
        # Domain description injected into every prompt.
        self.description = configs.domain_description
        # Number of autocorrelation lags reported in the prompt.
        self.top_k = configs.top_k

    def __create_prompt(self, x: Tensor, info: list) -> list:
        """Build one natural-language prompt per sample.

        Args:
            x: input series, shape (B, T).
            info: per-sample metadata dicts with keys 'ID', 'position'
                ((lat, lon) pair) and 'date' (pandas Timestamp-like:
                provides .year, .month_name(), .day).

        Returns:
            List of B prompt strings.
        """
        # Summary statistics quoted in the prompt.
        min_values = torch.min(x, dim=-1).values
        max_values = torch.max(x, dim=-1).values
        medians = torch.median(x, dim=-1).values
        lags = self.__calculate_lags(x)
        trends = x.diff().sum(-1)
        # Fill the prompt template for each sample in the batch.
        prompt = []
        for b in range(x.shape[0]):
            prompt.append((
                "<|start_prompt|> "
                f"Dataset description: {self.description}; "
                f"Task description: forecast the next {str(self.pred_len)} steps given the previous {str(self.seq_len)} steps information; "
                "Station Information: "
                f"Station ID: {info[b]['ID']}, Latitude {info[b]['position'][0]:.2f}, Longitude {info[b]['position'][1]:.2f}, "
                f"Year: {info[b]['date'].year}, Month: {info[b]['date'].month_name()}, Day: {info[b]['date'].day}; "
                "Input statistics: "
                f"min value {min_values[b].item():.3f}, "
                f"max value {max_values[b].item():.3f}, "
                f"median value {medians[b].item():.3f}, "
                f"the trend of input is {'upward' if trends[b] > 0 else 'downward'}, "
                # BUGFIX: the prompt hard-coded "top 5" while emitting
                # self.top_k lags; report the actual count.
                f"top {self.top_k} lags are : {lags[b].tolist()}; "
                "<|<end_prompt>|>"
            ))
        return prompt

    def __calculate_lags(self, x: Tensor) -> Tensor:
        """Return the indices of the top_k autocorrelation lags.

        Args:
            x: input series, shape (B, T).

        Returns:
            Integer tensor of shape (B, top_k), lags sorted by
            descending autocorrelation (lag 0 always ranks first).
        """
        # Wiener-Khinchin: circular autocorrelation = irfft(|rfft(x)|^2).
        # (The original computed the identical rfft twice.)
        x_fft = torch.fft.rfft(x)
        corr = torch.fft.irfft(x_fft * torch.conj(x_fft))
        return torch.topk(corr, self.top_k, dim=-1).indices

    def forward(self, x: Tensor, info: list) -> Tensor:
        """Forecast the next pred_len TEC values.

        Args:
            x: input window, shape (B, T, N); channel 0 is TEC, the
                remaining N-1 channels are space-environment features.
            info: per-sample metadata list passed to the prompt builder.

        Returns:
            Denormalized forecast, shape (B, pred_len).
        """
        # Split TEC from the space-environment channels and normalize
        # each group with its own statistics.
        tec, sep = x[..., 0], x[..., 1:]
        tec = self.nomalizer_tec(tec, 'norm')
        sep = self.nomalizer_sep(sep, 'norm')
        # Re-assemble the normalized channels.
        x = torch.cat([tec.unsqueeze(-1), sep], dim=-1)
        # Fuse the feature channels through the front residual block.
        # NOTE(review): __create_prompt and patch_reprogram below treat x
        # as (B, T), so in_resnet presumably merges the channel dimension
        # away — confirm against SpecificResNet's output shape.
        x = self.in_resnet(x)
        # Render per-sample prompts and tokenize them (padded batch,
        # truncated to at most 2048 token ids).
        prompt = self.llm_tokenizer(self.__create_prompt(x, info),
                                    return_tensors="pt", padding=True, truncation=True, max_length=2048).input_ids
        # Embed the prompt tokens with the LLM's own embedding table.
        prompt_embedding = self.word_embedding(prompt.to(x.device))
        # Patch the series and reprogram the patches into token space.
        x = self.patch_reprogram(x, self.word_embedding.weight)
        # Prompt-as-Prefix: prepend the prompt embeddings to the series.
        x = torch.cat([prompt_embedding, x], dim=1)
        # Run the frozen LLM over the combined embedding sequence.
        x = self.llm_model(inputs_embeds=x).last_hidden_state
        # Project the hidden states onto the forecast horizon.
        x = self.output_projection(x)
        # Undo the TEC normalization on the forecast.
        return self.nomalizer_tec(x, 'denorm')
