import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoModelForCausalLM, AutoTokenizer
import json
from peft import LoraConfig, get_peft_model, PeftModel
from tqdm import tqdm

# Path (or hub id) of the base instruct model checkpoint.
model_path = "Qwen2___5-7B-Instruct"

# Pin all computation to a single GPU when available, else CPU.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

batch_size = 2        # samples per step (unused in this inference script)
learning_rate = 1e-4  # Adam learning rate
num_epochs = 2        # training epochs (unused in this inference script)
temperature = 3.0     # contrastive temperature; used as log-scale (tau = exp(temperature)) in calculate_loss
num_categories = 200  # number of POI category labels for the classifier head
alpha = 0.1           # presumably a loss-mixing weight -- not referenced in this chunk, TODO confirm

def build_prompt(poi_id, poi):
    """Assemble the chat-style prompt asking the LM to compress one POI record
    into a single word (emitted at the '[EMB]' slot) and name its category.

    `poi` is a dict expected to carry 'poi_latitude', 'poi_longitude',
    'poi_review_compress' and 'image_txt'.
    """
    task = "Compress the poi information into one word for recommendation and generate its category."
    payload = (
        f"{{'名称': '{poi_id}', '纬度': '{poi['poi_latitude']}', "
        f"'经度': '{poi['poi_longitude']}', '评论': '{poi['poi_review_compress']}', "
        f"'图片': '{poi['image_txt']}'}}"
    )
    return (
        f"<|im_start|>{task} {payload} .The compression word is: "
        "'[EMB]'.The category is:<|im_end|>"
    )


class GenerativeContrastiveLearning(nn.Module):
    """Wraps a causal LM to produce, per input sequence:

    * a 128-d contrastive embedding taken from the hidden state at the
      special ``[EMB]`` token (projected through ``self.fc``), and
    * category logits from the hidden state at the final sequence position
      (through ``self.classifier``).
    """

    def __init__(self, model, tokenizer, num_categories):
        super().__init__()
        self.model = model.to(device)
        self.tokenizer = tokenizer
        self.emb_token = "[EMB]"
        self._add_special_tokens()
        # Projection head: [EMB] hidden state -> 128-d contrastive embedding.
        self.fc = nn.Linear(model.config.hidden_size, 128).to(device)
        # Category head: last-position hidden state -> num_categories logits.
        self.classifier = nn.Sequential(nn.Linear(model.config.hidden_size, num_categories)).to(device)
        # Fix: the original called _init_weights(self.classifier) directly,
        # which was a no-op because classifier is an nn.Sequential, not an
        # nn.Linear. apply() recurses into the contained Linear layers.
        self.fc.apply(self._init_weights)
        self.classifier.apply(self._init_weights)

    def _add_special_tokens(self):
        """Register ``[EMB]`` as an additional special token and resize the
        LM's embedding table accordingly. Also sets pad token / padding side."""
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.padding_side = "right"

        if self.emb_token not in self.tokenizer.get_vocab():
            self.tokenizer.add_special_tokens(
                {"additional_special_tokens": [self.emb_token]}
            )
            self.model.resize_token_embeddings(len(self.tokenizer))
            print(f"Added tokens")

    def _init_weights(self, module):
        """Initialize Linear layers: N(0, 0.02) weights, zero bias."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, input_ids, attention_mask):
        """Run the LM and return ``(fc([EMB] hidden state), category logits)``.

        Raises RuntimeError in training mode when a sample lacks the
        ``[EMB]`` token; in eval mode, missing tokens fall back to a zero
        embedding.
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            return_dict=True
        )
        last_hidden = outputs.hidden_states[-1]  # [batch, seq, hidden]
        # NOTE(review): with right padding the final position can be a pad
        # token for shorter sequences in a batch -- consider using
        # attention_mask to locate the true last token. TODO confirm intent.
        logits = self.classifier(last_hidden[:, -1, :])

        emb_id = self.tokenizer.convert_tokens_to_ids(self.emb_token)

        emb_id_tensor = torch.tensor([emb_id], device=input_ids.device)
        emb_mask = (input_ids == emb_id_tensor)
        if emb_mask.any():
            positions = torch.nonzero(emb_mask, as_tuple=True)
            emb_vectors = last_hidden[positions[0], positions[1]]

            # Every sample in the batch must contain at least one [EMB].
            unique_batches = torch.unique(positions[0], sorted=True)
            if len(unique_batches) < input_ids.size(0):
                # Fix: the original referenced the undefined name `emb_token`
                # here, raising NameError instead of this message.
                raise RuntimeError(f"部分样本未包含 {self.emb_token} 标记")

            # Keep batch order. Assumes one [EMB] per sample -- with multiple
            # occurrences this slice could drop later samples (TODO confirm).
            emb_vectors = emb_vectors[positions[0].argsort()][:input_ids.size(0)]
        else:
            if self.training:
                # Fix: same undefined-name bug (`emb_token` -> `self.emb_token`).
                raise RuntimeError(f"训练数据中缺失目标标记 {self.emb_token}")
            # Inference fallback: zero embeddings when [EMB] is absent.
            emb_vectors = torch.zeros(
                (input_ids.size(0), last_hidden.size(-1)),
                device=last_hidden.device
            )
        return self.fc(emb_vectors), logits

    def calculate_loss(self, features, temperature):
        """InfoNCE-style contrastive loss where rows (2i, 2i+1) are positive
        pairs. ``temperature`` is a log-scale value: similarities are scaled
        by exp(temperature).
        """
        # Fix: the original wrapped tau in a fresh nn.Parameter on every call
        # (so it was never registered with the optimizer) and moved it with
        # the module-global `device`, risking a mismatch with `features`.
        # A plain tensor on features' device computes the same loss.
        self.tau = torch.tensor(temperature, device=features.device)
        total = features.shape[0]

        features = nn.functional.normalize(features, p=2, dim=1)
        sim_matrix = torch.mm(features, features.T)

        # Positive mask: each even row pairs with the next row and vice versa.
        pos_mask = torch.zeros_like(sim_matrix, dtype=torch.bool)
        for i in range(total):
            if i % 2 == 0:
                j = i + 1 if i + 1 < total else i
            else:
                j = i - 1
            pos_mask[i, j] = True

        scaled_tau = torch.exp(self.tau)
        pos_sim = sim_matrix[pos_mask]
        numerator = torch.exp(pos_sim * scaled_tau)

        # Negatives: everything except the positive pair and the diagonal.
        neg_mask = ~pos_mask
        neg_mask.fill_diagonal_(False)
        denominator = torch.sum(torch.exp(sim_matrix * scaled_tau) * neg_mask, dim=1)

        loss_per_sample = -torch.log(numerator / (denominator + 1e-8))
        return loss_per_sample.mean()
    
# 协同监督分类任务
# class CategoryClassification(nn.Module):
#     def __init__(self, model, tokenizer, num_categories):
#         super().__init__()
#         self.model = model
#         self.tokenizer = tokenizer
#         self.tokenizer.add_special_tokens({
#             'additional_special_tokens': ['[EOS]']
#         })
#         self.model.resize_token_embeddings(len(self.tokenizer))
#         self.classifier = nn.Sequential(
#             nn.Linear(model.config.hidden_size, num_categories),
#         )
    
#     def forward(self, input_ids, attention_mask):
#         outputs = self.model(
#             input_ids,
#             attention_mask=attention_mask,
#             output_hidden_states=True
#         )
        
#         last_hidden = outputs.hidden_states[-1][:, -1, :]
#         logits = self.classifier(last_hidden)
#         return logits

class CoordinatedSupervision(nn.Module):
    """Auxiliary supervision head: tokenizes a prompt, runs the LM, and
    classifies the POI category from the last position's hidden state."""

    def __init__(self, model, tokenizer, num_categories):
        super().__init__()
        self.model = model.to(device)
        self.tokenizer = tokenizer
        # Fix: move the classifier to the same device as the model. The
        # original left it on CPU, which breaks the matmul in forward()
        # whenever CUDA is available (inputs are moved to `device` there).
        # This also matches GenerativeContrastiveLearning's classifier setup.
        self.classifier = nn.Sequential(
            nn.Linear(model.config.hidden_size, num_categories),
        ).to(device)

    def forward(self, prompt):
        """Return category logits of shape [batch, num_categories] for a raw
        text prompt (tokenized internally and moved to `device`)."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(device)
        input_ids = inputs.input_ids
        attention_mask = inputs.attention_mask
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True
        )
        # Classify from the final sequence position of the last hidden layer.
        last_hidden = outputs.hidden_states[-1][:, -1, :]
        logits = self.classifier(last_hidden)
        return logits

# def proprocess_data(tokenizer, max_val, min_val, poi_pair_score, poi_info):
#     poi_id_list = []
#     input_ids_list = []
#     attention_masks_list = []

#     for key, value in poi_pair_score.items():
#         if value > min_val and value < max_val:
#             current_poi, next_poi = key.split('→')
#             poi_id_list.append(current_poi)
#             poi_id_list.append(next_poi)

#             current_prompt = build_prompt(current_poi, poi_info[current_poi])
#             encoded_current_prompt = tokenizer(current_prompt, return_tensors='pt', padding=True, truncation=True)
#             input_ids_list.append(encoded_current_prompt['input_ids'])
#             attention_masks_list.append(encoded_current_prompt['attention_mask'])

#             next_prompt = build_prompt(next_poi, poi_info[next_poi])
#             encoded_next_prompt = tokenizer(next_prompt, return_tensors='pt', padding=True, truncation=True)
#             input_ids_list.append(encoded_next_prompt['input_ids'])
#             attention_masks_list.append(next_prompt['attention_mask'])
    
#     return poi_id_list, input_ids_list, attention_masks_list

def proprocess_data(tokenizer, poi_id, poi_info):
    """Tokenize the prompt built for one POI and move it to the target device.

    Returns the ``(input_ids, attention_mask)`` tensor pair.
    """
    prompt_text = build_prompt(poi_id, poi_info)
    encoded = tokenizer(
        prompt_text,
        return_tensors='pt',
        padding=True,
        truncation=True,
    ).to(device)
    return encoded['input_ids'], encoded['attention_mask']


if __name__ == "__main__" :
    # Directory holding the trained LoRA adapter, tokenizer and custom heads.
    model_save = "/home/chuanchang/毕设/qwen3_lora1"

    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        output_hidden_states=True
    ).to(device)

    # LoRA hyperparameters kept for reference; the actual adapters are
    # loaded from `model_save` below, so this config is not applied here.
    lora_config = LoraConfig(
        r=16,                   # low-rank dimension
        lora_alpha=32,          # scaling factor
        target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM"
    )
    model = PeftModel.from_pretrained(
        model,
        model_save,
    )

    tokenizer = AutoTokenizer.from_pretrained(model_save)

    with open('MMPOI_train_score.json', 'r', encoding='utf-8') as f:
        poi_pair_score = json.load(f)

    with open('merged_poi_info.json', 'r', encoding='utf-8') as f1:
        poi_info = json.load(f1)

    with open('label2id.json', 'r', encoding='utf-8') as f2:
        label_info = json.load(f2)

    # Category label names (kept for parity with training; unused below).
    poi_type_list = list(label_info.keys())

    gcl_model = GenerativeContrastiveLearning(model, tokenizer, num_categories).to(device)
    # Fix: load the checkpoint once instead of deserializing the same file
    # twice as the original did.
    custom_layers = torch.load(f"{model_save}/custom_layers.pt")
    gcl_model.classifier.load_state_dict(custom_layers['classifier'])
    gcl_model.fc.load_state_dict(custom_layers['fc'])
    optimizer = optim.Adam(gcl_model.parameters(), lr=learning_rate)
    optimizer.load_state_dict(torch.load(f"{model_save}/opt_checkpoint.pt")['optimizer'])

    # Fix: switch to eval mode for inference so LoRA dropout (0.05) is
    # disabled; the original ran the forward passes in training mode.
    gcl_model.eval()

    # Compute the [EMB] embedding for every POI.
    poi_emb = {}
    with torch.no_grad():
        for poi_id, info in tqdm(poi_info.items()):
            input_ids, attention_masks = proprocess_data(tokenizer, poi_id, info)
            emb_vectors, logits = gcl_model(input_ids, attention_masks)
            poi_emb[poi_id] = emb_vectors

    # Strip the batch dimension and convert tensors to plain lists for JSON.
    poi_emb_serializable = {
        key: value.detach().cpu().tolist()[0]
        for key, value in poi_emb.items()
    }

    with open('poi_emb_1ep_csft.json', 'w', encoding='utf-8') as f3:
        json.dump(poi_emb_serializable, f3, ensure_ascii=False, indent=4)


    
    











