import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoModelForCausalLM, AutoTokenizer
import json
from peft import LoraConfig, get_peft_model, PeftModel
from tqdm import tqdm

# Local path (or hub id) of the base model to fine-tune.
model_path = "Qwen2___5-7B-Instruct"

# Pin training to GPU 2 when available; fall back to CPU otherwise.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

# --- Training hyper-parameters ---
batch_size = 2        # POI pairs per optimization step (each pair yields two samples)
learning_rate = 5e-4
num_epochs = 3
temperature = 3.0     # log-temperature for the contrastive loss (exp() is applied before use)
num_categories = 200  # number of POI category labels for the classification head
alpha = 0.1           # weight of csft_loss in the (currently disabled) combined loss

def build_prompt(poi_id, poi, poi_type_list):
    """Build the instruction prompt for a single POI.

    The prompt embeds the POI's metadata as a dict-like string and contains the
    special '[EMB]' marker whose hidden state is later used as the POI embedding.
    `poi_type_list` is accepted for interface compatibility but not used.
    """
    task = "Compress the poi information into one word for recommendation and generate its category."
    # Keys are intentionally Chinese — they are part of the runtime prompt text.
    fields = [
        ("名称", poi_id),
        ("纬度", poi["poi_latitude"]),
        ("经度", poi["poi_longitude"]),
        ("评论", poi["poi_review_compress"]),
        ("图片", poi["image_txt"]),
    ]
    body = ", ".join(f"'{name}': '{value}'" for name, value in fields)
    poi_repr = "{" + body + "}"
    return f"<|im_start|>{task} {poi_repr} .The compression word is: '[EMB]'.The category is:<|im_end|>"


class GenerativeContrastiveLearning(nn.Module):
    """Wraps a causal LM with two heads for POI representation learning.

    The prompt contains a special `[EMB]` token; the hidden state at that
    token is projected to a 128-d embedding consumed by the contrastive
    loss, while the hidden state at the final position feeds a linear
    category classifier.
    """

    def __init__(self, model, tokenizer, num_categories):
        super().__init__()
        self.model = model.to(device)
        self.tokenizer = tokenizer
        self.emb_token = "[EMB]"
        self._add_special_tokens()
        # Projection head for the contrastive embedding (hidden -> 128).
        self.fc = nn.Linear(model.config.hidden_size, 128).to(device)
        # Category classifier over the final-position hidden state.
        self.classifier = nn.Sequential(nn.Linear(model.config.hidden_size, num_categories)).to(device)
        self._init_weights(self.fc)
        self._init_weights(self.classifier)

    def _add_special_tokens(self):
        """Register `[EMB]` as a special token and resize the LM embedding table."""
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.padding_side = "right"

        if self.emb_token not in self.tokenizer.get_vocab():
            self.tokenizer.add_special_tokens(
                {"additional_special_tokens": [self.emb_token]}
            )
            self.model.resize_token_embeddings(len(self.tokenizer))
            print("Added tokens")

    def _init_weights(self, module):
        """Standard truncated-normal-style init for the custom linear heads."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, input_ids, attention_mask):
        """Run the LM and return (projected [EMB] embeddings, category logits).

        Raises RuntimeError in training mode when any sample lacks the
        `[EMB]` marker; in eval mode, missing markers fall back to zeros.
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            return_dict=True
        )
        last_hidden = outputs.hidden_states[-1]  # [batch, seq, hidden]
        # Category logits from the hidden state of the last position.
        logits = self.classifier(last_hidden[:, -1, :])

        emb_id = self.tokenizer.convert_tokens_to_ids(self.emb_token)
        emb_mask = (input_ids == emb_id)
        if emb_mask.any():
            batch_idx = torch.nonzero(emb_mask, as_tuple=True)[0]
            # Every sample must contain the marker.
            # BUG FIX: the original referenced the undefined name `emb_token`
            # here (NameError); it must be `self.emb_token`.
            if torch.unique(batch_idx).numel() < input_ids.size(0):
                raise RuntimeError(f"部分样本未包含 {self.emb_token} 标记")
            # Take the FIRST [EMB] occurrence per sample. argmax over the int
            # mask returns the first maximal index, so samples with multiple
            # markers no longer shift later samples out of the batch (the
            # original argsort+truncate could keep two rows from one sample).
            first_pos = emb_mask.int().argmax(dim=1)
            rows = torch.arange(input_ids.size(0), device=input_ids.device)
            emb_vectors = last_hidden[rows, first_pos]
        else:
            if self.training:
                raise RuntimeError(f"训练数据中缺失目标标记 {self.emb_token}")
            # Inference-time fallback: zero embeddings.
            emb_vectors = torch.zeros(
                (input_ids.size(0), last_hidden.size(-1)),
                device=last_hidden.device
            )
        return self.fc(emb_vectors), logits

    def calculate_loss(self, features, temperature):
        """InfoNCE-style contrastive loss over row pairs (0,1), (2,3), ...

        `features` is [2B, D]; adjacent rows are positive pairs. `temperature`
        is a log-temperature: exp(temperature) scales the similarities.
        """
        B2, D = features.shape
        B = B2 // 2

        features = nn.functional.normalize(features, p=2, dim=1)
        sim_matrix = torch.mm(features, features.T)

        # Positive mask: each even row pairs with the next row and vice versa.
        pos_mask = torch.zeros_like(sim_matrix, dtype=torch.bool)
        for i in range(B2):
            if i % 2 == 0:
                j = i + 1 if i + 1 < B2 else i
            else:
                j = i - 1
            pos_mask[i, j] = True

        # FIX: the original recreated an nn.Parameter here on every call, so
        # the temperature was never actually trained, and it depended on the
        # global `device`. A plain local tensor on features.device is
        # numerically identical.
        scaled_tau = torch.exp(torch.tensor(temperature, device=features.device))
        pos_sim = sim_matrix[pos_mask]
        numerator = torch.exp(pos_sim * scaled_tau)

        # Denominator: all non-self similarities (positives included, as in
        # standard InfoNCE).
        neg_mask = ~torch.eye(B2, dtype=torch.bool, device=sim_matrix.device)
        denominator = torch.sum(torch.exp(sim_matrix * scaled_tau) * neg_mask, dim=1)

        loss_per_sample = -torch.log(numerator / (denominator + 1e-8)) * (0.5 / B)
        return loss_per_sample.mean()
    
# 协同监督分类任务
# class CategoryClassification(nn.Module):
#     def __init__(self, model, tokenizer, num_categories):
#         super().__init__()
#         self.model = model
#         self.tokenizer = tokenizer
#         self.tokenizer.add_special_tokens({
#             'additional_special_tokens': ['[EOS]']
#         })
#         self.model.resize_token_embeddings(len(self.tokenizer))
#         self.classifier = nn.Sequential(
#             nn.Linear(model.config.hidden_size, num_categories),
#         )
    
#     def forward(self, input_ids, attention_mask):
#         outputs = self.model(
#             input_ids,
#             attention_mask=attention_mask,
#             output_hidden_states=True
#         )
        
#         last_hidden = outputs.hidden_states[-1][:, -1, :]
#         logits = self.classifier(last_hidden)
#         return logits

class CoordinatedSupervision(nn.Module):
    """Classifies a POI prompt into one of `num_categories` categories using
    the LM's final-position hidden state."""

    def __init__(self, model, tokenizer, num_categories):
        super().__init__()
        self.model = model.to(device)
        self.tokenizer = tokenizer
        # BUG FIX: the head must live on the same device as the model's hidden
        # states (the sibling GenerativeContrastiveLearning moves its heads to
        # `device`); without .to(device) this fails on CUDA with a cpu/cuda
        # device mismatch in forward().
        self.classifier = nn.Sequential(
            nn.Linear(model.config.hidden_size, num_categories),
        ).to(device)

    def forward(self, prompt):
        """Tokenize `prompt`, run the LM, and return category logits computed
        from the last token's hidden state."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(device)
        input_ids = inputs.input_ids
        attention_mask = inputs.attention_mask
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True
        )
        last_hidden = outputs.hidden_states[-1][:, -1, :]
        logits = self.classifier(last_hidden)
        return logits

# def proprocess_data(tokenizer, max_val, min_val, poi_pair_score, poi_info):
#     poi_id_list = []
#     input_ids_list = []
#     attention_masks_list = []

#     for key, value in poi_pair_score.items():
#         if value > min_val and value < max_val:
#             current_poi, next_poi = key.split('→')
#             poi_id_list.append(current_poi)
#             poi_id_list.append(next_poi)

#             current_prompt = build_prompt(current_poi, poi_info[current_poi])
#             encoded_current_prompt = tokenizer(current_prompt, return_tensors='pt', padding=True, truncation=True)
#             input_ids_list.append(encoded_current_prompt['input_ids'])
#             attention_masks_list.append(encoded_current_prompt['attention_mask'])

#             next_prompt = build_prompt(next_poi, poi_info[next_poi])
#             encoded_next_prompt = tokenizer(next_prompt, return_tensors='pt', padding=True, truncation=True)
#             input_ids_list.append(encoded_next_prompt['input_ids'])
#             attention_masks_list.append(next_prompt['attention_mask'])
    
#     return poi_id_list, input_ids_list, attention_masks_list

def proprocess_data(tokenizer, poi_id, poi_info, poi_type_list):
    """Tokenize the prompt built for one POI and move the tensors to `device`.

    Returns a (input_ids, attention_mask) pair of tensors.
    """
    prompt = build_prompt(poi_id, poi_info, poi_type_list)
    encoded = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True).to(device)
    return encoded['input_ids'], encoded['attention_mask']


if __name__ == "__main__" :
    # model_save = "/home/chuanchang/毕设/qwen_lora"

    # Base causal LM; hidden states are required to read the [EMB] position.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        # device_map="auto",  
        # trust_remote_code=True,
        output_hidden_states=True 
    ).to(device)

    # LoRA: only the attention projection matrices receive trainable adapters.
    lora_config = LoraConfig(
        r=16,                   # low-rank dimension
        lora_alpha=32,         # scaling factor
        target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # target modules (adjust to the model architecture)
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM"
    )
    model = get_peft_model(model, lora_config)
    # model = PeftModel.from_pretrained(
    #     model,
    #     model_save,
    #     is_trainable = True
    # )

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # tokenizer = AutoTokenizer.from_pretrained(model_save)

    # Transition scores keyed by "poiA→poiB"; only the keys are used below.
    with open('MMPOI_train_score.json', 'r', encoding='utf-8') as f:
        poi_pair_score = json.load(f)

    # Per-POI metadata (coordinates, compressed reviews, image text, poi_type).
    with open('merged_poi_info.json', 'r', encoding='utf-8') as f1:
        poi_info = json.load(f1)
    
    # Maps poi_type name -> record containing an "index" label id.
    with open('label2id.json', 'r', encoding='utf-8') as f2:
        label_info = json.load(f2)

    poi_type_list = []
    for key, value in label_info.items():
        poi_type_list.append(key)

    gcl_model = GenerativeContrastiveLearning(model, tokenizer, 200).to(device)
    # gcl_model.classifier.load_state_dict(torch.load(f"{model_save}/custom_layers.pt")['classifier'])
    # gcl_model.fc.load_state_dict(torch.load(f"{model_save}/custom_layers.pt")['fc'])
    optimizer = optim.Adam(list(gcl_model.parameters()), lr=learning_rate)
    # optimizer.load_state_dict(torch.load(f"{model_save}/opt_checkpoint.pt")['optimizer'])

    # One save directory per epoch; must hold at least num_epochs entries.
    model_save_path = ["/home/chuanchang/毕设/qwen3_lora1", "/home/chuanchang/毕设/qwen3_lora2", "/home/chuanchang/毕设/qwen3_lora3", "/home/chuanchang/毕设/qwen3_lora4", "/home/chuanchang/毕设/qwen3_lora5"]

    poi_pair_score_list = list(poi_pair_score.items())
    # Number of optimization steps per epoch (fractional if not divisible).
    print(len(poi_pair_score)/batch_size)
    print("start training")

    for epoch in range(num_epochs):
        for i in tqdm(range(0, len(poi_pair_score), batch_size)):
            emb_token_list = []
            labels_list = []
            logits_list = []
            # Clamp the final (possibly short) batch to the list length.
            max_len = min(i + batch_size, len(poi_pair_score_list))
            for j in range(i, max_len):
                # Each scored pair contributes two samples: the current POI and its successor.
                key = poi_pair_score_list[j][0]
                current_poi, next_poi = key.split('→')
                current_input_ids, current_attention_masks = proprocess_data(tokenizer, current_poi, poi_info[current_poi], poi_type_list)
                next_input_ids, next_attention_masks = proprocess_data(tokenizer, next_poi, poi_info[next_poi], poi_type_list)

                current_emb_vectors, current_logits = gcl_model.forward(current_input_ids, current_attention_masks)
                next_emb_vectors, next_logits = gcl_model.forward(next_input_ids, next_attention_masks)
                emb_token_list.append(current_emb_vectors)
                emb_token_list.append(next_emb_vectors)
                logits_list.append(current_logits)
                logits_list.append(next_logits)

                current_poi_type = poi_info[current_poi]['poi_type']
                next_poi_type = poi_info[next_poi]['poi_type']
                labels_list.append(int(label_info[current_poi_type]["index"]))
                labels_list.append(int(label_info[next_poi_type]["index"]))
            # Stack per-sample outputs: [2 * pairs_in_batch, ...].
            emb_token_tensor = torch.stack(emb_token_list, dim = 0).squeeze(1)
            labels_tensor = torch.tensor(labels_list, dtype=torch.long).to(device)
            logits_tensor = torch.stack(logits_list, dim = 0).squeeze(1)
            gcl_loss = gcl_model.calculate_loss(emb_token_tensor, temperature)
            csft_loss = nn.CrossEntropyLoss()(logits_tensor, labels_tensor)

            # NOTE(review): only the classification loss is optimized here; the
            # contrastive loss is computed for logging only — the combined form
            # below is intentionally disabled. Confirm this is still intended.
            loss = csft_loss
            # loss = (gcl_loss + alpha * csft_loss)/(1 + alpha)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            print(f"Epoch {epoch + 1}, I {i}, J {j}, GCL Loss: {gcl_loss}, CSFT Loss: {csft_loss}, Total Loss: {loss}")
        
        # Persist the LoRA adapter (without embedding layers) after each epoch.
        model.save_pretrained(
            model_save_path[epoch],
            safe_serialization = True,
            save_embedding_layers = False
        )
        tokenizer.save_pretrained(model_save_path[epoch])
    
        # Optimizer state plus the last batch's loss, for resuming training.
        torch.save({
            'epoch': epoch,
            'optimizer' : optimizer.state_dict(),
            'loss': loss
        }, f"{model_save_path[epoch]}/opt_checkpoint.pt")
    
        # The custom heads are not covered by save_pretrained; store them separately.
        torch.save({
            'classifier': gcl_model.classifier.state_dict(),
            'fc': gcl_model.fc.state_dict(),
        },f"{model_save_path[epoch]}/custom_layers.pt")
        































# import torch
# import torch.nn as nn
# import torch.optim as optim
# from transformers import AutoModelForCausalLM, AutoTokenizer
# import json
# from tqdm import tqdm

# model_path = "/home/chuanchang/毕设/Qwen2___5-7B-Instruct"

# device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")

# batch_size = 10
# learning_rate = 1e-4
# num_epochs = 200
# temperature = 0.07
# num_categories = 200
# alpha = 0.5

# def build_prompt(poi_id, poi, poi_type_list):
#     instruction = "Compress the poi information into one word for recommendation and generate its category ."
#     input_poi = f"{{'名称': '{poi_id}', '纬度': '{poi['poi_latitude']}', '精度': '{poi['poi_longitude']}', '评论': '{poi['poi_review']}'}}"
#     # poi_type_list = [f" POI category list : '{poi_type}'" for poi_type in poi_type_list]
#     prompt = f"<|im_start|>{instruction} {input_poi}The compression word is: '[EMB]'.The category is:<|im_end|>"
#     return prompt


# class GenerativeContrastiveLearning(nn.Module):
#     def __init__(self, model, tokenizer):
#         super().__init__()
#         self.model = model.to(device)  # 显式移动模型到GPU
#         self.tokenizer = tokenizer
        
#         self.emb_token = "[EMB]"
#         self._add_special_tokens()

#         self.fc = nn.Linear(model.config.hidden_size, model.config.hidden_size).to(device)
#         self._init_weights(self.fc)

#     def _add_special_tokens(self):
#         self.tokenizer.pad_token = self.tokenizer.eos_token
#         self.tokenizer.padding_side = "right" 
        
#         if self.emb_token not in self.tokenizer.get_vocab():
#             self.tokenizer.add_special_tokens(
#                 {"additional_special_tokens": [self.emb_token]}
#             )
#             self.model.resize_token_embeddings(len(self.tokenizer))
#             if hasattr(self.model, 'lm_head'):
#                 self.model.lm_head = nn.Linear(
#                     self.model.config.hidden_size,
#                     len(self.tokenizer),
#                     bias=False
#                 ).to(device)  # 确保分类头在GPU
#             print(f"Added tokens")

#     def _init_weights(self, module):
#         if isinstance(module, nn.Linear):
#             module.weight.data.normal_(mean=0.0, std=0.02)
#             if module.bias is not None:
#                 module.bias.data.zero_()

#     def forward(self, input_ids, attention_mask):
#         outputs = self.model(
#             input_ids=input_ids,
#             attention_mask=attention_mask,
#             output_hidden_states=True,
#             return_dict=True
#         )

#         last_hidden = outputs.hidden_states[-1]
#         emb_id = self.tokenizer.convert_tokens_to_ids(self.emb_token)
#         emb_mask = (input_ids == emb_id)  # 输入已在GPU
#         emb_vectors = last_hidden[emb_mask]
#         emb_vectors = nn.functional.gelu(self.fc(emb_vectors))
#         return emb_vectors
    
#     def calculate_loss(self, features, temperature):
#         self.tau = nn.Parameter(torch.tensor(temperature)).to(device)
#         B2, D = features.shape
#         B = B2 // 2
        
#         features = nn.functional.normalize(features, p=2, dim=1)
#         sim_matrix = torch.mm(features, features.T)
        
#         pos_mask = torch.zeros_like(sim_matrix, dtype=torch.bool)
#         for i in range(B2):
#             if i % 2 == 0:  
#                 j = i + 1 if i + 1 < B2 else i
#             else:           
#                 j = i - 1
#             pos_mask[i, j] = True
        
#         scaled_tau = torch.exp(self.tau)
#         pos_sim = sim_matrix[pos_mask]
#         numerator = torch.exp(pos_sim * scaled_tau)
        
#         neg_mask = ~pos_mask
#         neg_mask.fill_diagonal_(False)
#         denominator = torch.sum(torch.exp(sim_matrix * scaled_tau) * neg_mask, dim=1)
        
#         loss_per_sample = -torch.log(numerator / (denominator + 1e-8))
#         return loss_per_sample.mean()
    
# # 协同监督分类任务
# class CoordinatedSupervision(nn.Module):
#     def __init__(self, model, tokenizer, num_categories):
#         super().__init__()
#         self.model = model.to(device)
#         self.tokenizer = tokenizer
#         self.classifier = nn.Sequential(
#             nn.Linear(model.config.hidden_size, num_categories),
#         )
    
#     def forward(self, prompt):
#         inputs = self.tokenizer(prompt, return_tensors="pt").to(device)
#         input_ids = inputs.input_ids
#         attention_mask = inputs.attention_mask
#         outputs = self.model(
#             input_ids,
#             attention_mask=attention_mask,
#             output_hidden_states=True
#         )
#         last_hidden = outputs.hidden_states[-1][:, -1, :]
#         logits = self.classifier(last_hidden)
#         return logits

# # class CoordinatedSupervision(nn.Module):
# #     def __init__(self, model, tokenizer):
# #         super().__init__()
# #         self.model = model.to(device)  # 确保模型在GPU
# #         self.tokenizer = tokenizer
# #         self.tokenizer.pad_token = self.tokenizer.eos_token 

# #     def forward(self, prompt):
# #         messages = [{"role": "user", "content": prompt}]
# #         text = self.tokenizer.apply_chat_template(
# #             messages,
# #             tokenize=False,
# #             add_generation_prompt=True,
# #             padding_side='left'
# #         )
        
# #         inputs = self.tokenizer(
# #             [text],
# #             return_tensors="pt",
# #             padding=True
# #         ).to(device)  # 输入直接移动到GPU
        
# #         generation_params = {
# #             "max_new_tokens": 2048,
# #             "do_sample": True,
# #             "temperature": 0.7,
# #             "top_p": 0.9
# #         }

# #         outputs = self.model.generate(**inputs, **generation_params)
# #         response = outputs[0][inputs.input_ids.shape[-1]:] 
# #         return self.tokenizer.decode(response, skip_special_tokens=True)

# # def proprocess_data(tokenizer, max_val, min_val, poi_pair_score, poi_info):
# #     poi_id_list = []
# #     input_ids_list = []
# #     attention_masks_list = []

# #     for key, value in poi_pair_score.items():
# #         if value > min_val and value < max_val:
# #             current_poi, next_poi = key.split('→')
# #             poi_id_list.append(current_poi)
# #             poi_id_list.append(next_poi)

# #             current_prompt = build_prompt(current_poi, poi_info[current_poi])
# #             encoded_current_prompt = tokenizer(current_prompt, return_tensors='pt', padding=True, truncation=True)
# #             input_ids_list.append(encoded_current_prompt['input_ids'])
# #             attention_masks_list.append(encoded_current_prompt['attention_mask'])

# #             next_prompt = build_prompt(next_poi, poi_info[next_poi])
# #             encoded_next_prompt = tokenizer(next_prompt, return_tensors='pt', padding=True, truncation=True)
# #             input_ids_list.append(encoded_next_prompt['input_ids'])
# #             attention_masks_list.append(next_prompt['attention_mask'])
    
# #     return poi_id_list, input_ids_list, attention_masks_list

# def proprocess_data(tokenizer, poi_id, poi_info, poi_type_list):
#     current_prompt = build_prompt(poi_id, poi_info, poi_type_list)
#     print(current_prompt)
#     encoded_prompt = tokenizer(current_prompt, return_tensors='pt', padding=True, truncation=True).to(device)
#     input_ids = encoded_prompt['input_ids']
#     attention_masks = encoded_prompt['attention_mask']
    
#     return input_ids, attention_masks

# # def parse_response(response):
# #     response = response.replace("json", "").replace("```", "")
# #     json_data = json.loads(response)
# #     return json_data

# def parse_response(response):
#     max_col = response.argmax(dim=1).item()
#     return max_col + 1

# def get_unique_parameters(*models):
#     params = []
#     seen = set()
#     for model in models:
#         for p in model.parameters():
#             if p.data_ptr() not in seen:
#                 params.append(p)
#                 seen.add(p.data_ptr())
#     return params

# if __name__ == "__main__" :

#     model = AutoModelForCausalLM.from_pretrained(
#         model_path,
#         # device_map="auto",  
#         # trust_remote_code=True,
#         output_hidden_states=True 
#     ).to(device)
#     tokenizer = AutoTokenizer.from_pretrained(model_path)

#     with open('all_poi_transitions_scores.json', 'r', encoding='utf-8') as f:
#         poi_pair_score = json.load(f)

#     with open('POI_review_dict.json', 'r', encoding='utf-8') as f1:
#         poi_info = json.load(f1)
    
#     with open('label2id.json', 'r', encoding='utf-8') as f2:
#         label_info = json.load(f2)

#     poi_type_list = []
#     for key, value in label_info.items():
#         poi_type_list.append(key)

#     gcl_model = GenerativeContrastiveLearning(model, tokenizer).to(device)
#     # cct_model = CategoryClassification(model, tokenizer, num_categories)
#     csft_model = CoordinatedSupervision(model, tokenizer, num_categories).to(device)

#     optimizer = optim.Adam(
#         get_unique_parameters(gcl_model, csft_model),
#         lr=learning_rate
#     )

#     poi_pair_score_list = list(poi_pair_score.items())

#     print("开始训练")
#     for epoch in range(num_epochs):
#         gcl_loss = 0
#         csft_loss = 0
#         for i in tqdm(range(0, len(poi_pair_score), batch_size)):
#             emb_token_list = []
#             labels_list = []
#             logits_list = []
#             for j in range(i, i + batch_size):
#                 key = poi_pair_score_list[j][0]
#                 current_poi, next_poi = key.split('→')
#                 current_input_ids, current_attention_masks = proprocess_data(tokenizer, current_poi, poi_info[current_poi], poi_type_list)
#                 next_input_ids, next_attention_masks = proprocess_data(tokenizer, next_poi, poi_info[next_poi], poi_type_list)

#                 current_emb_vectors = gcl_model.forward(current_input_ids, current_attention_masks)
#                 next_emb_vectors = gcl_model.forward(next_input_ids, next_attention_masks)
#                 emb_token_list.append(current_emb_vectors)
#                 emb_token_list.append(next_emb_vectors)

#                 current_poi_type = poi_info[current_poi]['poi_type']
#                 next_poi_type = poi_info[next_poi]['poi_type']
#                 labels_list.append(int(label_info[current_poi_type]["index"]))
#                 labels_list.append(int(label_info[next_poi_type]["index"]))

#                 current_prompt = build_prompt(current_poi, poi_info[current_poi])
#                 next_prompt = build_prompt(next_poi, poi_info[current_poi])
#                 current_logits = parse_response(csft_model(current_prompt))
#                 next_logits = parse_response(csft_model(next_prompt))
#                 # logits_list.append(int(label_info[current_logits]["index"]))
#                 # logits_list.append(int(label_info[next_logits]["index"]))
#                 logits_list.append(int(current_logits))
#                 logits_list.append(int(next_logits))
                

#             emb_token_tensor = torch.tensor(emb_token_list, dtype=torch.float32)
#             label_tensor = torch.tensor(labels_list, dtype=torch.long).to(device)
#             logits_tensor = torch.tensor(logits_list, dtype=torch.long).to(device)

#             gcl_loss += gcl_model.calculate_loss(emb_token_tensor, temperature)
#             csft_loss += nn.CrossEntropyLoss()(label_tensor, logits_tensor)

#         loss = (gcl_loss + alpha * csft_loss)/(1 + alpha)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()

#         print(f"Epoch {epoch + 1}, GCL Loss: {gcl_loss}, CSFT Loss: {csft_loss}, Total Loss: {loss}")

