import os
import json
import torch
from datasets import Dataset
from modelscope import snapshot_download, AutoTokenizer
from qwen_vl_utils import process_vision_info
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
from transformers import (
    Trainer,
    DataCollatorForSeq2Seq,
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
)
from config_train import Config

import random
from PIL import Image, ImageFilter
import numpy as np

def read_data(file_name, pic_path):
    """Parse a Label-Studio-style JSON export into QA training samples.

    Each sample is a dict: {'name': <image path>, 'question': <prompt text>,
    'answer': <annotated answer>}.  A sample is kept with probability
    Config.random_select_question_prob[to_name] (per-question-type subsampling).
    """
    outputs = []
    # Hoisted out of the loop: the directory contents are loop-invariant and
    # the original called os.listdir() once per JSON record.
    real_names = os.listdir(pic_path)
    with open(file_name, 'r', encoding='utf-8') as f:
        datas = json.load(f)
    for data in datas:
        name = os.path.basename(data['data']['image'])
        # The export may decorate the original filename (prefix/hash), so map
        # it back to an actual file on disk by substring match.
        for name_ in real_names:
            if name_ in name:
                name = name_
                break
        for x in data['annotations'][0]['result']:
            # Random subsampling weighted per question type.
            if Config.random_select_question_prob[x['to_name']] > random.random():
                outputs.append({
                    'name': os.path.join(pic_path, name),
                    'question': Config.question_dict[x['to_name']],
                    'answer': x['value']['text'],
                })
    return outputs

def read_data_list(json_list, pic_paths):
    """Concatenate read_data() results over paired (json file, image dir) inputs."""
    return [
        sample
        for json_file, pic_dir in zip(json_list, pic_paths)
        for sample in read_data(json_file, pic_dir)
    ]

def _augment_image(img):
    """Online augmentation: 50/50 Gaussian blur or additive Gaussian noise."""
    if random.random() > 0.5:
        return img.filter(ImageFilter.GaussianBlur(radius=2))
    # BUGFIX: the previous code cast the noise to uint8 (wrapping negative
    # samples to ~231-255) and added it to a uint8 array, which overflows
    # modulo 256 *before* np.clip ran — the clip was a no-op.  Work in a
    # signed/float dtype, clip, then cast back.
    arr = np.asarray(img).astype(np.int16)
    noise = np.random.normal(0, 25, arr.shape)
    noisy = np.clip(arr + noise, 0, 255).astype(np.uint8)
    return Image.fromarray(noisy)


def process_func(example):
    """Preprocess one dataset sample into model inputs for training.

    Builds a chat-template prompt around the sample's image + question,
    tokenizes it, appends the answer tokens as supervised targets, and
    masks the prompt portion of the labels with -100.

    Relies on the module-level `processor` and `tokenizer` globals.
    Returns a dict with input_ids, attention_mask, labels, pixel_values
    and image_grid_thw (squeezed from (1, h, w) to (h, w)).
    """
    MAX_LENGTH = 8192  # truncation bound on the token sequence
    name = example['name']
    question = example['question']
    answer = example['answer']

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": f"{name}",
                    "resized_height": 280,
                    "resized_width": 280,
                },
                {"type": "text", "text": question},
            ],
        }
    ]
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )  # prompt text
    image_inputs, video_inputs = process_vision_info(messages)  # preprocessed vision inputs

    # NOTE(review): process_vision_info typically returns a *list* of PIL
    # images, so this isinstance check may never fire and the augmentation
    # would silently be skipped — confirm against the qwen_vl_utils version
    # in use before relying on it.
    if isinstance(image_inputs, Image.Image):
        image_inputs = _augment_image(image_inputs)

    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # tensor -> list so the prompt and answer token lists can be concatenated
    inputs = {key: value.tolist() for key, value in inputs.items()}
    instruction = inputs

    response = tokenizer(f"{answer}", add_special_tokens=False)

    input_ids = (
            instruction["input_ids"][0] + response["input_ids"] + [tokenizer.pad_token_id]
    )
    attention_mask = instruction["attention_mask"][0] + response["attention_mask"] + [1]
    # Mask the prompt tokens with -100 so loss is computed only on the answer.
    labels = (
            [-100] * len(instruction["input_ids"][0])
            + response["input_ids"]
            + [tokenizer.pad_token_id]
    )
    if len(input_ids) > MAX_LENGTH:  # truncate overly long sequences
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]

    input_ids = torch.tensor(input_ids)
    attention_mask = torch.tensor(attention_mask)
    labels = torch.tensor(labels)
    inputs['pixel_values'] = torch.tensor(inputs['pixel_values'])
    inputs['image_grid_thw'] = torch.tensor(inputs['image_grid_thw']).squeeze(0)  # (1, h, w) -> (h, w)
    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels,
            "pixel_values": inputs['pixel_values'], "image_grid_thw": inputs['image_grid_thw']}



def predict(messages, model, max_new_tokens=128, device="cuda"):
    """Run one round of chat inference and return the decoded response text.

    Args:
        messages: chat messages in Qwen-VL format (role/content dicts,
            possibly containing image entries).
        model: the (optionally PEFT-wrapped) generation model.
        max_new_tokens: generation length cap (default 128, matching the
            previous hard-coded value).
        device: device to move inputs to (default "cuda", matching the
            previous hard-coded value).

    Relies on the module-level `processor` global.
    """
    # Build the prompt and preprocess any vision inputs.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(device)

    # Generate, then strip the prompt tokens so only the new response remains.
    generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]

# ---------------------------------------------------------------------------------------------------------------

# Load model weights with Transformers / ModelScope paths from Config.
tokenizer = AutoTokenizer.from_pretrained(Config.model_path, use_fast=False, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(Config.model_path)
model = Qwen2VLForConditionalGeneration.from_pretrained(Config.model_path, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True,)
# Required so gradients flow to inputs when using gradient checkpointing with frozen embeddings.
model.enable_input_require_grads()

# Gather all samples as a list of {'name', 'question', 'answer'} dicts.
out_dict = read_data_list(Config.dataset_path, Config.pic_path)#
print(f'问题权重占比:{Config.random_select_question_prob}')
print(f'共整理样本数据:{len(out_dict)}')
# Split into train/validation sets by ratio.
# NOTE(review): val_set is currently unused — the evaluation section below is
# commented out, and training runs on the full dataset (see train_ds below).

train_set = out_dict[:int(len(out_dict)*Config.train_val_ratio)]
val_set = out_dict[int(len(out_dict)*Config.train_val_ratio):]

# train_ds = Dataset.from_list(train_set)
train_ds = Dataset.from_list(out_dict)  # currently training on ALL data; evaluation is done offline
train_dataset = train_ds.map(process_func)
# LoRA configuration (defined in config_train.Config).
lora_config = Config.lora_cofig
# Wrap the base model with LoRA adapters.
peft_model = get_peft_model(model, lora_config)

# Training arguments (defined in config_train.Config).
args = Config.train_args
        
# Build the Trainer; DataCollatorForSeq2Seq pads input_ids/labels per batch.
trainer = Trainer(
    model=peft_model,
    args=args,
    train_dataset=train_dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)
# Start training.
trainer.train()

# === Evaluation mode ===
# LoRA config for re-loading the trained adapter weights for inference.
val_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=True,  # inference mode (the original comment said "training mode", which was incorrect)
    r=64,  # LoRA rank
    lora_alpha=16,  # LoRA alpha scaling factor
    lora_dropout=0.05,  # dropout probability on LoRA layers
    bias="none",
)


# -------------------------------------------------------------------------------------------------------
# # 获取测试模型
# val_peft_model = PeftModel.from_pretrained(model, 
#                                            model_id=os.path.join(Config.output_dir, 'checkpoint-100'), 
#                                            config=val_config)

# # outputs.append({'name': os.join(pic_path, name), 'question': Config.question_dict[x['to_name']], 'answer': x['value']['text']})

# test_image_list = []
# for item in val_set:
#     input_question = item['question']
#     origin_image_path = item['answer']
#     messages = [{
#         "role": "user", 
#         "content": [
#             {
#                 "type": "image", 
#                 "image": item['name']
#             },
#             {
#                 "type": "text",
#                 "text": input_question
#             }
#         ]
#     }]
    
#     response = predict(messages, val_peft_model)
#     messages.append({"role": "assistant", "content": f"{response}"})
#     print(messages[-1])
    



    