import argparse
import concurrent.futures
import copy
import json
import os
import pickle
import queue
import threading
import time
from io import BytesIO

import blosc
import lmdb
import numpy as np
import requests
import torch
import torch.nn.functional as F
from datasets import load_dataset
from fastchat.model.model_adapter import get_conversation_template
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
from llava.utils import disable_torch_init
from PIL import Image
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# ---- Command-line configuration -------------------------------------------
# Extracts hidden states / input embeddings for the dataset slice
# [start, end) and stores them compressed in an LMDB database.
parser = argparse.ArgumentParser(description='sp')
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=100)
parser.add_argument('--index', type=int, default=1)
parser.add_argument('--gpu_index', type=int, nargs='+', default=[0])
parser.add_argument('--outdir', type=str, default='outdir0')
parser.add_argument('--model', type=str, default=None)
parser.add_argument('--image_data_path', type=str, default=None)
parser.add_argument('--json_data_path', type=str, default=None)
parser.add_argument('--continue_id', type=int, default=39865)
args = parser.parse_args()
# Build a clean comma-separated device list ("0,1"). The original sliced
# str(list), which leaves a space after each comma ("0, 1").
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(i) for i in args.gpu_index)

bigname = args.model
image_data_path = args.image_data_path
json_data_path = args.json_data_path

def my_process_images(images, image_processor, model_cfg):
    """Preprocess a list of images into model-ready tensors.

    model_cfg["image_aspect_ratio"] selects the strategy:
      * "original" (default): no resize, no center crop.
      * "448": resize to 448x448, no center crop.
      * anything else: delegate to the processor's default pipeline.

    Returns a stacked 4-D tensor when all images share a shape, otherwise
    the list of per-image 3-D tensors (empty input yields an empty list).
    """
    image_aspect_ratio = model_cfg.get("image_aspect_ratio", "original")
    if image_aspect_ratio == "original":
        preprocess_kwargs = dict(do_resize=False, do_center_crop=False)
    elif image_aspect_ratio == "448":
        preprocess_kwargs = dict(
            do_resize=True,
            size={"height": 448, "width": 448},
            do_center_crop=False,
        )
    else:
        return image_processor(images, return_tensors='pt')['pixel_values']

    new_images = [
        image_processor.preprocess(image, return_tensors='pt', **preprocess_kwargs)['pixel_values'][0]
        for image in images
    ]
    # Guard the empty case: indexing new_images[0] below would raise IndexError.
    if new_images and all(x.shape == new_images[0].shape for x in new_images):
        new_images = torch.stack(new_images, dim=0)
    return new_images

def load_image(image_file):
    """Open an image from a URL or a local path and return it as RGB."""
    if image_file.startswith(('http://', 'https://')):
        raw = requests.get(image_file).content
        source = BytesIO(raw)
    else:
        source = image_file
    return Image.open(source).convert('RGB')

def longest_common_prefix(list1, list2):
    """Return (prefix, length): the longest shared leading run of two sequences.

    The prefix is a slice of ``list1``, so it keeps list1's type.
    """
    length = 0
    for a, b in zip(list1, list2):
        if a != b:
            break
        length += 1
    return list1[:length], length


def build_dataset_rank(tokenizer, split="train", select=None, continue_id=0):
    """Load the JSON conversation dataset, render each conversation with the
    llava_v1 template, tokenize it and build a per-token loss mask that
    supervises only the assistant replies.

    Returns a `datasets.Dataset` (torch format) with columns:
    conversation, input_ids, loss_mask, image, image_size.

    `split`, `select` and `continue_id` are kept for interface compatibility
    but are currently unused.
    """
    ds = load_dataset('json', data_files=json_data_path)
    ds = ds['train']
    ds = ds.shuffle(seed=42)
    ds1 = ds.select(range(args.start, args.end))
    original_columns1 = ds1.column_names
    num_proc = 1

    def preprocess_function(examples):
        new_examples = {
            "conversation": [],
            "input_ids": [],
            "loss_mask": [],
            "image": [],
            "image_size": []
        }
        for i in range(len(examples['id'])):
            conv_mode = "llava_v1"
            conv = conv_templates[conv_mode].copy()
            conv.system = ""
            roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
            source = examples['conversations'][i]
            # Skip empty conversations or ones opening with an unknown role.
            if source:
                if source[0]["from"] not in roles:
                    continue
            else:
                continue
            # Drop a leading assistant turn so the rendered prompt starts
            # with the user role.
            if roles[source[0]["from"]] != conv.roles[0]:
                source = source[1:]
            conv.messages = []
            for j, sentence in enumerate(source):
                if sentence["from"] not in roles:
                    continue
                role = roles[sentence["from"]]
                # Enforce strict human/gpt alternation; drop out-of-order turns.
                if role != conv.roles[j % 2]:
                    continue
                if sentence["from"] == "gpt":
                    sentence["value"] = " " + sentence["value"]
                conv.append_message(role, sentence["value"])
            conversation = conv.get_prompt()
            if not tokenizer.pad_token_id:
                tokenizer.pad_token_id = tokenizer.unk_token_id

            image_tensor = None
            image_size = None
            if "image" in examples and examples["image"][i] is not None:
                image_path = os.path.join(image_data_path, examples["image"][i])
                image = load_image(image_path)
                image_size = image.size
                image_tensor = process_images([image], image_processor, {"image_aspect_ratio": "pad"})

            input_ids = tokenizer_image_token(
                    conversation,
                    tokenizer,
                    IMAGE_TOKEN_INDEX,
                    return_tensors='pt'
            )

            # An image placeholder expands to 576 vision tokens, i.e. 575
            # extra positions beyond the single placeholder id.
            # (IMAGE_TOKEN_INDEX replaces the literal -200 used originally.)
            if IMAGE_TOKEN_INDEX in input_ids:
                loss_mask = torch.ones(input_ids.shape[0] + 575, dtype=input_ids.dtype)
                cur_len = 1 + 575
            else:
                loss_mask = torch.ones_like(input_ids)
                cur_len = 1

            sep = conv.sep + conv.roles[1] + ": "

            total_len = int(input_ids.ne(tokenizer.pad_token_id).sum())
            turns = conversation.split(conv.sep2)

            # Zero out everything except the assistant replies.
            loss_mask[:cur_len] = 0
            # Use a dedicated index here: the original reused `i`, shadowing
            # the outer sample index.
            for turn_idx, turn in enumerate(turns):
                if turn == "":
                    break
                turn_len = len(tokenizer(turn).input_ids)
                parts = turn.split(sep)
                if len(parts) != 2:
                    break
                parts[0] += sep
                # -2 compensates for the BOS and the separator's trailing token.
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2
                loss_mask[cur_len: cur_len + instruction_len] = 0
                cur_len += turn_len
                if turn_idx != 0 and not tokenizer.legacy:
                    # Non-legacy tokenizers emit one token fewer on later turns.
                    cur_len -= 1
            loss_mask[cur_len:] = 0

            new_examples["conversation"].append(conversation)
            new_examples["input_ids"].append(input_ids[None, :])
            new_examples["loss_mask"].append(loss_mask[None, :])
            new_examples["image"].append(image_tensor)
            new_examples["image_size"].append(image_size)

        return new_examples
    ds1 = ds1.map(
        preprocess_function,
        batched=True,
        num_proc=num_proc,
        remove_columns=original_columns1,
        load_from_cache_file=True
    )

    ds1.set_format(type="torch")
    return ds1

from llava.model.builder import load_pretrained_model
from llava.mm_utils import get_model_name_from_path
# Load the target model in fp16, sharded automatically across the GPUs
# selected above via CUDA_VISIBLE_DEVICES.
kwargs = {'torch_dtype': torch.float16, 'device_map': 'auto'}
big_model_model_name = get_model_name_from_path(bigname)
bigtokenizer, bigmodel, image_processor, _ = load_pretrained_model(bigname, None, big_model_model_name, **kwargs)
# Tokenize the requested slice up front. NOTE(review): continue_id is passed
# but build_dataset_rank currently ignores it; resume is handled later in
# save_data_to_lmdb.
ds = build_dataset_rank(bigtokenizer, continue_id=args.continue_id)
print(ds)
bigmodel.eval()

@torch.no_grad()
def ge(data):
    """Run one sample through the big model and return the features to store.

    Returns a dict (all tensors moved to CPU, batch dim stripped) with:
    input_ids, inputs_embeds, hidden_state (last layer), loss_mask.
    """
    input_ids = data["input_ids"].cuda()
    image = None
    image_size = None
    # Identity check, not `!=`: data["image"] may be a tensor, for which
    # `!=` would attempt an element-wise comparison.
    if data["image"] is not None:
        image = data["image"].to(dtype=torch.float16).cuda()
        image_size = data["image_size"].cuda()
    if image is None and -200 in input_ids:
        # Text-only sample that still carries the image placeholder id (-200):
        # neutralize it so the embedding lookup does not see a negative index.
        print("警告：发现图像标记但没有图像数据，替换为unk_token")
        input_ids = torch.where(input_ids == -200, bigtokenizer.unk_token_id, input_ids)
    inputs_embeds, _ = bigmodel.get_inputs_embeds(input_ids, image, image_size)
    outs_big = bigmodel(inputs_embeds=inputs_embeds, output_hidden_states=True)
    hidden_state_big = outs_big.hidden_states[-1]
    td = {
        "input_ids": input_ids.cpu()[0],
        "inputs_embeds": inputs_embeds.cpu()[0],
        "hidden_state": hidden_state_big.cpu()[0],
        "loss_mask": data["loss_mask"].cpu()[0]
    }
    return td

# Per-shard output directory. exist_ok avoids the check-then-create race
# of the original `if not exists: makedirs` pattern.
outdir = f'{args.outdir}/{args.index}'
os.makedirs(outdir, exist_ok=True)

# Defined at module level (not nested) so it can be pickled and shipped to
# ProcessPoolExecutor workers.
def compress_data(data):
    """Pickle `data` and compress it with blosc/zstd.

    Returns the compressed bytes, or None on any failure (callers check
    for None and skip the sample).
    """
    try:
        serialized_data = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
        # NOTE(review): clevel=9 is the *maximum* level — the original
        # comment claimed a lowered level for speed, which is incorrect.
        compressed_data = blosc.compress(
            serialized_data,
            clevel=9,  # maximum compression ratio (slowest setting)
            cname='zstd',
            shuffle=1,  # byte-shuffle filter
            typesize=8  # common element width of the stored tensors
        )
        return compressed_data
    except Exception as e:
        print(f"压缩数据时出错: {e}")
        return None

def _write_batch(env, batch):
    """Flush a list of (sample_id, compressed_bytes) pairs in one LMDB txn."""
    if not batch:
        return
    with env.begin(write=True) as txn:
        for sample_id, payload in batch:
            txn.put(f'sample_{sample_id:06d}'.encode(), payload)


def _drain_finished(compression_futures, current_batch):
    """Move results of completed compression futures into current_batch.

    Finished futures are removed from `compression_futures` in place;
    failed compressions are reported and skipped.
    """
    for future in list(compression_futures.keys()):
        if not future.done():
            continue
        sample_id = compression_futures.pop(future)
        try:
            compressed_data = future.result()
            if compressed_data:
                current_batch.append((sample_id, compressed_data))
            else:
                print(f"样本 {sample_id} 压缩失败")
        except Exception as e:
            print(f"获取样本 {sample_id} 压缩结果时出错: {e}")


def save_data_to_lmdb(ds, outdir, continue_id=0):
    """Extract features for every sample of `ds` with `ge` (serially, on the
    GPU), compress them in a process pool and store them in
    `<outdir>/features.lmdb`.

    Samples with id < continue_id are skipped (resume support). Keys are
    'sample_{id:06d}'; a final '__metadata__' entry describes the dump.
    """
    lmdb_path = f'{outdir}/features.lmdb'
    # Rough capacity estimate: ~20MB/sample, at least 3GB. LMDB map_size only
    # reserves address space, so over-estimating is cheap.
    estimated_size = len(ds) * 20 * 1024 * 1024
    map_size = max(estimated_size, 3 * 1024 * 1024 * 1024)
    print(f"创建LMDB数据库: {lmdb_path}")
    print(f"预估大小: {map_size/1e9:.1f}GB")

    env = lmdb.open(lmdb_path, map_size=map_size, readonly=False,
                   meminit=False, map_async=True)

    # Leave one core for the GPU-bound main thread; cap the pool size.
    cpu_count = os.cpu_count() or 4
    process_count = min(cpu_count - 1, 6)

    # Batch LMDB writes to amortize transaction overhead.
    batch_size = 5
    current_batch = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=process_count) as executor:
        compression_futures = {}  # future -> sample id
        for id, data in tqdm(enumerate(ds), total=len(ds), unit="samples", desc="Processing"):
            if id < continue_id:
                continue
            try:
                # Feature extraction stays serial in the main thread (GPU);
                # only compression is parallelized.
                outdata = ge(data)
                future = executor.submit(compress_data, outdata)
                compression_futures[future] = id

                _drain_finished(compression_futures, current_batch)
                if len(current_batch) >= batch_size:
                    _write_batch(env, current_batch)
                    current_batch = []

                # Backpressure: don't let pending compressions (and their
                # pickled tensors) pile up in memory.
                while len(compression_futures) > process_count * 2:
                    time.sleep(0.1)  # `time` was used but never imported originally
                    _drain_finished(compression_futures, current_batch)
                    if len(current_batch) >= batch_size:
                        _write_batch(env, current_batch)
                        current_batch = []

            except Exception as e:
                print(f"处理样本 {id} 时出错: {e}")
                continue

        # Collect all stragglers before leaving the executor context.
        for future, sample_id in compression_futures.items():
            try:
                compressed_data = future.result()
                if compressed_data:
                    current_batch.append((sample_id, compressed_data))
                else:
                    print(f"样本 {sample_id} 压缩失败")
            except Exception as e:
                print(f"获取样本 {sample_id} 压缩结果时出错: {e}")

        _write_batch(env, current_batch)
        current_batch = []

    # Store metadata describing the dump.
    with env.begin(write=True) as txn:
        metadata = {
            'total_samples': sum(1 for _ in txn.cursor().iternext(keys=True, values=False)),
            'sample_range': f"{args.start}-{args.end}",
            # Fixed: the payload really is blosc/zstd — the original
            # recorded 'zlib' here, which did not match compress_data.
            'compression_algorithm': 'zstd',
            'compression_level': 9,
            'storage_format': 'whole_sample'
        }
        txn.put(b'__metadata__', pickle.dumps(metadata, protocol=pickle.HIGHEST_PROTOCOL))
    env.close()
    print("LMDB数据库写入完成")
        
def load_lmdb_data(lmdb_path, sample_indices=None):
    """Read samples back from the LMDB database written by save_data_to_lmdb.

    Args:
        lmdb_path: path to the features.lmdb database.
        sample_indices: iterable of sample ids, or None for all samples
            recorded in the '__metadata__' entry.

    Returns:
        A list of dicts of torch tensors; missing or corrupt samples are
        reported and skipped.
    """
    env = lmdb.open(lmdb_path, readonly=True, lock=True, readahead=False, meminit=False)
    env.reader_check()  # clear stale reader slots left by crashed processes

    with env.begin() as txn:
        metadata = pickle.loads(txn.get(b'__metadata__'))
        total_samples = metadata['total_samples']

        if sample_indices is None:
            sample_indices = list(range(total_samples))

        result = []

        for idx in tqdm(sample_indices, desc="Loading from LMDB"):
            key = f'sample_{idx:06d}'.encode()
            compressed_data = txn.get(key)

            if compressed_data is None:
                print(f"警告: 未找到样本 {idx}")
                continue
            try:
                decompressed_data = blosc.decompress(compressed_data)
            except Exception as e:
                print(f"样本 {idx} 解压失败: {e}")
                continue
            try:
                sample_data = pickle.loads(decompressed_data)
            except Exception as e:
                print(f"样本 {idx} 反序列化失败: {e}")
                continue

            # The writer pickles torch tensors directly (see ge()), so
            # values are normally already tensors. The original called
            # torch.from_numpy unconditionally, which raises TypeError on
            # a tensor — only convert genuine numpy arrays.
            for field_name, arr in sample_data.items():
                if isinstance(arr, np.ndarray):
                    sample_data[field_name] = torch.from_numpy(arr)

            result.append(sample_data)

    env.close()
    return result

# Run the extraction and LMDB dump for the configured slice.
print("开始处理并保存数据到LMDB...")
save_data_to_lmdb(ds, outdir, continue_id=args.continue_id)

# Print usage examples for consumers of the dump.
lmdb_path = f'{outdir}/features.lmdb'
print(f"\n=== 读取示例 ===")
print("# 安装依赖:")
print("pip install lmdb blosc")
print("\n# 读取所有数据:")
print(f"all_data = load_lmdb_data('{lmdb_path}')")
print("\n# 读取特定样本:")
print(f"samples = load_lmdb_data('{lmdb_path}', sample_indices=[0, 1, 2])")
print("\n# 读取特定字段:")
# Fixed example: load_lmdb_data has no `field_names` parameter — the
# original printed an example that would raise TypeError if followed.
print(f"embeddings = [s['inputs_embeds'] for s in load_lmdb_data('{lmdb_path}', sample_indices=[0])]")

print(f"\n数据已保存到: {lmdb_path}")