import torch
from transformers import LlavaForConditionalGeneration, LlavaProcessor
from torch.utils.data import Dataset, DataLoader
from torch.nn.parallel import DataParallel
import argparse
from tqdm import tqdm
import os
from PIL import Image, ImageDraw
# Dataset pairing image file paths with text prompts.
class ImageTextDataset(Dataset):
    """Lazily load one (image, prompt) pair per index.

    Args:
        image_paths: list of paths to image files.
        texts: list of text prompts, parallel to ``image_paths``.
        processor: LLaVA processor (stored for downstream use; not used here).
    """

    def __init__(self, image_paths, texts, processor):
        # Parallel lists: texts[i] is the prompt for image_paths[i].
        self.image_paths = image_paths
        self.texts = texts
        self.processor = processor

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Force RGB so grayscale/RGBA/palette files don't break the image
        # processor, which expects 3-channel input.
        image = Image.open(self.image_paths[idx]).convert("RGB")
        return {"image": image, "text": self.texts[idx]}

# Collate a list of {"image", "text"} samples into one batched model input.
def preprocess_batch(batch, processor):
    """Run the processor once over the whole batch.

    Args:
        batch: list of dicts with "image" (PIL.Image) and "text" (str) keys,
            as produced by ImageTextDataset.
        processor: LLaVA processor handling tokenization + image preprocessing.

    Returns:
        A single dict of batched tensors (padded/truncated to max_length=512),
        suitable for ``model(**inputs)``. The previous per-sample loop made
        ``padding=True`` a no-op and returned a list the caller could not unpack.
    """
    images = [item["image"] for item in batch]
    texts = [item["text"] for item in batch]
    # One batched call pads every sample to a common length; keyword arguments
    # guard against processor signature-order differences across versions.
    inputs = processor(
        images=images,
        text=texts,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512,
    )
    # Device placement is left to the caller: with device_map="auto" the model
    # may be sharded across GPUs, so a blanket .cuda() here would be wrong.
    return inputs

# Multi-GPU batched inference driver.
def parallel_inference(model_path, image_paths, texts, batch_size=8, num_workers=4):
    """Generate one answer per (image, prompt) pair.

    Args:
        model_path: HF model directory or hub id of a LLaVA checkpoint.
        image_paths: list of image file paths.
        texts: list of prompts, parallel to ``image_paths``.
        batch_size: samples per generation step.
        num_workers: DataLoader worker processes.

    Returns:
        list[str]: decoded generations, one per input, in input order.
    """
    print("加载模型中...")
    # device_map="auto" already shards/places the model across the available
    # GPUs. Wrapping that sharded model in DataParallel and calling .cuda()
    # (as the old code did) conflicts with the placement and also hides
    # .generate() behind the wrapper, so the DataParallel layer is removed.
    model = LlavaForConditionalGeneration.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        attn_implementation="eager",
        device_map="auto",
    )
    model.eval()

    processor = LlavaProcessor.from_pretrained(model_path, patch_size=14, use_fast=True)

    dataset = ImageTextDataset(image_paths, texts, processor)
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,  # keep output order aligned with input order
        num_workers=num_workers,
        collate_fn=lambda samples: samples,  # keep raw dicts; batching happens in the processor
    )

    all_predictions = []
    print("开始推理...")
    with torch.no_grad():
        for batch in tqdm(dataloader):
            images = [item["image"] for item in batch]
            # llava-1.5-hf checkpoints require an <image> placeholder in the
            # prompt; wrap bare prompts in the model's chat format.
            prompts = [
                item["text"] if "<image>" in item["text"]
                else f"USER: <image>\n{item['text']} ASSISTANT:"
                for item in batch
            ]
            # One batched processor call (the old per-sample loop returned a
            # list that model(**inputs) could not unpack).
            inputs = processor(
                images=images,
                text=prompts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            ).to(model.device)

            # Autoregressive decoding: a single forward pass + logits.argmax
            # (the old code) does not produce a generated continuation.
            output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
            # Drop the prompt tokens so only the newly generated text remains.
            new_tokens = output_ids[:, inputs["input_ids"].shape[1]:]
            predictions = processor.batch_decode(new_tokens, skip_special_tokens=True)
            all_predictions.extend(predictions)

    return all_predictions

# CLI entry point.
def main():
    """Parse CLI args, run batched LLaVA inference, write predictions.txt."""
    parser = argparse.ArgumentParser(description="LLaVA多GPU并行推理")
    parser.add_argument("--model_path", type=str, help="模型路径", default="/media/dual-4090/sata1/GuoTianxing/model/VLM/llava-v1.5-7b-hf")
    parser.add_argument("--image_dir", type=str, help="图像目录", default="/media/dual-4090/sata1/GuoTianxing/dataset/vqav2/vqa_dataset/images/val2014")
    parser.add_argument("--text_file", type=str, help="文本文件路径，每行对应一张图像的提示", default=None)
    parser.add_argument("--batch_size", type=int, default=8, help="批次大小")
    parser.add_argument("--num_workers", type=int, default=4, help="数据加载工作线程数")
    args = parser.parse_args()

    # Collect image paths in deterministic (sorted) order; the filter is now
    # case-insensitive and also accepts .jpeg.
    image_files = sorted(os.listdir(args.image_dir))
    image_paths = [
        os.path.join(args.image_dir, f)
        for f in image_files
        if f.lower().endswith((".png", ".jpg", ".jpeg"))
    ]

    # --text_file was previously accepted but ignored (its reading code was
    # commented out); honor it when given, otherwise fall back to one default
    # prompt per image.
    if args.text_file:
        with open(args.text_file, "r", encoding="utf-8") as f:
            texts = [line.strip() for line in f]
    else:
        texts = ["Describe the image in detail."] * len(image_paths)

    # Validate BEFORE announcing that the counts match (the old code printed
    # the "matched" message unconditionally, ahead of the check).
    if len(image_paths) != len(texts):
        raise ValueError(f"图像数量({len(image_paths)})与文本提示数量({len(texts)})不匹配")
    print(f"图像数量({len(image_paths)})与文本提示数量({len(texts)})匹配")

    predictions = parallel_inference(
        args.model_path,
        image_paths,
        texts,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
    )

    # One prediction per line, in input order.
    with open("predictions.txt", "w", encoding="utf-8") as f:
        f.writelines(f"{pred}\n" for pred in predictions)

    print(f"推理完成，结果已保存到 predictions.txt")

if __name__ == "__main__":
    # The stray post-main() assignments (model_path / image_path) were dead
    # code left over from debugging and have been removed.
    main()