import os
import json
import argparse
import pandas as pd
import io
from datasets import Dataset, DatasetDict, Image as DsImage
from huggingface_hub import login
from PIL import Image

# On-disk dataset layout; must stay in sync with prepare_dataset.py.
OUTPUT_DIR = "qwen_vl_dataset"
TRAIN_DIR, VAL_DIR, TEST_DIR = (
    os.path.join(OUTPUT_DIR, split) for split in ("train", "val", "test")
)

def load_jsonl(file_path):
    """Parse a JSONL file and return its records as a list.

    Args:
        file_path: Path to a UTF-8 encoded file with one JSON object per line.

    Returns:
        A list with one deserialized object per input line.
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        return [json.loads(row) for row in handle]

def load_and_convert_image(image_path):
    """Load an image from file and return its re-encoded bytes.

    Args:
        image_path: Filesystem path of the image to read.

    Returns:
        The image encoded as bytes (in its original format when PIL can
        detect it, JPEG otherwise), or None when the file cannot be read.
    """
    try:
        # Use a context manager: PIL loads lazily and Image.open() would
        # otherwise leave the underlying file handle open.
        with Image.open(image_path) as img:
            buffer = io.BytesIO()
            # img.format is None for images without a recognized container,
            # so fall back to JPEG encoding.
            img.save(buffer, format=img.format or "JPEG")
            return buffer.getvalue()
    except Exception as e:
        # Deliberately best-effort: log the failure and return None so one
        # bad file does not abort the whole dataset build.
        print(f"Error loading image {image_path}: {e}")
        return None

def process_dataset_split(jsonl_path, images_dir):
    """Read one split's JSONL file and build column lists for a Dataset.

    Args:
        jsonl_path: Path to the split's JSONL annotation file.
        images_dir: Directory holding the split's image files.

    Returns:
        A dict with "id", "conversations" (JSON-serialized strings) and
        "image" (raw bytes, or None for text-only examples) columns.
    """
    records = load_jsonl(jsonl_path)

    columns = {"id": [], "conversations": [], "image": []}
    for record in records:
        columns["id"].append(record["id"])
        # Serialize conversations so the column is a plain string type;
        # they are deserialized again during post-processing.
        columns["conversations"].append(json.dumps(record["conversations"]))

        # Only multimodal examples carry an "image" key.
        if "image" in record:
            image_bytes = load_and_convert_image(
                os.path.join(images_dir, record["image"])
            )
            columns["image"].append(image_bytes)
        else:
            columns["image"].append(None)

    return columns

def create_hf_dataset(repository_id, token=None):
    """Build train/validation/test splits and push them to the HF Hub.

    Args:
        repository_id: Target repository in the form "username/dataset-name".
        token: Optional Hugging Face API token; when given, it is used to
            log in before uploading.
    """
    # Authenticate up front when a token was supplied.
    if token:
        login(token=token)

    split_specs = [
        ("train", TRAIN_DIR, "train.json"),
        ("validation", VAL_DIR, "val.json"),
        ("test", TEST_DIR, "test.json"),
    ]

    # Load every split's raw columns first so all "Loading ..." messages
    # precede any "Creating ..." message.
    raw_splits = {}
    for split_name, split_dir, json_name in split_specs:
        print(f"Loading {split_name} data...")
        raw_splits[split_name] = process_dataset_split(
            os.path.join(split_dir, json_name),
            os.path.join(split_dir, "images"),
        )

    built_splits = {}
    for split_name, _, _ in split_specs:
        print(f"Creating {split_name} dataset...")
        built_splits[split_name] = Dataset.from_dict(raw_splits[split_name])

    dataset_dict = DatasetDict(built_splits)

    def restore_examples(examples):
        # Deserialize the conversations back into structured objects.
        examples["conversations"] = [
            json.loads(conv) for conv in examples["conversations"]
        ]
        # Wrap raw bytes so the Image feature can decode them later; rows
        # without an image stay None.
        if "image" in examples and any(b is not None for b in examples["image"]):
            examples["image"] = [
                {"bytes": b} if b is not None else None for b in examples["image"]
            ]
        return examples

    dataset_dict = dataset_dict.map(
        restore_examples,
        batched=True,
        desc="Processing conversations and images"
    )

    # Decode the stored byte payloads as PIL images on access.
    dataset_dict = dataset_dict.cast_column("image", DsImage())

    print(f"Pushing dataset to {repository_id}...")
    dataset_dict.push_to_hub(
        repository_id,
        private=False,
        token=token
    )

    print(f"Dataset successfully uploaded to https://huggingface.co/datasets/{repository_id}")
    print(f"Dataset follows ShareGPT format with embedded images")

if __name__ == "__main__":
    # CLI entry point: parse arguments, resolve the auth token, upload.
    parser = argparse.ArgumentParser(
        description="Upload Qwen dataset to Hugging Face Hub in ShareGPT format"
    )
    parser.add_argument(
        "--repo_id",
        type=str,
        required=True,
        help="Repository ID in the format 'username/dataset-name'",
    )
    parser.add_argument(
        "--token",
        type=str,
        help="Hugging Face API token (or set HF_TOKEN environment variable)",
    )
    cli_args = parser.parse_args()

    # An explicit --token wins; otherwise fall back to the HF_TOKEN env var.
    hf_token = cli_args.token or os.environ.get("HF_TOKEN")
    if not hf_token:
        print("警告：未提供 Hugging Face 令牌。您可能需要交互式登录。")

    create_hf_dataset(cli_args.repo_id, hf_token)