import json
import os
import random
import shutil

import cv2
import pandas as pd
from tqdm import tqdm

# Seed the global RNG so the shuffles and splits below are reproducible
random.seed(42)

# Input locations: annotation file and the directory holding media files
DATA_JSON_PATH = "images/data.json"
IMAGE_DIR = "images/img"

# Output layout: one subdirectory per split under OUTPUT_DIR
OUTPUT_DIR = "qwen_vl_dataset"
TRAIN_DIR, VAL_DIR, TEST_DIR = (
    os.path.join(OUTPUT_DIR, split) for split in ("train", "val", "test")
)


def is_valid_image(file_path):
    """Return True if *file_path* begins with a known image magic number.

    Recognized formats: JPEG, PNG, GIF (87a/89a), BMP, TIFF (both byte
    orders). Only the header is inspected; the rest of the file is not
    validated.

    Raises:
        OSError: if the file cannot be opened (e.g. it does not exist).
    """
    # Magic-byte prefixes of the accepted formats; PNG's is the longest (8 bytes).
    signatures = (
        b"\xff\xd8\xff",        # JPEG
        b"\x89PNG\r\n\x1a\n",   # PNG
        b"GIF87a",              # GIF
        b"GIF89a",              # GIF
        b"BM",                  # BMP
        b"II*\x00",             # TIFF, little-endian
        b"MM\x00*",             # TIFF, big-endian
    )

    with open(file_path, "rb") as f:
        header = f.read(8)  # enough bytes for the longest signature

    # startswith accepts a tuple of prefixes, replacing the manual loop.
    return header.startswith(signatures)


def extract_first_frame(video_path, output_path):
    """Decode the first frame of *video_path* and write it to *output_path*.

    Args:
        video_path: path to a video file readable by OpenCV.
        output_path: path the extracted frame image is written to.

    Raises:
        Exception: if the video cannot be opened, no frame can be
            decoded, or the frame cannot be written to disk.
    """
    video = cv2.VideoCapture(video_path)
    try:
        if not video.isOpened():
            raise Exception(f"Could not open video {video_path}")

        ret, frame = video.read()
        if not ret:
            raise Exception(f"Could not read frame from video {video_path}")

        # imwrite signals failure via its return value, not an exception.
        if not cv2.imwrite(output_path, frame):
            raise Exception(f"Could not write frame to {output_path}")
    finally:
        # Always release the capture handle — the original leaked it,
        # especially on the error paths above.
        video.release()


# Make sure each per-split output directory exists before anything is written
for split_dir in [TRAIN_DIR, VAL_DIR, TEST_DIR]:
    os.makedirs(split_dir, exist_ok=True)

# Read the raw annotations: a dict mapping item_id -> item metadata
print("Loading data...")
with open(DATA_JSON_PATH, "r", encoding="utf-8") as f:
    data = json.load(f)

# Group items by their class label, normalizing paths and keeping the id
# on each item so later stages can build collision-free filenames.
label_data = {}
for item_id, item in data.items():
    # Normalize Windows-style separators so paths work on any platform
    item["url"] = item["url"].replace("\\", "/")

    # Carry the dict key on the item itself for downstream use
    item["id"] = item_id

    # setdefault replaces the manual "if label not in dict: init" dance
    label_data.setdefault(item["label"], []).append(item)

# Print statistics
print(f"Total items: {len(data)}")
print("Items per label:")
for label, items in label_data.items():
    print(f"  {label}: {len(items)}")

# Split every label's items into train/val/test so each class appears in
# each split with (roughly) the same proportions.
train_data, val_data, test_data = [], [], []

# Define split ratios
TRAIN_RATIO = 0.7
VAL_RATIO = 0.15
TEST_RATIO = 0.15

for items in label_data.values():
    random.shuffle(items)

    n_items = len(items)
    n_train = int(n_items * TRAIN_RATIO)
    n_val = int(n_items * VAL_RATIO)

    # Train gets the first chunk, val the next; test takes the remainder,
    # so integer-truncation leftovers land in the test split.
    train_data += items[:n_train]
    val_data += items[n_train:n_train + n_val]
    test_data += items[n_train + n_val:]

# Shuffle across labels so each split isn't ordered by class
for split in (train_data, val_data, test_data):
    random.shuffle(split)

# Print split statistics
print(f"\nTrain set size: {len(train_data)}")
print(f"Validation set size: {len(val_data)}")
print(f"Test set size: {len(test_data)}")


def convert_to_qwen_format(data_list, output_file, image_dir_dest):
    """Write *data_list* as Qwen2.5-VL conversation samples and stage images.

    For every item two training samples are emitted:
      1. image + prompt  -> category label
      2. product title   -> category label (text-only)

    Source files that are not valid images are treated as video clips and
    their first frame is extracted instead. Items whose media cannot be
    processed are skipped with a printed warning rather than aborting.

    Args:
        data_list: items carrying "id", "url", "title" and "label" keys.
        output_file: destination path. NOTE: despite the ".json" extension
            used by callers, the format is JSONL (one object per line).
        image_dir_dest: directory the staged image files are copied into.
    """
    formatted_data = []

    for item in tqdm(data_list, desc=f"Processing {os.path.basename(output_file)}"):
        # Source image path
        src_img_path = os.path.join(IMAGE_DIR, os.path.basename(item["url"]))

        # Prefix with the item id so files from different items never collide
        img_filename = f"{item['id']}_{os.path.basename(item['url'])}"
        dest_img_path = os.path.join(image_dir_dest, img_filename)

        # Stage the media file. The whole step is guarded so a missing or
        # unreadable source file skips this item instead of crashing the
        # run (previously only the video fallback was guarded).
        try:
            if is_valid_image(src_img_path):
                shutil.copy(src_img_path, dest_img_path)
            else:
                # Not a recognized image — assume it is a video clip
                extract_first_frame(src_img_path, dest_img_path)
                # Explicit raise instead of assert: asserts vanish under -O
                if not is_valid_image(dest_img_path):
                    raise Exception(
                        f"Extracted frame is not a valid image: {dest_img_path}"
                    )
        except Exception as e:
            print(f"Error processing {src_img_path}: {e}")
            continue

        # Sample 1: image + prompt -> category name
        image_classification_item = {
            "id": f"classification_{item['id']}",
            "image": img_filename,
            "conversations": [
                {
                    "from": "human",
                    "value": "<image>\n这张图片展示的是什么商品类别？直接输出类别名称，不要输出任何解释。",
                },
                {"from": "assistant", "value": f"{item['label']}"},
            ],
        }

        # Sample 2: product title -> category name (no image attached)
        title_classification_item = {
            "id": f"title_classification_{item['id']}",
            "conversations": [
                {
                    "from": "human",
                    "value": f"商品标题：\"{item['title']}\"\n请根据这个标题判断商品的类别。直接输出类别名称，不要输出任何解释。",
                },
                {"from": "assistant", "value": f"{item['label']}"},
            ],
        }

        formatted_data.append(image_classification_item)
        formatted_data.append(title_classification_item)

    # Save as JSONL: one JSON object per line, non-ASCII kept readable
    with open(output_file, "w", encoding="utf-8") as f:
        for entry in formatted_data:
            f.write(json.dumps(entry, ensure_ascii=False) + "\n")


# Each split stores its staged image files in an "images" subdirectory
train_img_dir = os.path.join(TRAIN_DIR, "images")
val_img_dir = os.path.join(VAL_DIR, "images")
test_img_dir = os.path.join(TEST_DIR, "images")

for img_dir in (train_img_dir, val_img_dir, test_img_dir):
    os.makedirs(img_dir, exist_ok=True)

# Convert and save datasets
print("\nPreparing Qwen2.5-vl datasets...")
convert_to_qwen_format(train_data, os.path.join(TRAIN_DIR, "train.json"), train_img_dir)
convert_to_qwen_format(val_data, os.path.join(VAL_DIR, "val.json"), val_img_dir)
convert_to_qwen_format(test_data, os.path.join(TEST_DIR, "test.json"), test_img_dir)

print("\nDataset preparation complete!")
print(f"Train dataset: {os.path.join(TRAIN_DIR, 'train.json')}")
print(f"Validation dataset: {os.path.join(VAL_DIR, 'val.json')}")
print(f"Test dataset: {os.path.join(TEST_DIR, 'test.json')}")