# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pandas as pd
from pathlib import Path
import shutil
from huggingface_hub import snapshot_download
import json


def add_object_id_to_parquet(parquet_path: str) -> None:
    """Ensure the parquet file at *parquet_path* has an 'object_id' column.

    When the column is missing it is filled with sequential integers
    0..n-1 and the file is rewritten in place; when it already exists the
    file is left untouched. Any failure is printed rather than raised
    (best-effort, matching the rest of this script).

    Args:
        parquet_path: Path to the parquet file.
    """
    try:
        frame = pd.read_parquet(parquet_path)
        if 'object_id' in frame.columns:
            # Nothing to do — column already present.
            print(f"{parquet_path} already contains the 'object_id' column")
            return
        # Assign row-order ids and persist back to the same file.
        frame['object_id'] = range(len(frame))
        frame.to_parquet(parquet_path, index=False)
    except Exception as exc:
        print(f"Error processing {parquet_path}: {exc}")


def process_ground_truth(dataset_path: str, query_path: str = None, target_path: str = None, ground_truth_ori_path: str = None) -> None:
    """
    Process the original ground_truth_ori.parquet and generate ground_truth.parquet
    containing 'query_id' and 'target_id' columns.

    Each output row maps a query's object_id (stringified) to the list of
    stringified object_ids of its relevant targets. Queries with no resolvable
    relevant targets are omitted. All errors are printed, not raised.

    Args:
        dataset_path (str): Path to the dataset directory.
        query_path (str, optional): Path to query.parquet. Defaults to
            <dataset_path>/query/query.parquet.
        target_path (str, optional): Path to target.parquet. Defaults to
            <dataset_path>/target/target.parquet.
        ground_truth_ori_path (str, optional): Path to original qrels file.
            Defaults to <dataset_path>/ground_truth/ground_truth_ori.parquet.
    """
    try:
        if not query_path:
            query_path = os.path.join(dataset_path, "query", "query.parquet")
        if not target_path:
            target_path = os.path.join(dataset_path, "target", "target.parquet")
        if not ground_truth_ori_path:
            ground_truth_ori_path = os.path.join(dataset_path, "ground_truth", "ground_truth_ori.parquet")

        # Ensure all necessary files exist
        if not all(os.path.exists(path) for path in [query_path, target_path, ground_truth_ori_path]):
            print(f"Missing necessary files, cannot process {dataset_path}")
            return

        # Build output file path
        ground_truth_output_path = os.path.join(dataset_path, "ground_truth", "ground_truth.parquet")

        # Read data
        query_df = pd.read_parquet(query_path)
        target_df = pd.read_parquet(target_path)
        ground_truth_ori_df = pd.read_parquet(ground_truth_ori_path)

        # Align column names: rename query_id/target_id to qid/pid if present
        if 'query_id' in ground_truth_ori_df.columns and 'target_id' in ground_truth_ori_df.columns:
            ground_truth_ori_df = ground_truth_ori_df.rename(columns={'query_id': 'qid', 'target_id': 'pid'})

        # Ensure query_df and target_df have 'object_id' columns
        if 'object_id' not in query_df.columns:
            add_object_id_to_parquet(query_path)
            query_df = pd.read_parquet(query_path)

        if 'object_id' not in target_df.columns:
            add_object_id_to_parquet(target_path)
            target_df = pd.read_parquet(target_path)

        # Build mapping from target IDs to object IDs
        target_id_to_object_id = dict(zip(target_df['id'], target_df['object_id']))

        # Group the qrels by query id in a single pass instead of re-filtering
        # the whole qrels frame for every query: O(q + r) instead of O(q * r).
        qid_to_pids = {}
        if 'qid' in ground_truth_ori_df.columns and 'pid' in ground_truth_ori_df.columns:
            for qid, pid in zip(ground_truth_ori_df['qid'], ground_truth_ori_df['pid']):
                qid_to_pids.setdefault(qid, []).append(pid)

        result = []

        # Resolve each query's relevant targets to their object ids.
        # Column-wise zip avoids the per-row overhead of DataFrame.iterrows().
        for query_id, query_object_id in zip(query_df['id'], query_df['object_id']):
            # Preserve qrels order; skip pids absent from the target set.
            target_object_ids = [
                str(target_id_to_object_id[pid])
                for pid in qid_to_pids.get(query_id, [])
                if pid in target_id_to_object_id
            ]
            if target_object_ids:
                result.append({
                    "query_id": str(query_object_id),
                    "target_id": target_object_ids
                })

        # Create new dataframe and save
        if result:
            result_df = pd.DataFrame(result)
            result_df.to_parquet(ground_truth_output_path, index=False)
        else:
            print(f"No matching ground truth data found, cannot create {ground_truth_output_path}")
    except Exception as e:
        print(f"Error processing ground truth: {e}")


def process_dataset_directory(dataset_path: str) -> None:
    """Run the per-file fix-ups for one dataset directory.

    Adds an 'object_id' column to query.parquet and target.parquet when
    those files exist, then regenerates ground_truth.parquet if the
    original qrels file is present.

    Args:
        dataset_path (str): Path to the dataset directory.
    """
    # Backfill object ids on the query and target tables, when present.
    for subdir, filename in (("query", "query.parquet"), ("target", "target.parquet")):
        parquet_path = os.path.join(dataset_path, subdir, filename)
        if os.path.exists(parquet_path):
            add_object_id_to_parquet(parquet_path)

    # Rebuild the final ground truth only if the raw qrels file exists.
    qrels_path = os.path.join(dataset_path, "ground_truth", "ground_truth_ori.parquet")
    if os.path.exists(qrels_path):
        process_ground_truth(dataset_path)


def main():
    """Find all '*Retrieval' directories next to this script and process each."""
    # The retrieval datasets live alongside this script.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # Collect sibling directories whose name ends with "Retrieval".
    dataset_dirs = [
        os.path.join(base_dir, entry)
        for entry in os.listdir(base_dir)
        if os.path.isdir(os.path.join(base_dir, entry)) and entry.endswith("Retrieval")
    ]

    print(f"Found {len(dataset_dirs)} dataset directories")

    # Process each dataset directory
    for dataset_dir in dataset_dirs:
        print(f"\nProcessing {dataset_dir}")
        process_dataset_directory(dataset_dir)


def fetch_retrieval_dataset_from_hf(dataset_repo: str, out_root: Path):
    """Fetch C-MTEB retrieval dataset and split into query/target/ground_truth."""
    # 1. Clone dataset repo and copy original files
    repo_dir = snapshot_download(repo_id=dataset_repo, repo_type="dataset")
    data_dir = Path(repo_dir) / "data"
    corpus_files = list(data_dir.glob("corpus*"))
    query_files = list(data_dir.glob("queries*"))
    if not corpus_files or not query_files:
        raise ValueError("Cannot find corpus or queries in data dir")
    corpus_file = corpus_files[0]
    query_file = query_files[0]

    # Prepare output directories with new structure
    name = dataset_repo.split("/")[-1]
    dataset_dir = out_root / name

    # Create subdirectories for query, target, and ground_truth
    query_dir = dataset_dir / "query"
    target_dir = dataset_dir / "target"
    ground_truth_dir = dataset_dir / "ground_truth"

    # Create all directories
    query_dir.mkdir(parents=True, exist_ok=True)
    target_dir.mkdir(parents=True, exist_ok=True)
    ground_truth_dir.mkdir(parents=True, exist_ok=True)

    # Also create ori_data for reference
    ori_dir = dataset_dir / "ori_data"
    ori_dir.mkdir(parents=True, exist_ok=True)

    # Copy original files
    shutil.copy(corpus_file, ori_dir / corpus_file.name)
    shutil.copy(query_file, ori_dir / query_file.name)

    # 2. Build target.parquet (support parquet or JSON with fallback)
    corpus_path = ori_dir / corpus_file.name

    if corpus_path.suffix == '.parquet':
        df_corpus = pd.read_parquet(corpus_path)
    else:
        try:
            df_corpus = pd.read_json(corpus_path, lines=True)
        except UnicodeDecodeError:
            # fallback for encoding issues
            for encoding in ['latin1', 'gb18030', 'gbk', 'utf-8-sig']:
                try:
                    df_corpus = pd.read_json(corpus_path, lines=True, encoding=encoding)
                    break
                except:
                    continue
        except Exception as e:
            print(f"Error reading corpus: {e}")
            raise

    # Rename columns - handle different possible formats
    if len(df_corpus.columns) >= 2:
        col_mapping = {}
        col_mapping[df_corpus.columns[0]] = "id"
        col_mapping[df_corpus.columns[1]] = "text"
        df_corpus = df_corpus.rename(columns=col_mapping)
    elif 'id' in df_corpus.columns and 'text' in df_corpus.columns:
        # Already has correct columns
        pass
    elif 'pid' in df_corpus.columns and 'passage' in df_corpus.columns:
        df_corpus = df_corpus.rename(columns={'pid': 'id', 'passage': 'text'})
    else:
        raise ValueError(f"Cannot identify id and text columns in corpus file. Columns: {df_corpus.columns.tolist()}")

    # Add object_id column with string from
    df_corpus['object_id'] = [str(i) for i in range(len(df_corpus))]

    # Save to target directory
    target_path = target_dir / 'target.parquet'
    df_corpus[['id', 'text', 'object_id']].to_parquet(target_path, index=False)

    # 3. Build query.parquet (with encoding fallback)
    query_path = ori_dir / query_file.name

    df_query = None
    try:
        df_query = pd.read_json(query_path, lines=True)
    except UnicodeDecodeError:
        # Try with different encodings
        for encoding in ['latin1', 'gb18030', 'gbk', 'utf-8-sig']:
            try:
                df_query = pd.read_json(query_path, lines=True, encoding=encoding)
                break
            except Exception as e:
                print(f"Failed with {encoding}: {e}")
                continue
    except ValueError as e:
        print(f"JSON parsing error: {e}")
        # Try reading as regular JSON (not lines)
        try:
            df_query = pd.read_json(query_path)
        except:
            # Last resort: read line by line
            with open(query_path, 'r', encoding='utf-8', errors='ignore') as f:
                data = []
                for i, line in enumerate(f):
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        data.append(json.loads(line))
                    except json.JSONDecodeError as e:
                        print(f"Error parsing line {i}: {e}")
                        print(f"Line content: {line[:100]}...")
                        continue
                if data:
                    df_query = pd.DataFrame(data)

    if df_query is None or len(df_query) == 0:
        # Check if it's a parquet file instead
        if query_path.suffix == '.parquet':
            try:
                df_query = pd.read_parquet(query_path)
            except:
                pass

        if df_query is None or len(df_query) == 0:
            raise ValueError(f"Failed to read query file or file is empty: {query_path}")

    # Rename columns - handle different possible formats
    if len(df_query.columns) >= 2:
        # Use position-based renaming
        col_mapping = {}
        col_mapping[df_query.columns[0]] = "id"
        col_mapping[df_query.columns[1]] = "text"
        df_query = df_query.rename(columns=col_mapping)
    elif 'id' in df_query.columns and 'text' in df_query.columns:
        # Already has correct columns
        pass
    elif 'qid' in df_query.columns and 'query' in df_query.columns:
        df_query = df_query.rename(columns={'qid': 'id', 'query': 'text'})
    else:
        raise ValueError(f"Cannot identify id and text columns in query file. Columns: {df_query.columns.tolist()}")

    # Add object_id column with string from
    df_query['object_id'] = [str(i) for i in range(len(df_query))]

    # Save to query directory
    query_out_path = query_dir / "query.parquet"
    df_query[["id", "text", "object_id"]].to_parquet(query_out_path, index=False)

    # 4. Fetch qrels and build ground_truth.parquet
    qrels_repo = dataset_repo + '-qrels'

    try:
        repo_qrels = snapshot_download(repo_id=qrels_repo, repo_type='dataset')

        # Copy qrels files to ori_data for reference
        qrels_ori_dir = ori_dir / "qrels"
        qrels_ori_dir.mkdir(exist_ok=True)

        # Find all possible qrels files (including parquet)
        qrels_files = (list(Path(repo_qrels).rglob('*.tsv')) +
                       list(Path(repo_qrels).rglob('*.csv')) +
                       list(Path(repo_qrels).rglob('*.txt')) +
                       list(Path(repo_qrels).rglob('*.parquet')))

        if not qrels_files:
            raise ValueError("Cannot find qrels files")

        # Copy all qrels files to ori_data
        for qf in qrels_files:
            shutil.copy(qf, qrels_ori_dir / qf.name)

        # Load qrels data
        df_qrels = None
        for qf in qrels_files:
            ext = qf.suffix.lower()
            try:
                if ext == '.parquet':
                    # Load parquet file
                    df_qrels = pd.read_parquet(qf)
                elif ext in [".tsv", ".csv"]:
                    sep = "\t" if ext == ".tsv" else ","
                    df_qrels = pd.read_csv(qf, sep=sep, header=None, names=["query_id", "target_id", "score"])
                elif ext == ".txt":
                    # Try tab-separated first, then space-separated
                    try:
                        df_qrels = pd.read_csv(qf, sep="\t", header=None, names=["query_id", "target_id", "score"])
                    except:
                        df_qrels = pd.read_csv(qf, sep=" ", header=None, names=["query_id", "target_id", "score"])

                if df_qrels is not None and len(df_qrels) > 0:
                    break
            except Exception as e:
                print(f"Failed to load {qf.name}: {e}")
                continue

        if df_qrels is None or len(df_qrels) == 0:
            raise ValueError("Failed to load any qrels data")

        # Save df_qrels as ground_truth_ori.parquet
        ground_truth_ori_path = ori_dir / "ground_truth_ori.parquet"
        df_qrels.to_parquet(ground_truth_ori_path, index=False)

        # Call process_ground_truth to create final ground_truth.parquet
        query_path = str(query_dir / "query.parquet")
        target_path = str(target_dir / "target.parquet")
        ground_truth_ori_path_str = str(ground_truth_ori_path)

        process_ground_truth(str(dataset_dir), query_path=query_path, target_path=target_path,
                             ground_truth_ori_path=ground_truth_ori_path_str)

    except Exception as e:
        print(f"Error fetching qrels: {e}")
        import traceback
        traceback.print_exc()
        raise

    print(f"\nSuccessfully saved retrieval dataset at {dataset_dir}")
    print(f"Directory structure:")
    print(f"  {dataset_dir}/")
    print(f"    ├── query/query.parquet ({len(df_query)} queries)")
    print(f"    ├── target/target.parquet ({len(df_corpus)} documents)")
    print(f"    ├── ground_truth/ground_truth.parquet")
    print(f"    └── ori_data/ (original files for reference)")


if __name__ == "__main__":
    import argparse

    # CLI entry point: download one C-MTEB retrieval dataset and restructure it.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--dataset_repo", type=str, default="C-MTEB/CmedqaRetrieval")
    arg_parser.add_argument("--out_root", type=str, default=str(Path(__file__).parent))
    cli_args = arg_parser.parse_args()
    fetch_retrieval_dataset_from_hf(cli_args.dataset_repo, Path(cli_args.out_root))
