import os
import json
import time
import tqdm
import re
from typing import Dict, Optional
from concurrent.futures import ThreadPoolExecutor
from omics_bert_label_encoding.utils.llm_api import openai_api_list
from omics_bert_label_encoding.utils.processing_charls import CharlsProcessor
from zkl_aiutils_datasets import load_dataset
from zkl_pyutils_fsspec import resolve_fs


class MakeLabelsCharls:
    """Build multilingual label lists for CHARLS dataset columns.

    For each column, the codebook description is expanded via an LLM into
    paraphrases and translations; each variant is combined with the column's
    value type and survey instance into label strings, which are saved as one
    JSON file per column under ``<save_path>/labels``.
    """

    def __init__(
        self,
        dataset_path: str,
        save_path: str,
        dta_file_path: str,
        api_key: Optional[str] = None,
        instance_mapping: Optional[Dict[Optional[str], str]] = None,
    ):
        """
        Args:
            dataset_path: Path of the prepared CHARLS dataset to enumerate columns from.
            save_path: Directory under which the ``labels`` output folder is created.
            dta_file_path: Path to the codebook file consumed by ``CharlsProcessor``.
            api_key: API key forwarded to the OpenAI-compatible endpoint.
            instance_mapping: Maps column-name suffixes (e.g. ``"_18"``) to survey
                instance labels; the ``None`` key supplies the no-suffix default.
        """
        self.dataset_path = dataset_path
        self.save_path = save_path
        self.dta_file_path = dta_file_path
        self.api_key = api_key
        self.instance_mapping = instance_mapping
        self.charls_processor = CharlsProcessor(dta_file_path=self.dta_file_path)

    def title_to_title_list(self, title: str):
        """Return ``(paraphrase_list, translation_list)`` for *title* via the LLM API.

        Two separate requests are made: one for 5 English paraphrases, one for
        translations into 6 languages. Both responses are expected as Python lists.
        """
        base_url = "https://api.v3.cm/v1"

        prompt1 = "Please generate 5 paraphrased versions of the following sentence. Make sure you fully understand the original meaning before rewriting. The paraphrased sentences must convey exactly the same meaning as the original. Output in Python list format. The original sentence is: \n"
        prompt2 = "Please translate the following sentence into Arabic, Chinese, French, Russian, Spanish and Portuguese. Make sure you fully understand the original meaning before translating. The translated sentences must convey exactly the same meaning as the original. Output in Python list format, displaying only the translated sentences, without language tags. The original sentence is:\n"

        synonym_list = openai_api_list(prompt1 + title, self.api_key, model_name='gpt-4.1', base_url=base_url)
        translation_list = openai_api_list(prompt2 + title, self.api_key, model_name='gpt-4.1', base_url=base_url)

        return synonym_list, translation_list

    def parse_column_instance(self, column_name: str) -> str:
        """Map a column name's numeric suffix (e.g. ``"_18"``) to its instance label.

        Returns ``"baseline"`` when no mapping is configured or when the
        suffix-less default applies, and ``"unknown"`` for an unmapped suffix.
        """
        if not self.instance_mapping:
            return "baseline"

        # Suffix is an underscore followed by digits at the very end of the name.
        match = re.search(r'(_\d+)$', column_name)
        if match:
            suffix = match.group(1)
            return self.instance_mapping.get(suffix, "unknown")
        else:
            # No suffix: the None key of the mapping holds the default label.
            return self.instance_mapping.get(None, "baseline")

    def build_label_list(self, title_list, value_type, instance):
        """Combine each title with *value_type* and *instance* into label strings.

        ``None`` parts are omitted, so e.g. ``instance=None`` yields labels of
        the form ``"value_type:<t>, title:<title>"``.
        """
        def build_label(**kwargs):
            parts = [f"{k}:{v}" for k, v in kwargs.items() if v is not None]
            return ", ".join(parts)

        return [
            build_label(value_type=value_type, instance=instance, title=t)
            for t in title_list
        ]

    def process_column_name(self, column_name: str):
        """Generate and save the label list for one column (thread-pool worker).

        Skips columns that already have an output file, the ``eid`` identifier
        column, and columns without a codebook description. On failure the
        partially written file is removed so a rerun regenerates it.
        """
        if hasattr(self, 'existing_columns') and column_name in self.existing_columns:
            return

        if column_name == 'eid':
            print(f"Skip {column_name}, it's the eid column.")
            return

        # Compute the output path up front so the error handler can always
        # reference it (the original used a fragile `in locals()` check).
        save_file_path = os.path.join(self.save_labels_list_dir, f"{column_name}.json")

        try:
            description = self.charls_processor.columns_name_to_description.get(column_name)
            if not description:
                print(f"No description found for {column_name}, skipping.")
                return

            value_type = self.charls_processor.get_column_data_types().get(column_name, "unknown")
            instance = self.parse_column_instance(column_name)

            synonym_list, translation_list = self.title_to_title_list(description)
            title_list = [description] + synonym_list + translation_list

            label_list = self.build_label_list(title_list, value_type, instance)

            with open(save_file_path, 'w', encoding='utf-8') as f:
                json.dump(label_list, f, ensure_ascii=False, indent=4)
            # Throttle between columns to stay under API rate limits.
            time.sleep(1)

        except Exception as e:  # broad on purpose: a worker must not kill the pool
            # Drop any partially written output so a rerun regenerates this column.
            if os.path.exists(save_file_path):
                os.remove(save_file_path)
            print(f"Error processing column {column_name}: {e}. END OF THIS COLUMN.")
            # Back off longer after a failure (likely an API error).
            time.sleep(10)

    def main(self):
        """Enumerate dataset columns and generate label files concurrently."""
        fs = resolve_fs(self.dataset_path)
        dataset = load_dataset(fs)
        print(dataset)

        columns_names = []
        for child in dataset.named_children.values():
            columns_names.extend(child.columns_name)

        self.save_labels_list_dir = os.path.join(self.save_path, "labels")
        os.makedirs(self.save_labels_list_dir, exist_ok=True)

        # Resume support: columns that already have a JSON file are skipped.
        self.existing_columns = {
            file[:-5]  # strip the ".json" extension
            for file in os.listdir(self.save_labels_list_dir)
            if file.endswith('.json')
        }

        with ThreadPoolExecutor(max_workers=8) as executor:
            # Drain the lazy map so exceptions surface and tqdm tracks progress.
            list(tqdm.tqdm(
                executor.map(self.process_column_name, columns_names),
                total=len(columns_names),
                desc='Processing all columns',
            ))

        print("All labels have been generated and saved.")

if __name__ == '__main__':
    # Resolve the project root three levels above this file.
    project_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))

    # Input dataset location and output destinations.
    dataset_path = "/home/xw/python3test/omics-bert-datasets/datasets/ready/charls/v1"
    save_path = os.path.join(project_dir_path, "datasets/ready/charls/v1")
    # NOTE: despite the variable name, this currently points at the CSV
    # codebook rather than an actual .dta file.
    dta_file_path = os.path.join(project_dir_path, "datasets/raw/charls/v1/codebook.csv")
    api_key = ""

    # Column-name suffix -> survey wave; the None key is the no-suffix default.
    instance_mapping = {
        "_4": "2004",
        "_18": "2018",
        None: "baseline",
    }

    MakeLabelsCharls(
        dataset_path,
        save_path,
        dta_file_path,
        api_key,
        instance_mapping=instance_mapping,
    ).main()