import os
import sys
import re
import json
import time
from concurrent.futures import ThreadPoolExecutor

import tqdm
import numpy as np



from zkl_aiutils_datasets import load_dataset
from zkl_pyutils_fsspec import FsLike, resolve_fs

from ..utils.processing_uk import UKBioBankProcessor
from ..utils.llm_api import openai_api_list
from ..utils.parse_column_name_uk import parse_column_name_string_uk


class MakeLabelsUK:
    """Generate paraphrased/translated text labels for UK Biobank dataset columns.

    For each column of the dataset, field metadata (title, notes, instance,
    category) is combined with LLM-generated paraphrases and translations to
    build a list of label strings, which are saved as one JSON file per
    column under ``<save_path>/labels``.
    """

    def __init__(self, dataset_path: str, save_path: str, id_file_path: str, encoding_file_path: str,
                 instance_file_path: str, categories_file_path: str, api_key: str = None):
        # Metadata lookup tables; populated in main() from UKBioBankProcessor.
        self.categories = None
        self.instances = None
        self.encodings = None
        self.fields = None

        self.dataset_path = dataset_path
        self.save_path = save_path
        self.id_file_path = id_file_path
        self.encoding_file_path = encoding_file_path
        self.instance_file_path = instance_file_path
        self.categories_file_path = categories_file_path
        self.api_key = api_key

    def title_to_title_list(self, title: str):
        """Return ``(synonym_list, translation_list)`` for *title* via the LLM API.

        Issues two separate requests: one for 5 English paraphrases and one
        for translations into 6 languages. Both are parsed into Python lists
        by ``openai_api_list``.
        """
        base_url = "https://api.v3.cm/v1"

        prompt1 = "Please generate 5 paraphrased versions of the following sentence. Make sure you fully understand the original meaning before rewriting. The paraphrased sentences must convey exactly the same meaning as the original. Output in Python list format. The original sentence is: \n"
        prompt2 = "Please translate the following sentence into Arabic, Chinese, French, Russian, Spanish and Portuguese. Make sure you fully understand the original meaning before translating. The translated sentences must convey exactly the same meaning as the original. Output in Python list format, displaying only the translated sentences, without language tags. The original sentence is:\n"

        synonym_list = openai_api_list(prompt1 + title, self.api_key, model_name='gpt-4.1', base_url=base_url)
        translation_list = openai_api_list(prompt2 + title, self.api_key, model_name='gpt-4.1', base_url=base_url)

        return synonym_list, translation_list

    def build_label_list(self, array, category_descript, category_title, instance, notes_list, tilte_list, value_type):
        """Build all label-string combinations for one column.

        Combines every title variant and notes variant (``tilte_list`` /
        ``notes_list`` — parameter names kept for backward compatibility)
        with the fixed metadata. ``None`` parts are omitted from each label.
        """
        def build_label(**kwargs):
            # Keyword order fixes the part order in the rendered label.
            return ", ".join(f"{k}:{v}" for k, v in kwargs.items() if v is not None)

        label_list = []
        # title + notes
        label_list.extend(
            build_label(value_type=value_type, instance=instance, title=t, notes=n, array=array)
            for t in tilte_list for n in notes_list)
        # title only
        label_list.extend(
            build_label(value_type=value_type, instance=instance, title=t, array=array)
            for t in tilte_list)
        # notes only
        label_list.extend(
            build_label(value_type=value_type, instance=instance, notes=n, array=array)
            for n in notes_list)
        # title + category title
        label_list.extend(
            build_label(value_type=value_type, instance=instance, title=t, category_title=category_title, array=array)
            for t in tilte_list)
        # notes + category title
        label_list.extend(
            build_label(value_type=value_type, instance=instance, notes=n, category_title=category_title, array=array)
            for n in notes_list)
        # title + notes + full category info
        label_list.extend(
            build_label(value_type=value_type, instance=instance, title=t, notes=n,
                        category_title=category_title, category_descript=category_descript, array=array)
            for t in tilte_list for n in notes_list)
        return label_list

    def process_column_name(self, column_name: str):
        """Generate and save the label list for a single column.

        Skips columns already processed (``self.existing_columns``) and the
        'eid' identifier column. On error, removes any partially written
        output file and sleeps briefly to back off from the API.
        """
        if hasattr(self, 'existing_columns') and column_name in self.existing_columns:
            return

        if column_name == 'eid':
            print(f"Skip {column_name}, it's the eid column.")
            return

        try:
            save_file_path = os.path.join(self.save_labels_list_dir, f"{column_name}.json")
            field_id, instance_index, array = parse_column_name_string_uk(column_name)

            field = self.fields.get(str(field_id), None)
            if field is None:
                # Previously this fell through to an AttributeError caught by
                # the broad except below (plus a 10 s penalty sleep); skip
                # unknown fields explicitly and cheaply instead.
                print(f"Skip {column_name}: unknown field id {field_id}.")
                return

            value_type = field.value_type.name
            title = field.title
            notes = field.notes

            instance = None
            if instance_index:
                if field.instance_id in self.instances and int(instance_index) < len(
                        self.instances[field.instance_id].index_and_descript):
                    instance = self.instances[field.instance_id].index_and_descript[int(instance_index)]
                else:
                    instance = instance_index

            # Look the category up once; a category_id missing from the dict
            # used to raise AttributeError on the chained ``.title`` access.
            category = self.categories.get(field.category_id) if field.category_id else None
            category_title = category.title if category is not None else None
            category_descript = category.descript if category is not None else None

            synonym_list, translation_list = self.title_to_title_list(title)
            tilte_list = [title] + synonym_list + translation_list

            notes_synonym_list, notes_translation_list = self.title_to_title_list(notes)
            notes_list = [notes] + notes_synonym_list + notes_translation_list

            label_list = self.build_label_list(array, category_descript, category_title, instance, notes_list,
                                               tilte_list,
                                               value_type)

            with open(save_file_path, 'w', encoding='utf-8') as f:
                json.dump(label_list, f, ensure_ascii=False, indent=4)
            # Gentle throttle between API-backed columns.
            time.sleep(1)

        except Exception as e:
            print(f"Error processing column {column_name}: {e}")
            if 'field_id' in locals():
                print(f"field_id: {field_id}, instance: {instance_index}, array: {array}")

            # The with-statement already closed the file; just discard any
            # partially written output so a rerun regenerates this column.
            if 'save_file_path' in locals() and os.path.exists(save_file_path):
                os.remove(save_file_path)

            print(f"Error processing column {column_name}: {e}. END OF THIS COLUMN.")
            # Longer back-off after a failure (e.g. API rate limiting).
            time.sleep(10)

    def main(self):
        """Load the dataset and metadata, then label every column concurrently."""
        fs = resolve_fs(self.dataset_path)
        dataset = load_dataset(fs)
        print(dataset)

        columns_names = []
        for name, child in dataset.named_children.items():
            columns_names.extend(child.columns_name)

        data_processor = UKBioBankProcessor(self.id_file_path, self.encoding_file_path,
                                            self.instance_file_path, self.categories_file_path)
        self.fields = data_processor.fields
        self.instances = data_processor.instances
        self.categories = data_processor.categories

        self.save_labels_list_dir = os.path.join(self.save_path, "labels")
        # exist_ok avoids the check-then-create race of the previous version.
        os.makedirs(self.save_labels_list_dir, exist_ok=True)

        # Columns with an existing JSON output are treated as done (resume support).
        self.existing_columns = {
            file[:-5] for file in os.listdir(self.save_labels_list_dir) if file.endswith('.json')
        }

        with ThreadPoolExecutor(max_workers=8) as executor:
            list(tqdm.tqdm(executor.map(self.process_column_name, columns_names), total=len(columns_names), desc='Processing all columns'))

        print("All labels have been generated and saved.")


if __name__ == '__main__':
    # Project root is five directory levels above this file.
    project_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../.."))
    # NOTE(review): dataset_path is a machine-specific absolute path while all
    # other paths are project-relative (and point at v3, not v4) — confirm
    # this mismatch is intentional.
    dataset_path = "/home/xw/python3test/omics-bert-datasets/datasets/ready/ukbiobank/v4"
    save_path = os.path.join(project_dir_path, "datasets/ready/ukbiobank/v3")

    id_file_path = os.path.join(project_dir_path, "datasets/raw/ukbiobank/v3/id.csv")
    encoding_file_path = os.path.join(project_dir_path, "datasets/raw/ukbiobank/v3/encoding.csv")
    instance_file_path = os.path.join(project_dir_path, "datasets/raw/ukbiobank/v3/instance.csv")
    categories_file_path = os.path.join(project_dir_path, "datasets/raw/ukbiobank/v3/category.csv")

    # Read the API key from the environment rather than hard-coding it in
    # source; falls back to the previous empty-string default when unset.
    api_key = os.environ.get("OPENAI_API_KEY", "")

    build_labels = MakeLabelsUK(dataset_path, save_path, id_file_path, encoding_file_path,
                                instance_file_path, categories_file_path, api_key)

    build_labels.main()
