from json import (
    load as json_load,
    loads as json_loads
)
from os.path import basename
from pathlib import Path
from rich import print
from .config import conf
from .type import LabeledData

class Data:
    """Preprocessing pipeline for character-level BIO sequence labeling.

    On construction it runs three idempotent steps (each skipped when its
    output file already exists):

    1. ``preprocess``    — flatten labeled JSON-lines spans into a TSV file.
    2. ``data_transfer`` — emit a char-per-line BIO training file.
    3. ``build_vocab``   — build the character vocabulary and save it.

    All file locations come from ``conf.path`` (pathlib.Path objects,
    given the ``.exists()`` calls below).
    """

    # Label set loaded once at class-definition time, shared by instances.
    with open(conf.path.labels, 'r', encoding='utf-8') as json_f:
        labels = json_load(json_f)

    def __init__(self):
        """Resolve file paths from config and run the full pipeline."""
        # File paths from config.
        self.train = conf.path.train
        self.data = conf.path.data
        self.labeled = conf.path.labeled
        self.preprocessed = conf.path.preprocessed
        self.vocab = conf.path.vocab
        # Run the pipeline in order; each step skips work already done.
        self.preprocess()
        self.data_transfer()
        self.build_vocab()

    def __label_text(self, data_file: Path) -> dict:
        """Read the preprocessed TSV and map line ids to char-level BIO tags.

        Returns ``{line_id: {char_index: tag}}`` where tag is ``B-<label>``
        for the first character of a span and ``I-<label>`` for the rest.
        Lines without exactly 5 tab-separated fields are ignored.
        """
        label_dict: dict = {}
        # Context manager ensures the file handle is closed.
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split('\t')
                if len(parts) != 5:
                    continue
                # Fields: line id, span text (unused), start, end, label.
                line_id, _, start, end, label = parts
                line_id = int(line_id)  # avoid shadowing builtin `id`
                start, end = int(start), int(end)
                char_tags = label_dict.setdefault(line_id, {})
                # BIO scheme: B- on the span's first char, I- afterwards.
                for i in range(start, end):
                    char_tags[i] = f'B-{label}' if i == start else f'I-{label}'
        return label_dict

    def preprocess(self) -> None:
        """Flatten the labeled JSON-lines file into a TSV of spans.

        Each output line is ``id<TAB>text<TAB>start<TAB>end<TAB>label``.
        No-op when the preprocessed file already exists.
        """
        if self.preprocessed.exists():
            return
        p_data = []
        # `with` fixes the leaked file handle of the bare open() iteration.
        with open(self.labeled, 'r', encoding='utf-8') as in_f:
            for line in in_f:
                record: LabeledData = json_loads(line.strip())
                rec_id, text = record['id'], record['text']
                for start, end, span_label in record['label']:
                    span_text = text[start:end]
                    p_data.append(
                        f'{rec_id}\t{span_text}\t{start}\t{end}\t{span_label}'
                    )
        with open(self.preprocessed, 'w', encoding='utf-8') as out_f:
            # Batch the writes instead of one write() per line.
            out_f.writelines(p_line + '\n' for p_line in p_data)
        print(f'[bold green]{basename(self.preprocessed)}[/] saved!')

    def data_transfer(self) -> None:
        """Write the char-level BIO training file from raw text + labels.

        Each character of every input line becomes ``char<TAB>tag`` with
        tag ``O`` when the char is not inside any labeled span.
        No-op when the train file already exists.
        """
        if self.train.exists():
            return
        label_by_line = self.__label_text(self.preprocessed)
        with open(self.train, 'w', encoding='utf-8') as train_f, \
                open(self.data, 'r', encoding='utf-8') as data_f:
            # Stream the data file instead of readlines()-ing it whole.
            for idx, line in enumerate(data_f):
                char_tags = label_by_line.get(idx, {})
                for i, char in enumerate(line.strip()):
                    tag = char_tags.get(i, 'O')
                    train_f.write(f'{char}\t{tag}\n')
        print(f'[bold green]{basename(self.train)}[/] saved!')

    def build_vocab(self) -> tuple[list, dict]:
        """Build vocabulary from train file, return `data` and `word2id`"""
        # Samples are (chars, tags) pairs split on blank lines or
        # sentence-ending punctuation.
        data, sample_x, sample_y = [], [], []
        # Insertion-ordered vocab with reserved PAD/UNK slots; the parallel
        # set gives O(1) membership (the list scan was O(n) per char).
        vocab_list = ['PAD', 'UNK']
        seen = set(vocab_list)
        sentence_enders = {'.', '?', '!', '。', '？', '！'}
        # Context manager fixes the leaked handle of the bare open() loop.
        with open(self.train, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Blank line terminates the current sample, if any.
                    if sample_x and sample_y:
                        data.append([sample_x, sample_y])
                        sample_x, sample_y = [], []
                    continue
                parts = line.split('\t')
                if len(parts) != 2:
                    continue
                char, cate = parts
                sample_x.append(char)
                sample_y.append(cate)
                if char not in seen:
                    seen.add(char)
                    vocab_list.append(char)
                # Sentence-ending punctuation also closes a sample.
                if char in sentence_enders and sample_x and sample_y:
                    data.append([sample_x, sample_y])
                    sample_x, sample_y = [], []
        # Flush a trailing sample with no terminator.
        if sample_x and sample_y:
            data.append([sample_x, sample_y])
        # word2id indices follow vocab_list order (PAD=0, UNK=1, ...).
        word2id = {word: idx for idx, word in enumerate(vocab_list)}
        if not self.vocab.exists():
            with open(self.vocab, 'w', encoding='utf-8') as f:
                f.write('\n'.join(vocab_list))
            print(f'[bold green]{basename(self.vocab)}[/] saved!')
        return data, word2id

# Module-level singleton: importing this module runs the whole preprocessing
# pipeline (file reads/writes) as a side effect of constructing Data().
data = Data()
