import jieba
import logging
import torch
import pandas as pd
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset
from sklearn.model_selection import train_test_split

# Raise jieba's log level so its initialization chatter is suppressed.
jieba.setLogLevel(logging.INFO)
# Default compute device for the whole module: GPU when available, else CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Model(nn.Module):
    """Thin nn.Module wrapper that pins a layer stack to a single device.

    Args:
        layers: The network body to run; moved to ``device`` on construction.
        device: Target device for both the layers and incoming inputs.
    """

    def __init__(self, layers: nn.Sequential, device: torch.device = DEVICE):
        super().__init__()
        self.device = device
        # Relocate the whole stack once, up front.
        self.layers = layers.to(self.device)

    def forward(self, x):
        # Inputs are migrated on the fly, so callers may pass CPU tensors.
        return self.layers(x.to(self.device))


def data_preprocessing(data: pd.DataFrame, label_col: str,
                       feature_col: list[str] | None = None) -> tuple[TensorDataset, TensorDataset, int, int]:
    """
    Prepare tabular data for neural network training/evaluation.

    Workflow:
    1. Separate features (x) and labels (y)
    2. Stratified 80/20 train/test split (reproducible)
    3. Convert to PyTorch-compatible tensors
    4. Create TensorDataset objects for DataLoader

    Args:
        data: Raw DataFrame containing both features and labels
        label_col: Column name containing class labels
        feature_col: Specific columns to use as features (None = use all except label)

    Returns:
        tuple: ``(train_dataset, test_dataset, num_features, num_classes)``
    """
    # Feature selection: use the requested columns or everything but the label.
    if feature_col is None:
        # float32 is the conventional dtype for NN inputs.
        x = data.drop(label_col, axis=1).astype(np.float32)
    else:
        x = data[feature_col].astype(np.float32)
    # int64 labels for classification losses (e.g. CrossEntropyLoss).
    y = data[label_col].astype(np.int64)
    # BUG FIX: the docstring/comments promised a stratified split, but
    # `stratify` was never passed, so class proportions could drift between
    # the train and test partitions. Pass `stratify=y` to actually preserve
    # the class distribution.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y,
        test_size=0.2,   # standard 80/20 evaluation split
        random_state=0,  # reproducible sampling
        stratify=y,      # preserve per-class proportions in both splits
    )
    # Wrap the splits as TensorDatasets for direct use with DataLoader.
    train_data = TensorDataset(
        torch.from_numpy(x_train.values),  # features: (n_train, n_features) float32
        torch.from_numpy(y_train.values),  # labels:   (n_train,) int64
    )
    test_data = TensorDataset(
        torch.from_numpy(x_test.values),
        torch.from_numpy(y_test.values),
    )
    return train_data, test_data, x_train.shape[1], len(np.unique(y))


def jieba_cut(file_name: str) -> tuple[list[str], dict[str, int], int, list[str]]:
    """
    Chinese text preprocessing pipeline using Jieba segmentation.

    Workflow:
    1. Read text file line by line
    2. Segment each line into words using Jieba
    3. Build vocabulary and word-to-index mapping
    4. Generate indexed corpus with sentence separators

    Args:
        file_name: Path to raw text file (UTF-8 encoded)

    Returns:
        tuple: ``(unique_words, word2index, word_count, corpus_idx)``
    """
    # A dict preserves insertion order (Python 3.7+) and gives O(1) membership
    # tests, replacing the original O(n^2) `word not in list` scan.
    word2index: dict[str, int] = {}
    # Tokenized sentences, kept in file order for the indexing pass below.
    vocab_list: list[list[str]] = []
    # BUG FIX: `with` guarantees the file handle is closed; the original
    # `for line in open(...)` leaked it.
    with open(file_name, encoding='utf-8') as fh:
        for line in fh:
            # Chinese word segmentation using Jieba.
            words = jieba.lcut(line)
            vocab_list.append(words)
            # First occurrence assigns the next free index.
            for word in words:
                if word not in word2index:
                    word2index[word] = len(word2index)
    # BUG FIX: the sentence separator must exist in the vocabulary — the
    # original raised KeyError when no line contained a literal space.
    if ' ' not in word2index:
        word2index[' '] = len(word2index)
    unique_words = list(word2index)
    word_count = len(unique_words)
    # Flatten sentences into one index sequence, space token between sentences.
    corpus_idx: list[int] = []
    for words in vocab_list:
        corpus_idx.extend(word2index[word] for word in words)
        corpus_idx.append(word2index[' '])
    return unique_words, word2index, word_count, corpus_idx
