import nltk
import json
import logging
import re
from rapidfuzz import process, fuzz
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from joblib import dump, load
from tqdm import tqdm
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords

# Configure root logger: timestamped INFO-level messages for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Bootstrap the NLTK Russian stopword corpus: stopwords.words() raises
# LookupError when the corpus is not installed locally, in which case we
# download it once and retry.
try:
    stop_words = set(stopwords.words("russian"))
except LookupError:
    nltk.download("stopwords")
    stop_words = set(stopwords.words("russian"))

# Russian Snowball stemmer used by preprocess_text() to normalize tokens.
stemmer = SnowballStemmer("russian")

def load_json(file_path):
    """Read a UTF-8 encoded JSON file and return the parsed object."""
    logging.info(f"Loading data from {file_path}...")
    with open(file_path, 'r', encoding='utf-8') as fh:
        payload = json.load(fh)
    return payload

def save_json(data, file_path):
    """Serialize *data* to *file_path* as pretty-printed, non-ASCII-escaped JSON."""
    logging.info(f"Saving data to {file_path}...")
    with open(file_path, 'w', encoding='utf-8') as fh:
        # ensure_ascii=False keeps Cyrillic text human-readable in the output.
        json.dump(data, fh, ensure_ascii=False, indent=4)

def create_product_category_map(products):
    """Map each product's name to its (old) parent category code."""
    mapping = {}
    for item in products:
        mapping[item["Name"]] = item["ParentCode"]
    return mapping

def train_model(products, model_path='category_prediction_model.joblib'):
    """Fit a TF-IDF + k-NN pipeline that maps product names to category codes.

    Args:
        products: iterable of dicts with "Name" and "ParentCode" keys.
        model_path: where to persist the fitted pipeline (joblib format);
            defaults to the historical hard-coded path.

    Returns:
        The fitted sklearn pipeline.

    Raises:
        ValueError: if *products* is empty.
    """
    X = [product["Name"] for product in products]
    y = [product["ParentCode"] for product in products]
    if not X:
        raise ValueError("Cannot train a model on an empty product list")

    # Clamp k to the sample count: KNeighborsClassifier.predict raises
    # "Expected n_neighbors <= n_samples" when fewer than 5 products exist.
    pipeline = make_pipeline(
        TfidfVectorizer(),
        KNeighborsClassifier(n_neighbors=min(5, len(X)))
    )

    logging.info("Training model...")
    pipeline.fit(X, y)

    logging.info("Saving model...")
    dump(pipeline, model_path)

    return pipeline

def load_model(model_path='category_prediction_model.joblib'):
    """Load a previously persisted classifier from disk.

    Args:
        model_path: path to the joblib artifact written by train_model();
            defaults to the historical hard-coded path.

    Returns:
        The fitted pipeline, or None when no artifact exists — the caller is
        expected to train a fresh model in that case.
    """
    try:
        logging.info("Loading model...")
        return load(model_path)
    except FileNotFoundError:
        logging.warning("Model not found. Training a new model...")
        return None

def predict_category(model, product_name, old_to_new_category_map):
    """Predict the new category code for *product_name* via the trained model.

    Args:
        model: fitted classifier with a .predict() method, or None.
        product_name: product name string fed to the model.
        old_to_new_category_map: dict keyed by str(old code) -> new code.

    Returns:
        The mapped new category code, or None when no model is available or
        the predicted old code has no entry in the map.
    """
    if model is None:
        return None
    predicted_code = model.predict([product_name])[0]
    # Map keys are strings while model labels may be ints — normalize first.
    return old_to_new_category_map.get(str(predicted_code), None)

def create_category_maps(new_structure):
    """Build three lookup tables from the new structure's 'Groups' list.

    Returns:
        (old_to_new_category_map, new_category_map, category_names) where all
        keys are stringified codes; old_to_new maps a group's old ParentCode
        (when present) to its new Code.
    """
    old_to_new = {}
    code_index = {}
    names = {}

    for grp in new_structure['Groups']:
        key = str(grp['Code'])
        code_index[key] = grp['Code']
        names[key] = grp['Name']
        # Only groups that declare a ParentCode contribute an old->new link.
        if 'ParentCode' in grp:
            old_to_new[str(grp['ParentCode'])] = grp['Code']

    return old_to_new, code_index, names

def preprocess_text(text):
    """Lower-case *text*, tokenize it, drop Russian stop words, and stem.

    Returns a list of stemmed tokens (order preserved, duplicates kept).
    """
    stems = []
    for word in re.findall(r'\b\w+\b', text.lower()):
        if word in stop_words:
            continue
        stems.append(stemmer.stem(word))
    return stems

def generate_synonyms_and_keywords(category_names):
    """Build per-category synonym token lists and a global keyword list.

    Starts from a hand-curated synonym seed, then augments each category with
    the stemmed tokens of its own name; every token also feeds the keyword
    pool used for last-resort matching.

    Args:
        category_names: dict of str(code) -> category display name.

    Returns:
        (synonyms, keywords): synonyms maps str(code) -> list of tokens;
        keywords is a deduplicated, sorted list of all tokens.
    """
    synonyms = {
        "10001": ["пароконвектомат", "конвекционная печь"],
        "10002": ["подставка под пароконвектомат", "аксессуары пароконвектоматы"],
        "10003": ["плита профессиональная", "кухонная плита"],
        # Add more synonyms as needed
    }
    keywords = ["печь", "хлебопекарная", "хлебопекарня", "шкаф пекарный", "пароконвектомат", "фритюрница"]

    for code, name in category_names.items():
        processed_tokens = preprocess_text(name)
        if code not in synonyms:
            synonyms[code] = processed_tokens
        else:
            synonyms[code].extend(processed_tokens)
        keywords.extend(processed_tokens)

    # sorted() instead of list(set(...)): set iteration order varies with
    # PYTHONHASHSEED, which made keyword-scan fallbacks non-reproducible
    # between runs. Sorting keeps the output deterministic.
    return synonyms, sorted(set(keywords))

def find_similar_category(product_name, category_names, threshold=50):
    """Fuzzy-match *product_name* against category display names.

    Returns the code of the best-scoring category when its token-sort ratio
    exceeds *threshold*, otherwise None.
    """
    codes = list(category_names.keys())
    labels = list(category_names.values())
    match = process.extractOne(product_name, labels, scorer=fuzz.token_sort_ratio)
    if match is None:
        return None
    _, score, idx = match
    return codes[idx] if score > threshold else None

def find_best_category(product_name, product_desc, old_category, category_names, old_to_new_category_map, category_synonyms, keywords):
    """Resolve a product's new category code via a cascade of strategies.

    Order: (1) direct old->new code mapping, (2) fuzzy match of name+description
    against synonym-enriched category labels, (3) fuzzy match of the old
    category's display name, (4) keyword containment scan. Returns the first
    hit, or None when every strategy fails.
    """
    # Description may be None or empty — normalize to an empty string.
    product_desc = product_desc or ""

    # Strategy 1: the old code maps straight to a new one.
    if old_category in old_to_new_category_map:
        return old_to_new_category_map[old_category]

    # Enrich category labels with their synonym tokens for better fuzzy recall.
    enriched = category_names.copy()
    for code, label in category_names.items():
        if code in category_synonyms:
            enriched[code] = label + " " + " ".join(category_synonyms[code])

    # Strategy 2: fuzzy-match the combined name + description.
    combined = product_name + " " + product_desc
    hit = find_similar_category(combined, enriched, threshold=60)
    if hit:
        return hit

    # Strategy 3: fuzzy-match the old category's display name, if known.
    if old_category in category_names:
        hit = find_similar_category(category_names[old_category], enriched, threshold=60)
        if hit:
            return hit

    # Strategy 4: first keyword present in the product text that also appears
    # in some enriched category label wins.
    lowered = combined.lower()
    for word in keywords:
        if word not in lowered:
            continue
        for code, label in enriched.items():
            if word in label.lower():
                return code

    return None

def update_categories(old_structure_files, new_structure_file):
    """Reassign every product in the new structure to a new-catalogue category.

    Per product, resolution order is: rule-based matching (find_best_category),
    then the trained k-NN model (predict_category), then a default fallback
    (the first group code in the new structure).

    Args:
        old_structure_files: paths to JSON files whose 'Products' lists carry
            the old ParentCode assignments.
        new_structure_file: path to a JSON file with 'Groups' and 'Products'.

    Returns:
        The new-structure dict with each product's 'ParentCode' updated
        in place.
    """
    # Load old structure
    old_products = []
    for file in old_structure_files:
        old_products.extend(load_json(file)['Products'])
    
    # Name -> old ParentCode; later files silently win on duplicate names.
    old_product_category_map = create_product_category_map(old_products)

    # Load new structure
    new_structure = load_json(new_structure_file)
    
    # Create category mappings
    old_to_new_category_map, new_category_map, category_names = create_category_maps(new_structure)

    # Generate category synonyms and keywords
    category_synonyms, keywords = generate_synonyms_and_keywords(category_names)

    # Train or load the model
    model = load_model()
    if model is None:
        model = train_model(old_products)

    # Ensure all products are processed
    processed_products = set()

    # Update product categories
    for product in tqdm(new_structure['Products'], desc="Updating categories"):
        # old_category is None (-> "None" after str()) for products absent
        # from the old files; find_best_category then skips the direct map.
        old_category = old_product_category_map.get(product['Name'])
        new_category = find_best_category(product['Name'], product.get('Desc', ''), str(old_category), category_names, old_to_new_category_map, category_synonyms, keywords)

        if not new_category:
            new_category = predict_category(model, product['Name'], old_to_new_category_map)

        # Fall back to the first group code when no valid category was found.
        if not new_category or str(new_category) not in new_category_map:
            new_category = list(new_category_map.values())[0]
            logging.warning(f"Could not find valid category for product: {product['Name']}. Assigning to default category.")

        product['ParentCode'] = new_category
        processed_products.add(product['Name'])

    # NOTE(review): every product's Name is added to processed_products in the
    # loop above, so this difference is always empty and the error never fires.
    missing_products = set([product['Name'] for product in new_structure['Products']]) - processed_products
    if missing_products:
        logging.error(f"Missing products: {missing_products}")
    
    return new_structure

if __name__ == "__main__":
    # Input/output paths for the migration run.
    source_files = ['transformed_file.json', 'file1.json']
    target_file = 'final_structure_with_groups_and_products.json'
    result_file = 'final_structure_with_updated_categories.json'

    logging.info("Starting category update process...")
    result = update_categories(source_files, target_file)
    save_json(result, result_file)
    logging.info("Process completed.")
