import json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from collections import defaultdict
import numpy as np
from rapidfuzz import process

class CategoryExtractor(BaseEstimator, TransformerMixin):
    """Pipeline step that turns product dicts into the text fed to TF-IDF.

    Each product is represented by its "Name" value; downstream the
    TfidfVectorizer tokenizes that string.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """Stateless transformer: nothing to learn, return self (sklearn contract)."""
        return self

    def transform(self, X):
        """Return the list of product names for *X* (an iterable of product dicts).

        The original wrapped each name in ``' '.join([...])`` — joining a
        single-element list is a no-op, so the name is returned directly.
        """
        return [product['Name'] for product in X]

def load_json(file_path):
    """Read *file_path* as UTF-8 and return the parsed JSON value."""
    with open(file_path, 'r', encoding='utf-8') as source:
        contents = source.read()
    return json.loads(contents)

def find_best_match_category_code(product_name, new_categories):
    """Fuzzy-match *product_name* against category names and return the Code.

    Uses rapidfuzz's ``process.extractOne`` with a score cutoff of 70;
    returns None when no category scores at or above the cutoff.

    rapidfuzz returns a ``(choice, score, index)`` tuple, so the matched
    category is looked up directly by index. The original re-scanned the
    list by name, which is O(n) extra work and picks the wrong entry when
    two categories share a name.
    """
    new_cat_names = [cat["Name"] for cat in new_categories]
    best_match = process.extractOne(product_name, new_cat_names, score_cutoff=70)
    if best_match:
        matched_index = best_match[2]
        return new_categories[matched_index]["Code"]
    return None

def train_model(products, new_categories, category_counts):
    """Fit a name -> ParentCode classifier from products with known categories.

    Parameters
    ----------
    products : list of product dicts, each with a "Name" key.
    new_categories : unused here; kept for interface compatibility with callers.
    category_counts : mapping of product name -> [ParentCode, count].

    Returns
    -------
    A fitted sklearn pipeline (CategoryExtractor -> TfidfVectorizer -> KNN),
    or None when no product has a recorded category.
    """
    # Keep only products whose name has an entry in category_counts;
    # the label is the recorded ParentCode.
    labelled = [product for product in products if product["Name"] in category_counts]
    if not labelled:
        return None
    targets = [category_counts[product["Name"]][0] for product in labelled]

    pipeline = make_pipeline(
        CategoryExtractor(),
        TfidfVectorizer(),
        # Clamp k to the number of training samples: a fixed n_neighbors=5
        # makes predict() raise whenever fewer than 5 labelled products exist.
        KNeighborsClassifier(n_neighbors=min(5, len(labelled)))
    )

    pipeline.fit(labelled, targets)
    return pipeline

def update_product_categories(products, new_categories, model, category_counts):
    """Assign a ParentCode to each product, collecting the ones it cannot match.

    Products are mutated in place. A product gets its ParentCode from
    category_counts when its name is known there, otherwise from the model's
    prediction (when a model exists and the prediction is a valid category
    code). Everything else is returned as unmatched.

    Returns
    -------
    (products, unmatched_products) — the (mutated) input list and the list
    of products that received no ParentCode.
    """
    unmatched_products = []
    # Hoist the valid-code lookup out of the loop: the original rebuilt a
    # list per product, making membership tests O(len(products) * len(new_categories)).
    valid_codes = {cat['Code'] for cat in new_categories}
    for product in products:
        product_name = product["Name"]

        if product_name in category_counts:
            # Exact name match: use the recorded ParentCode.
            product["ParentCode"] = category_counts[product_name][0]
        elif model is not None:
            # Predict the category using the model if available.
            predicted_code = model.predict([product])[0]
            if predicted_code in valid_codes:
                product["ParentCode"] = predicted_code
            else:
                unmatched_products.append(product)
        else:
            # No model available, add to unmatched.
            unmatched_products.append(product)

    return products, unmatched_products

def distribute_products(new_structure_file, transformed_file, file1):
    """Merge products from two files, derive categories from repeated names,
    assign ParentCodes (exact match first, model prediction as fallback),
    and return the final {"Groups", "Products"} structure.

    Parameters
    ----------
    new_structure_file : path to a JSON file providing the "Groups" section.
    transformed_file, file1 : paths to JSON files, each with a "Products" list.

    Returns
    -------
    dict with "Groups" (taken from new_structure_file) and "Products"
    (the merged, categorized product list with sequential unique codes).
    """
    # Load the JSON files
    new_structure = load_json(new_structure_file)
    transformed_data = load_json(transformed_file)
    file1_data = load_json(file1)

    # Combine products from both files into a single list
    all_products = transformed_data['Products'] + file1_data['Products']

    # category_counts maps product name -> [ParentCode, occurrence count].
    category_counts = defaultdict(lambda: [None, 0])

    # Count every occurrence of each name. BUG FIX: the original guarded this
    # with `if product_name not in category_counts`, but the defaultdict access
    # inside the guard created the entry — so every count was capped at 1 and
    # the `count >= 2` filter below could never match anything.
    for product in all_products:
        category_counts[product["Name"]][1] += 1

    # Names seen at least twice become candidate categories.
    # NOTE(review): ParentCode is never populated before this point, so
    # parent_code is always None here — confirm where codes are meant to be set.
    new_categories = [
        {"Code": parent_code, "Name": product_name}
        for product_name, (parent_code, count) in category_counts.items()
        if count >= 2  # Adjust as needed
    ]

    # Train an initial model from products with recorded categories.
    model = train_model(all_products, new_categories, category_counts)

    updated_products, unmatched_products = update_product_categories(
        all_products, new_categories, model, category_counts
    )

    # Print the number of unmatched products
    if unmatched_products:
        print(f"Number of unmatched products: {len(unmatched_products)}")
        # Retry only the unmatched products; they are mutated in place.
        # BUG FIX: the original reassigned updated_products to this call's
        # return value, replacing the full product list with just the
        # unmatched subset and dropping every matched product.
        update_product_categories(unmatched_products, new_categories, model, category_counts)

    # Assign unique sequential codes to each product.
    for unique_code, product in enumerate(updated_products, start=1):
        product['Code'] = unique_code

    # Initialize the final structure
    final_structure = {
        "Groups": new_structure['Groups'],
        "Products": updated_products
    }

    return final_structure

def save_final_structure(final_structure, output_file):
    """Write *final_structure* to *output_file* as indented UTF-8 JSON."""
    serialized = json.dumps(final_structure, ensure_ascii=False, indent=4)
    with open(output_file, 'w', encoding='utf-8') as sink:
        sink.write(serialized)

    print(f'Final structure with groups and products has been saved to {output_file}')

if __name__ == "__main__":
    # File paths
    new_structure_file = 'transformed_file_new.json'
    transformed_file = 'transformed_file.json'
    file1 = 'file1.json'
    output_file = 'final_structure_with_groups_and_products.json'

    # Distribute products according to the structure in new_structure_file
    final_structure = distribute_products(new_structure_file, transformed_file, file1)

    # Save the final structure to output file
    save_final_structure(final_structure, output_file)
