import os
import re
from collections import defaultdict, Counter
from itertools import combinations
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime
def count_chunk(chunk_data):
    """Count, for one chunk of transactions, how many contain each candidate.

    ``chunk_data`` is a ``(transactions, candidates)`` pair. Defined at module
    level so it can be pickled and shipped to ``ProcessPoolExecutor`` workers.
    Returns a defaultdict mapping each candidate itemset to its occurrence
    count within the chunk (candidates never seen are simply absent).
    """
    transactions, candidates = chunk_data
    counts = defaultdict(int)
    for candidate in candidates:
        hits = sum(1 for transaction in transactions if candidate.issubset(transaction))
        if hits:
            counts[candidate] = hits
    return counts
class OptimizedApriori:
    """Apriori-based association-rule classifier for text documents.

    Pipeline: load labelled text files (one directory per class) ->
    TF-IDF vocabulary selection -> binary "transactions" (the set of terms
    present in each document) -> frequent-itemset mining -> class-labelled
    high-confidence rules -> prediction by the highest-confidence matching
    rule.
    """

    def __init__(self, min_support=0.02, min_confidence=0.6, max_features=2000,
                 max_length=2, min_word_length=3):
        """Configure mining thresholds.

        Args:
            min_support: minimum fraction of transactions an itemset must
                appear in to be considered frequent.
            min_confidence: minimum confidence for a rule to be kept.
            max_features: TF-IDF vocabulary size cap.
            max_length: largest itemset size to mine.
            min_word_length: minimum token length accepted by the vectorizer.
        """
        self.min_support = min_support
        self.min_confidence = min_confidence
        self.max_features = max_features
        self.max_length = max_length
        self.min_word_length = min_word_length
        self.feature_selector = None  # reserved; unused by the current pipeline
        self.vectorizer = None        # fitted TfidfVectorizer after fit()
        self.rules = None             # list of (antecedent, consequent, confidence, label)

    def preprocess_text(self, text):
        """Lower-case text and strip punctuation, digits and extra whitespace."""
        text = text.lower()
        text = re.sub(r'[^\w\s]', ' ', text)  # Remove special characters
        text = re.sub(r'\d+', ' ', text)  # Remove numbers
        text = re.sub(r'\s+', ' ', text).strip()  # Remove extra spaces
        return text

    def load_and_process_data(self, data_dir):
        """Load a one-directory-per-class corpus.

        Returns:
            (documents, labels, label_names) where each label is an index
            into the sorted list of class directory names.
        """
        documents = []
        labels = []
        label_names = sorted(os.listdir(data_dir))

        print(f"Starting data loading from {data_dir}")
        start_time = datetime.now()

        for label_index, label_name in enumerate(label_names):
            label_dir = os.path.join(data_dir, label_name)
            if not os.path.isdir(label_dir):
                continue
            for file_name in os.listdir(label_dir):
                file_path = os.path.join(label_dir, file_name)
                try:
                    # latin1 maps every byte, so odd bytes in the corpus
                    # cannot raise UnicodeDecodeError and abort loading.
                    with open(file_path, 'r', encoding='latin1') as file:
                        documents.append(self.preprocess_text(file.read()))
                        labels.append(label_index)
                except OSError as e:
                    print(f"Error reading {file_path}: {e}")

        print(f"Data loading completed in {datetime.now() - start_time}")
        return documents, labels, label_names

    def extract_features(self, documents, is_training=True):
        """Vectorize documents with TF-IDF, fitting the vocabulary if training.

        Raises:
            RuntimeError: if called with is_training=False before fit().
        """
        if is_training:
            self.vectorizer = TfidfVectorizer(
                max_features=self.max_features,
                min_df=2,
                max_df=0.95,
                token_pattern=f'\\b\\w{{{self.min_word_length},}}\\b'
            )
            return self.vectorizer.fit_transform(documents)
        if self.vectorizer is None:
            raise RuntimeError("extract_features(is_training=False) called before fit()")
        return self.vectorizer.transform(documents)

    def generate_transactions(self, X, feature_names, drop_empty=True):
        """Convert a sparse document-term matrix to per-document term sets.

        Args:
            X: sparse matrix with one row per document.
            feature_names: term strings indexed by column.
            drop_empty: when True (the historical default), documents with no
                surviving terms are skipped. Pass False whenever the output
                must stay aligned one-to-one with a parallel label list.
        """
        transactions = []
        X_dense = X.toarray()

        for row in X_dense:
            present_features = np.where(row > 0)[0]
            transaction = {feature_names[idx] for idx in present_features}
            if transaction or not drop_empty:
                transactions.append(transaction)

        return transactions

    def find_frequent_itemsets(self, transactions):
        """Mine frequent itemsets up to self.max_length with Apriori.

        Returns:
            dict mapping frozenset itemsets to absolute transaction counts.
        """
        n_transactions = len(transactions)
        if n_transactions == 0:
            return {}
        # Clamp to >= 1: int() truncation on a tiny corpus could otherwise
        # yield a zero threshold that admits every itemset.
        min_count = max(1, int(self.min_support * n_transactions))

        item_counts = Counter()
        for transaction in transactions:
            item_counts.update(transaction)

        frequent_items = {item: count for item, count in item_counts.items()
                          if count >= min_count}
        if not frequent_items:
            return {}

        frequent_itemsets = {frozenset([item]): count
                             for item, count in frequent_items.items()}
        current_length = 1

        while current_length < self.max_length:
            current_length += 1
            # Join only the (k-1)-itemsets; the original passed every mined
            # size, producing wasteful mixed-size joins for k >= 3.
            prev_level = [s for s in frequent_itemsets if len(s) == current_length - 1]
            candidates = self._generate_candidates(prev_level, current_length)

            if not candidates:
                break

            itemset_counts = self._parallel_count_support(transactions, candidates)

            new_frequent = {itemset: count for itemset, count in itemset_counts.items()
                            if count >= min_count}
            if not new_frequent:
                break

            frequent_itemsets.update(new_frequent)

        return frequent_itemsets

    def _generate_candidates(self, prev_frequent, k):
        """Join (k-1)-itemsets into size-k candidates.

        Prunes any candidate with an infrequent (k-1)-subset (the standard
        Apriori downward-closure pruning step).
        """
        prev_set = set(prev_frequent)  # O(1) membership for the pruning check
        prev_list = list(prev_set)
        candidates = set()

        for i, itemset1 in enumerate(prev_list):
            for itemset2 in prev_list[i + 1:]:
                union = itemset1 | itemset2
                if len(union) != k:
                    continue
                if all(frozenset(sub) in prev_set
                       for sub in combinations(union, k - 1)):
                    candidates.add(union)

        return candidates

    def _parallel_count_support(self, transactions, candidates):
        """Count candidate supports across worker processes.

        Falls back to a serial count if the process pool fails (e.g. pickling
        or spawn issues), so mining still completes with correct totals
        instead of returning partial counts.
        """
        n_cores = os.cpu_count() or 1
        chunk_size = max(10, len(transactions) // (n_cores * 2))
        chunks = [transactions[i:i + chunk_size]
                  for i in range(0, len(transactions), chunk_size)]

        print(f"Starting parallel support counting with {len(chunks)} chunks...")
        total_counts = defaultdict(int)

        try:
            with ProcessPoolExecutor(max_workers=min(4, n_cores)) as executor:
                # Materialize inside the with-block so worker errors surface
                # here rather than during later iteration.
                results = list(executor.map(count_chunk,
                                            [(chunk, candidates) for chunk in chunks]))
            for counts in results:
                for itemset, count in counts.items():
                    total_counts[itemset] += count
        except Exception as e:
            print(f"Parallel processing failed: {e}")
            total_counts = defaultdict(int)
            for chunk in chunks:
                for itemset, count in count_chunk((chunk, candidates)).items():
                    total_counts[itemset] += count

        print(f"Finished parallel support counting. Found {len(total_counts)} itemsets.")
        return total_counts

    def generate_rules(self, transactions, labels, frequent_itemsets):
        """Derive high-confidence rules tagged with a majority class label.

        ``transactions`` and ``labels`` must be aligned index-for-index.
        Returns a list of (antecedent, consequent, confidence, label) tuples.
        """
        rules = []
        antecedent_support = {}  # cache: antecedent -> transaction count

        for itemset, support in frequent_itemsets.items():
            if len(itemset) < 2:
                continue

            for size in range(1, len(itemset)):
                for antecedent in combinations(itemset, size):
                    antecedent = frozenset(antecedent)
                    consequent = itemset - antecedent

                    # Antecedents recur across itemsets; cache their supports
                    # instead of rescanning all transactions each time.
                    if antecedent not in antecedent_support:
                        antecedent_support[antecedent] = sum(
                            1 for t in transactions if antecedent.issubset(t))
                    ant_count = antecedent_support[antecedent]
                    if ant_count == 0:
                        continue  # guard against division by zero
                    confidence = support / ant_count

                    if confidence >= self.min_confidence:
                        matching_labels = [labels[idx]
                                           for idx, t in enumerate(transactions)
                                           if antecedent.issubset(t)
                                           and consequent.issubset(t)]
                        if matching_labels:
                            predicted_label = Counter(matching_labels).most_common(1)[0][0]
                            rules.append((antecedent, consequent, confidence, predicted_label))

        return rules

    def fit(self, train_dir):
        """Train the model on the corpus under train_dir. Returns self."""
        documents, labels, label_names = self.load_and_process_data(train_dir)
        X = self.extract_features(documents, is_training=True)
        feature_names = np.array(self.vectorizer.get_feature_names_out())

        # drop_empty=False keeps transactions aligned 1:1 with labels; the
        # original dropped empty documents, so generate_rules attached the
        # wrong labels to rules whenever any document had no terms.
        transactions = self.generate_transactions(X, feature_names, drop_empty=False)
        frequent_itemsets = self.find_frequent_itemsets(transactions)

        self.rules = self.generate_rules(transactions, labels, frequent_itemsets)
        self.label_names = label_names

        return self

    def predict(self, test_dir):
        """Predict labels for the corpus under test_dir.

        Returns:
            (predictions, labels) aligned index-for-index. A document that
            matches no rule defaults to class 0.
        """
        documents, labels, _ = self.load_and_process_data(test_dir)
        X = self.extract_features(documents, is_training=False)
        feature_names = np.array(self.vectorizer.get_feature_names_out())

        # drop_empty=False: the original silently dropped empty documents,
        # desynchronizing the returned predictions from the returned labels.
        transactions = self.generate_transactions(X, feature_names, drop_empty=False)

        predictions = []
        for transaction in transactions:
            best_confidence = 0
            prediction = 0

            # Cheap float comparison first, subset test second.
            for antecedent, consequent, confidence, label in self.rules:
                if confidence > best_confidence and antecedent.issubset(transaction):
                    best_confidence = confidence
                    prediction = label

            predictions.append(prediction)

        return predictions, labels


def main():
    """Train on the 20-newsgroups train split, then evaluate on the test split.

    Prints per-sample results, overall accuracy, and a sample of the mined
    association rules.
    """
    classifier = OptimizedApriori(min_support=0.05, min_confidence=0.8,
                                  max_features=1500, max_length=2, min_word_length=3)

    # NOTE: paths are machine-specific; adjust for your local checkout.
    train_dir = r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-train'
    classifier.fit(train_dir)

    test_dir = r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-test'
    predictions, test_labels = classifier.predict(test_dir)

    print("\n分类结果：")
    for sample_idx, (prediction, actual) in enumerate(zip(predictions, test_labels), start=1):
        actual_name = classifier.label_names[actual]
        predicted_name = classifier.label_names[prediction]
        print(f"样本 {sample_idx}: 实际类别 = {actual_name}, "
              f"预测类别 = {predicted_name}")

    correct = sum(p == t for p, t in zip(predictions, test_labels))
    accuracy = correct / len(test_labels)
    print(f"\n分类准确率：{accuracy:.2%}")

    print("\n发现的一些关联规则：")
    for rule_idx, (antecedent, consequent, confidence, label) in enumerate(classifier.rules[:5], start=1):
        print(f"规则 {rule_idx}:")
        print(f"如果文档包含词语: {', '.join(antecedent)}")
        print(f"那么可能也包含: {', '.join(consequent)}")
        print(f"置信度: {confidence:.2%}")
        print(f"预测类别: {classifier.label_names[label]}\n")


if __name__ == "__main__":
    main()
