import re
from collections import Counter

import nltk
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize

# # Ensure the required NLTK data packages have been downloaded
# try:
#     nltk.data.find('corpora/stopwords')
#     nltk.data.find('tokenizers/punkt')
#     nltk.data.find('averaged_perceptron_tagger')
#     nltk.data.find('corpora/wordnet')
# except LookupError:
#     nltk.download('stopwords')
#     nltk.download('punkt')
#     nltk.download('averaged_perceptron_tagger')
#     nltk.download('wordnet')


class SoftwareEngineeringKeywordExtractor:
    """Frequency-based keyword extractor tuned for software-engineering text.

    Pipeline: lowercase/strip punctuation and digits, tokenize, POS-tag,
    lemmatize (or stem), drop stopwords and short tokens, then rank tokens
    by frequency, with known software-engineering terms given double weight.
    """

    # Weight multiplier applied to known software-engineering terms.
    _DOMAIN_WEIGHT = 2

    # Penn Treebank tag prefix -> WordNet POS code.
    _WORDNET_POS = {'J': 'a', 'V': 'v', 'N': 'n', 'R': 'r'}

    def __init__(self):
        # Basic English stopwords from NLTK.
        self.stopwords = set(stopwords.words('english'))

        # Domain-specific stopwords common in software-engineering papers.
        self.software_stopwords = {
            'paper', 'study', 'research', 'work', 'propose', 'proposed',
            'approach', 'method', 'result', 'show', 'based', 'use', 'using',
            'system', 'systems', 'application', 'applications', 'model', 'models',
            'failure', 'root', 'cause', 'error', 'log', 'line', 'case', 'event',
            'failed'
        }

        # Combined stopword set used for filtering.
        self.all_stopwords = self.stopwords.union(self.software_stopwords)

        # Normalizers: Porter stemmer (aggressive) and WordNet lemmatizer
        # (POS-aware).
        self.stemmer = PorterStemmer()
        self.lemmatizer = WordNetLemmatizer()

        # Important software-engineering vocabulary (extensible); these
        # terms receive double weight when ranking.
        self.software_important_terms = {
            'performance', 'efficiency', 'optimization', 'algorithm', 'architecture',
            'framework', 'implementation', 'testing', 'debugging', 'deployment',
            'scalability', 'reliability', 'security', 'maintenance', 'refactoring',
            'integration', 'interface', 'component', 'module', 'library', 'api',
            'protocol', 'paradigm', 'pattern', 'methodology', 'process', 'agile',
            'devops', 'continuous', 'delivery',
            'microservices', 'containerization', 'orchestration', 'virtualization',
            'cloud', 'distributed', 'concurrent', 'parallel', 'thread',
            'synchronization', 'memory', 'storage', 'network', 'bandwidth', 'latency',
            'ai',
        }

    def preprocess_text(self, text):
        """Lowercase *text* and blank out punctuation and digit runs.

        Each punctuation character and each run of digits is replaced by a
        single space, so token boundaries are preserved for tokenization.
        """
        text = text.lower()
        # Anything that is neither a word character nor whitespace -> space.
        text = re.sub(r'[^\w\s]', ' ', text)
        # Digit runs -> space (numbers carry no keyword information here).
        text = re.sub(r'\d+', ' ', text)
        return text

    def lemmatize_tokens(self, tokens, pos_tags=None):
        """Lemmatize *tokens*, guided by Penn Treebank *pos_tags* when given.

        Without a tag (or past the end of *pos_tags*) the lemmatizer's
        default noun behavior is used.
        """
        lemmatized = []
        for i, token in enumerate(tokens):
            if pos_tags and i < len(pos_tags):
                # Map the Treebank tag's first letter to a WordNet POS;
                # unknown tags default to noun.
                wn_pos = self._WORDNET_POS.get(pos_tags[i][1][:1], 'n')
                lemmatized.append(self.lemmatizer.lemmatize(token, wn_pos))
            else:
                lemmatized.append(self.lemmatizer.lemmatize(token))
        return lemmatized

    def stem_tokens(self, tokens):
        """Return Porter stems of *tokens*."""
        return [self.stemmer.stem(token) for token in tokens]

    def _normalize(self, words, pos_tags, use_stemming):
        """Stem or (POS-aware) lemmatize *words* depending on *use_stemming*."""
        if use_stemming:
            # NOTE(review): stems are matched later against *un-stemmed*
            # stopword/domain-term sets, so some filtering and weighting
            # may miss on this path — confirm intended.
            return self.stem_tokens(words)
        return self.lemmatize_tokens(words, pos_tags)

    def _rank_keywords(self, filtered_words, top_n):
        """Rank words by frequency (domain terms doubled); return top *top_n*."""
        word_freq = Counter(filtered_words)
        weighted_freq = {
            word: freq * (self._DOMAIN_WEIGHT
                          if word in self.software_important_terms else 1)
            for word, freq in word_freq.items()
        }
        # Stable sort keeps first-occurrence order among ties.
        ranked = sorted(weighted_freq.items(), key=lambda kv: kv[1], reverse=True)
        return [word for word, _freq in ranked[:top_n]]

    def extract_keywords(self, text, top_n=5, use_stemming=False):
        """Extract the *top_n* highest-weighted keywords from *text*.

        Parameters
        ----------
        text : str
            Raw input text.
        top_n : int
            Number of keywords to return.
        use_stemming : bool
            Use Porter stemming instead of POS-aware lemmatization.
        """
        processed_text = self.preprocess_text(text)
        words = word_tokenize(processed_text)
        pos_tags = pos_tag(words)

        normalized_words = self._normalize(words, pos_tags, use_stemming)

        # Drop stopwords, very short tokens, and pure numbers.
        filtered_words = [
            word for word in normalized_words
            if (word not in self.all_stopwords
                and len(word) > 2
                and not word.isnumeric())
        ]
        return self._rank_keywords(filtered_words, top_n)

    def extract_keywords_with_pos(self, text, top_n=5, use_stemming=False):
        """Extract keywords as in :meth:`extract_keywords`, keeping only
        tokens whose original POS tag is a noun (NN*) or adjective (JJ*).
        """
        processed_text = self.preprocess_text(text)
        words = word_tokenize(processed_text)
        pos_tags = pos_tag(words)

        normalized_words = self._normalize(words, pos_tags, use_stemming)

        # Filter on the *original* token's tag, paired positionally with
        # the normalized token.
        filtered_words = [
            word for word, (_orig, tag) in zip(normalized_words, pos_tags)
            if (word not in self.all_stopwords
                and len(word) > 2
                and not word.isnumeric()
                and tag.startswith(('NN', 'JJ')))
        ]
        return self._rank_keywords(filtered_words, top_n)
