import multiprocessing
import threading
import pickle
import jieba
import numpy as np
import pandas as pd
from jieba import Tokenizer


class SentimentCalculator:
    """Thread-safe singleton that scores sentence sentiment with a
    BosonNLP-style lexicon plus negation-word and degree-adverb weighting.

    Resources (user dict, stop words, negation words, degree adverbs,
    sentiment lexicon) are loaded once, on first construction.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: the lock is only taken on first creation.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    # BUG FIX: object.__new__ must not receive extra
                    # positional/keyword arguments — call it with cls only.
                    cls._instance = super().__new__(cls)
                    cls._instance.initialized = False
        return cls._instance

    def __init__(self):
        # __init__ runs on every SentimentCalculator() call; this flag makes
        # the expensive resource loading happen only once.
        if not self.initialized:
            self.initialized = True
            # Build a private tokenizer instance.
            self.tokenizer = Tokenizer()
            # Initialize the global jieba tokenizer (load the user dictionary).
            jieba.load_userdict("./data/userdict.txt")
            # Load the stop-word list.
            self.load_stopwords("./data/stopwords.txt")
            # Load the negation-word list.
            self.load_not_words("./data/情感极性词典/否定词.txt")
            # Load the degree-adverb weights.
            self.load_degree_words("./data/情感极性词典/程度副词.txt")
            # Load the sentiment lexicon.
            self.load_sentiment_dict("./data/情感极性词典/BosonNLP_sentiment_score.txt")

    def load_model(self, model_path):
        """Unpickle a model file into ``self.model``.

        NOTE(review): ``pickle.load`` can execute arbitrary code — only use
        with trusted model files.
        """
        with open(model_path, 'rb') as f:
            self.model = pickle.load(f)

    def load_stopwords(self, file_path):
        """Read one stop word per line into ``self.stopwords`` (a set)."""
        with open(file_path, 'r', encoding='utf-8') as fr:
            self.stopwords = {line.strip() for line in fr}

    def load_not_words(self, file_path):
        """Read one negation word per line into ``self.not_words`` (a list)."""
        with open(file_path, 'r', encoding='utf-8') as fr:
            self.not_words = [line.strip() for line in fr]

    def load_degree_words(self, file_path):
        """Read ``word,weight`` lines into ``self.degree_words``.

        Fixes: each line is split once instead of twice, the trailing
        newline is stripped from the weight, and blank or comma-less lines
        are skipped instead of raising IndexError.
        """
        self.degree_words = {}
        with open(file_path, 'r', encoding='utf-8') as fr:
            for line in fr:
                parts = line.strip().split(',')
                if len(parts) >= 2:
                    self.degree_words[parts[0]] = parts[1]

    def load_sentiment_dict(self, file_path):
        """Read space-separated ``word score`` lines into ``self.sentiment_dict``.

        Fixes: each line is split once instead of three times, and the
        trailing newline is stripped from the score. Lines that do not split
        into exactly two fields are skipped, as before.
        """
        self.sentiment_dict = {}
        with open(file_path, 'r', encoding='utf-8') as fr:
            for line in fr:
                parts = line.strip().split(' ')
                if len(parts) == 2:
                    self.sentiment_dict[parts[0]] = parts[1]

    def seg_word(self, sentence):
        """Tokenize *sentence* and drop stop words, preserving token order."""
        return [word for word in self.tokenizer.cut(sentence)
                if word not in self.stopwords]

    def classify_words(self, word_list):
        """Partition token positions into sentiment / negation / degree maps.

        Returns three dicts keyed by position in *word_list*: sentiment
        score (from the lexicon), ``-1`` for negation words, and the degree
        weight. Priority when a word appears in several lists:
        degree > negation > sentiment (same as the original chained checks).
        """
        sen_word = {}
        not_word = {}
        degree_word = {}
        for i, word in enumerate(word_list):
            if word in self.degree_words:
                degree_word[i] = self.degree_words[word]
            elif word in self.not_words:
                not_word[i] = -1
            elif word in self.sentiment_dict:
                sen_word[i] = self.sentiment_dict[word]
        return sen_word, not_word, degree_word

    def score_sentiment(self, sen_word, not_word, degree_word, seg_result):
        """Accumulate the weighted sentiment score over *seg_result*.

        Each sentiment word contributes ``W * score``; negation (*W *= -1*)
        and degree (*W *= weight*) words located between one sentiment word
        and the next adjust the weight applied to the following ones.

        Fix: removed the dead ``i = sentiment_index_list[...]`` reassignment
        at the end of the loop body — rebinding a ``for`` target does not
        skip iterations in Python, so it never had any effect on the result.
        NOTE(review): W is never reset between sentiment words, and modifiers
        before the first sentiment word are ignored; both quirks are kept
        as-is to preserve scores — confirm whether they are intended.
        """
        W = 1
        score = 0
        sentiment_index = -1
        sentiment_index_list = list(sen_word.keys())
        for i in range(len(seg_result)):
            if i in sen_word:
                score += W * float(sen_word[i])
                sentiment_index += 1
                if sentiment_index < len(sentiment_index_list) - 1:
                    # Fold in modifiers found between this sentiment word
                    # and the next one; they affect subsequent scores.
                    for j in range(sentiment_index_list[sentiment_index],
                                   sentiment_index_list[sentiment_index + 1]):
                        if j in not_word:
                            W *= -1
                        elif j in degree_word:
                            W *= float(degree_word[j])
        return score

    def sentiment_score(self, sentence):
        """Return the sentiment score of *sentence*.

        Pipeline: 1) tokenize and drop stop words; 2) classify tokens into
        sentiment / negation / degree maps; 3) compute the weighted score.
        """
        seg_list = self.seg_word(sentence)
        sen_word, not_word, degree_word = self.classify_words(seg_list)
        return self.score_sentiment(sen_word, not_word, degree_word, seg_list)


def apply_parallel_func(df, func, analyzer_func, num_processes=None):
    """Apply ``func(part, analyzer_func)`` to row-chunks of *df* in parallel.

    Splits *df* into ``num_processes`` pieces, processes each piece in a
    worker process, and returns the concatenated results.

    Fixes:
    - ``pool.map`` with a lambda always raised ``PicklingError`` (lambdas
      cannot be pickled for multiprocessing); ``starmap`` with explicit
      argument tuples is used instead. *func* must still be a picklable,
      module-level callable, as must *analyzer_func*.
    - ``num_processes`` used to default to ``multiprocessing.cpu_count()``
      evaluated once at import time; it is now resolved at call time
      (``None`` → ``cpu_count()``), which is behaviorally equivalent for
      callers that omit it.
    - The pool is managed with ``with`` so workers are cleaned up even if a
      worker raises.
    """
    if num_processes is None:
        num_processes = multiprocessing.cpu_count()
    # Split the DataFrame's rows into roughly equal parts.
    df_split = np.array_split(df, num_processes)
    with multiprocessing.Pool(processes=num_processes) as pool:
        # starmap blocks until all chunks are processed.
        results = pool.starmap(func, [(part, analyzer_func) for part in df_split])
    # Merge the per-chunk results back into one DataFrame.
    return pd.concat(results)
