import json
import requests
import pandas as pd
import os
import re
import regex
from loguru import logger
from collections import defaultdict

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from FashionShowComment.libs.timer import time_count


class SocialContentEmotion(object):
    """Sentiment classification for Chinese social-media posts.

    Combines a fast keyword lookup (positive / neutral / negative word
    lists matched with the ``regex`` module) with a remote sentiment-model
    HTTP service, and writes the labelled data back out as an Excel file.
    """

    def __init__(self):
        self.raw_dir = BASE_DIR + "/data/"    # source-data directory
        self.res_dir = BASE_DIR + "/res/"     # result-output directory
        self.words_dir = BASE_DIR + "/libs/"  # sentiment word-list directory

    def load_words(self):
        """Load the positive / neutral / negative sentiment word lists."""
        self.pos_words = self.word_list("positive_word")
        self.neu_words = self.word_list("neutral_word_content")
        self.neg_words = self.word_list("negative_word")

    def word_list(self, file_name):
        """Read one stripped word per line from ``<words_dir>/<file_name>.txt``."""
        with open(self.words_dir + file_name + ".txt", encoding="utf-8") as f:
            # Iterate the file object directly instead of readlines():
            # same result without the intermediate list.
            return [line.strip() for line in f]

    def get_file_name(self, file_dir):
        """Return the non-directory file names found under *file_dir*.

        NOTE: the original returned one list of names *per directory
        walked* (a list of lists); that shape is preserved for callers.
        """
        return [files for _root, _dirs, files in os.walk(file_dir)]

    @time_count
    def load_data(self, file_name, file_title, file_content):
        """Load ``<raw_dir>/<file_name>.xlsx`` into ``self.data``.

        Args:
            file_name: Excel file name without the ``.xlsx`` suffix.
            file_title: name of the column holding the post title.
            file_content: name of the column holding the post body.
        """
        self.data = pd.read_excel(self.raw_dir + file_name + ".xlsx")
        print("数据量：%s" % len(self.data))

        # Sentiment is judged on title + content concatenated; NaN cells
        # become empty strings first so the join never produces "nan".
        self.data["new_content"] = (
            self.data[file_title].fillna("").map(str)
            + self.data[file_content].fillna("").map(str)
        )

    def _lookup_emotion(self, text):
        """Return ``(label, matched_word)`` from the word lists, or None.

        Lists are checked in the original priority order:
        positive (1), then negative (-1), then neutral (0).
        """
        for words, label in ((self.pos_words, 1),
                             (self.neg_words, -1),
                             (self.neu_words, 0)):
            for w in words:
                m = regex.search(w, text)
                if m is not None:
                    return label, m.group()
        return None

    def _model_emotion(self, text, url):
        """POST *text* to the sentiment service; return ``(emotion, "模型")``.

        A timeout is set so a hung service no longer blocks the whole run
        forever (the original post had no timeout).
        """
        res = requests.post(url, data=json.dumps({"text": text}), timeout=60)
        feedback = json.loads(res.text)
        return feedback["details"]["emotion"], "模型"

    def _judge(self, text, url, long_text_emotion):
        """Shared sentiment-judging logic for all platforms.

        Args:
            text: raw post text (coerced to ``str``).
            url: sentiment-service endpoint.
            long_text_emotion: default label for cleaned texts > 500 chars
                (this is the only difference between the platform variants).

        Returns:
            ``(emotion, reason)`` where *emotion* is -1/0/1 (or the model's
            label) and *reason* is the matched word or a tag string.
        """
        text = str(text)

        # No Chinese or Latin characters at all (and not the slang "yyds")
        # -> nothing judgeable.
        if re.search("[\u4e00-\u9fa5A-Za-z]+", text) is None and "yyds" not in text:
            return 0, "无文字内容"

        # Keep only runs of Chinese/Latin characters, comma-separated.
        # Joining onto an empty first element reproduces the leading ","
        # the original concatenation loop produced.
        text = ",".join([""] + re.findall("[\u4e00-\u9fa5A-Za-z]+", text))

        if len(text) > 500:
            return long_text_emotion, "字数大于500"

        # Short texts get the cheap word-list lookup first.
        if len(text) <= 100:
            hit = self._lookup_emotion(text)
            if hit is not None:
                return hit

        # 100 < len <= 500, or a short text with no word-list hit:
        # ask the remote model.
        return self._model_emotion(text, url)

    def emotion_judge_zh_ks_bili_dy(self, text, url):
        """Sentiment for Zhihu / Kuaishou / Bilibili / Douyin posts.

        Cleaned texts longer than 500 characters default to neutral (0).
        """
        return self._judge(text, url, 0)

    def emotion_judge_red_wb(self, text, url):
        """Sentiment for Xiaohongshu (RED) / Weibo posts.

        Cleaned texts longer than 500 characters default to positive (1)
        -- preserved from the original logic; NOTE(review): confirm this
        asymmetry with the other platforms is intentional.
        """
        return self._judge(text, url, 1)

    @time_count
    def output_data(self, date, file_name, data_res):
        """Write *data_res* to ``<res_dir>/<file_name>_res_<date>.xlsx``."""
        # res_dir already ends with "/" -- the original added a second
        # slash, which only worked because the OS collapses "//".
        output_path = self.res_dir + file_name + "_res_" + date + ".xlsx"

        # strings_to_urls=False stops xlsxwriter from converting URL-like
        # cells into hyperlinks (and from hitting Excel's URL limits).
        with pd.ExcelWriter(
            output_path,
            engine='xlsxwriter',
            engine_kwargs={'options': {'strings_to_urls': False}},
        ) as writer:
            data_res.to_excel(writer, sheet_name='sheet1', index=False)

    @time_count
    def run_emotion(self, platform_name, url):
        """Judge sentiment per platform and return the labelled frame.

        Splits ``self.data`` by platform, applies the matching judge, then
        unpacks the ``(emotion, matched_word)`` tuples into the
        ``emotions`` and ``match_word`` columns.
        """
        parts = []

        # Xiaohongshu / Weibo
        data_red = self.data.loc[
            self.data[platform_name].str.contains("小红书|微博")
        ].copy()
        data_red["emotion"] = data_red["new_content"].apply(
            lambda x: self.emotion_judge_red_wb(x, url)
        )
        self.data_red = data_red
        parts.append(data_red)

        # Bilibili / Kuaishou / Zhihu / Douyin
        data_bili = self.data.loc[
            self.data[platform_name].str.contains("B站|快手|知乎|抖音")
        ].copy()
        data_bili["emotion"] = data_bili["new_content"].apply(
            lambda x: self.emotion_judge_zh_ks_bili_dy(x, url)
        )
        self.data_bili = data_bili
        parts.append(data_bili)

        # WeChat rows get a fixed neutral label. .copy() avoids the
        # SettingWithCopyWarning the original triggered, and storing a
        # (label, word) tuple keeps the column shape consistent with the
        # other platforms -- the original stored the bare string '0',
        # which made the [1] unpacking below raise IndexError.
        data_wx = self.data.loc[
            self.data[platform_name].str.contains("微信")
        ].copy()
        data_wx["emotion"] = [(0, "wx")] * len(data_wx)
        self.data_wx = data_wx
        parts.append(data_wx)

        # DataFrame.append was removed in pandas 2.0 -> use pd.concat.
        # reset_index because the original looped df.loc[i] over
        # range(len(df)) against the *source* row labels, which breaks
        # (KeyError / phantom rows) whenever rows were filtered out.
        df = pd.concat(parts).reset_index(drop=True)

        # Vectorized tuple unpacking replaces the per-row loop.
        df["emotions"] = df["emotion"].str[0]
        df["match_word"] = df["emotion"].str[1]
        return df.drop(columns=["emotion"])

    def run(self, file_name, platform_name, file_title, file_content, date, url):
        """End-to-end pipeline: load data and word lists, judge, export."""
        self.load_data(file_name, file_title, file_content)
        self.load_words()
        data_res = self.run_emotion(platform_name, url)
        self.output_data(date, file_name, data_res)


if __name__ == '__main__':
    # Log file: loguru expands {time} to a timestamp, so each run gets
    # its own dated log file alongside stderr output.
    logger.add("../log/file_{time}.log")
    logger.info("program running --- ")
    # NOTE(review): nothing here instantiates SocialContentEmotion or
    # calls .run(...) -- presumably this module is driven from elsewhere;
    # confirm the entry point is intentional.