import copy
import os
import random
import re
import threading
import time
import traceback
from queue import Queue
from typing import List, Dict, Union

import gradio
import redis
import requests
from fuzzywuzzy import fuzz
from lxml.etree import HTML
from regex import regex
from requests import adapters
from tenacity import retry, wait_random, stop_after_attempt, stop_after_delay

try:
    from conf.config import logger, config, BASE_DIR, REDIS_CONFIG
    from utils.constants import BAIDU_SEARCH_PARAMS
except ModuleNotFoundError:
    import os
    import sys
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # 离开IDE也能正常导入自己定义的包
    from conf.config import logger, config, BASE_DIR, REDIS_CONFIG
    from utils.constants import BAIDU_SEARCH_PARAMS


class ProxyPool(object):
    """
    Redis-backed proxy-IP pool.

    A single class-level connection pool is shared by all instances so that
    repeated lookups reuse TCP connections instead of opening new ones.
    """
    # Shared connection pool (class attribute, created once at import time).
    pool = redis.ConnectionPool(
        host=REDIS_CONFIG["host"], port=REDIS_CONFIG["port"],
        db=REDIS_CONFIG["db"], password=REDIS_CONFIG["password"],
        decode_responses=True  # return str instead of bytes
    )

    def get_proxy_list(self) -> List[str]:
        """
        Fetch the full list of proxy IPs from Redis.

        :return: list of proxy strings (e.g. "host:port"); empty list on any
            Redis error (the error is logged, not raised)
        """
        redis_conn = redis.Redis(connection_pool=self.pool)
        try:
            proxy_pool_name = config["proxy_pool_name"]
            # Read the whole list stored under the configured key.
            return redis_conn.lrange(proxy_pool_name, 0, -1)
        except redis.RedisError:
            logger.error(traceback.format_exc())
            return []
        finally:
            # redis_conn is always bound here (assigned before the try block),
            # so the original `if "redis_conn" in dir()` probe was redundant.
            # close() returns the connection to the shared pool.
            redis_conn.close()


class Article(object):
    """
    Represents an article body and provides utilities to count words,
    split the body into sentences or fixed-size chunks, and sample
    sentences for originality checking.
    """

    def __init__(self, content: str, *args, **kwargs):
        """
        Initialize the article.

        :param content: article body text
        """
        self.content = content
        super(Article, self).__init__(*args, **kwargs)

    @property
    def words(self) -> int:
        """Word count of the article body (see count_words for the rules)."""
        return self.count_words(self.content, excluded_char_ist=[])

    @staticmethod
    def count_words(string: str, excluded_char_ist: List) -> int:
        """
        Count words: one CJK character, one English word, one number and one
        punctuation mark each count as 1.

        :param string: string to count words in
        :param excluded_char_ist: characters to strip before counting
        :return: word count
        """
        # Strip the excluded characters first. Iterating the caller's list is
        # safe — str.replace never mutates it, so no defensive copy is needed.
        for char in excluded_char_ist:
            string = string.replace(char, "")

        # Match CJK characters, English words, numbers (optionally signed /
        # decimal / percent) and Unicode punctuation (\p{P} requires `regex`).
        pattern = regex.compile(r'[\u4e00-\u9fa5]|[a-zA-Z]+|-?\d*\.?\d+%?|\p{P}')
        return len(regex.findall(pattern, string))

    @staticmethod
    def split_text(text: str, separators: List[str] = None) -> List[str]:
        """
        Split text on any of the given separator patterns.

        :param text: text to split
        :param separators: separator patterns; defaults to
            config["sentence_separators"], looked up at call time (the
            original default was captured once at import time)
        :return: list of fragments
        """
        if separators is None:
            separators = config["sentence_separators"]
        pattern = '|'.join(separators)
        return re.split(pattern=pattern, string=text)

    def split_content(self) -> List[str]:
        """
        Split the article body into sentences.

        :return: list of sentences
        """
        return self.split_text(text=self.content)

    def split_content_by_chunk_size(self, chunk_size: int) -> List[str]:
        """
        Split the article body into fixed-size chunks.

        Newlines and spaces are removed first; a trailing chunk shorter than
        chunk_size is discarded.

        :param chunk_size: chunk length in characters
        :return: list of chunks (possibly empty)
        """
        text = self.content.replace('\n', '').replace(' ', '')
        sentences = [text[i: i + chunk_size] for i in range(0, len(text), chunk_size)]
        # Guard the empty case: the original indexed sentences[-1]
        # unconditionally and raised IndexError for empty content.
        if sentences and len(sentences[-1]) < chunk_size:
            sentences = sentences[:-1]
        return sentences

    @staticmethod
    def get_evenly_spaced_list(ls: List, k: int) -> List:
        """
        Take k evenly spaced elements from a list of length n.

        :param ls: list to sample from
        :param k: number of elements to take (clamped to len(ls))
        :return: list of k evenly spaced elements
        """
        n = len(ls)
        if k > n:
            logger.warning(f"选取个数应该小于等于列表长度")
            k = n
        if k <= 0:
            return []
        if k == 1:
            # The general formula below divides by (k - 1); the original
            # raised ZeroDivisionError for k == 1.
            return [ls[0]]

        interval = (n - 1) / (k - 1)  # spacing between picked indices
        # int() truncates toward zero, i.e. floor for these non-negative values.
        return [ls[int(i * interval)] for i in range(k)]

    def choose_sentences(self, sentences: List[str], min_len: int = 10, max_len: int = 20,
                         is_random: bool = False, k: int = 10) -> List[str]:
        """
        Pick sentences whose word count lies within [min_len, max_len].

        :param sentences: candidate sentences
        :param min_len: minimum word count of an eligible sentence
        :param max_len: maximum word count of an eligible sentence
        :param is_random: pick randomly instead of evenly spaced
        :param k: number of sentences to pick (clamped to the eligible count)
        :return: selected sentences
        :raises gradio.Error: when no sentence satisfies the length limits
        """
        sentences_new = list()
        for sentence in sentences:
            # Count once per sentence (the original counted twice).
            n_words = self.count_words(sentence, [])
            if min_len <= n_words <= max_len:
                sentences_new.append(sentence)

        if len(sentences_new) == 0:
            raise gradio.Error(f"无法完成检测，原因是满足检测要求的句子数为0")

        if k > len(sentences_new):
            logger.warning(f"选取个数应该小于等于列表长度")
            k = len(sentences_new)

        if is_random:
            # Random sample without replacement.
            return random.sample(sentences_new, k=k)
        # TODO 2023-5-26 pending optimization
        # Evenly spaced sample.
        return self.get_evenly_spaced_list(sentences_new, k)

    def choose_sentences_by_chunk_size(self, sentences: List[str], chunk_size: int = 10,
                                       is_random: bool = False, k: int = 10) -> List[str]:
        """
        Pick sentences whose length equals chunk_size exactly.

        :param sentences: candidate sentences
        :param chunk_size: required sentence length in characters
        :param is_random: pick randomly instead of evenly spaced
        :param k: number of sentences to pick (clamped to the eligible count)
        :return: selected sentences
        :raises gradio.Error: when no sentence has the required length
        """
        sentences_new = [sentence for sentence in sentences if len(sentence) == chunk_size]

        if len(sentences_new) == 0:
            raise gradio.Error(f"无法完成检测，原因是满足检测要求的句子数为0")

        if k > len(sentences_new):
            logger.warning(f"选取个数应该小于等于列表长度")
            k = len(sentences_new)

        if is_random:
            # Random sample without replacement.
            return random.sample(sentences_new, k=k)
        # TODO 2023-5-26 pending optimization
        # Evenly spaced sample.
        return self.get_evenly_spaced_list(sentences_new, k)


class SearchEngine(object):
    """
    Generic search engine: fetch an HTML page and extract the text of a tag.
    """

    def get_source_page(self, url: str) -> str:
        """
        Fetch the HTML source of a page.

        :param url: URL to request
        :return: HTML source text
        :raises ValueError: when the HTTP status is not 200
        """
        # Request headers (plain browser UA).
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/104.0',
        }

        # requests has no default timeout; without one a dead server would
        # hang the caller forever.
        resp = requests.get(url, headers=headers, timeout=10)
        resp.encoding = resp.apparent_encoding  # avoid mojibake when reading e.g. the page title

        if resp.status_code != 200:
            raise ValueError(f"页面请求失败，http状态值不为200，为{resp.status_code}")

        return resp.text

    def get_texts_within_tag(self, html: str, tag_name: str) -> List[str]:
        """
        Collect the text of every occurrence of a given HTML tag.

        :param html: HTML source
        :param tag_name: HTML tag name, e.g. "p", "strong", "em", "span"
        :return: list of tag texts; tags with no direct text are skipped
        """
        # Parse the HTML document.
        tree = HTML(html)

        elements = tree.xpath(f"//{tag_name}")
        # element.text is None for tags without direct text (e.g. <em><b>x</b></em>);
        # the original put those None values into the declared List[str],
        # which breaks downstream string comparison (fuzz.ratio).
        return [element.text for element in elements if element.text is not None]


class BaiduSearchEngine(SearchEngine):
    """
    Baidu search engine: fetch result pages through random proxies (with
    retries) and extract the sentences Baidu highlighted (<em>) as matches.
    """

    @retry(wait=wait_random(min=config["baidu"]["retry"]["min_wait"], max=config["baidu"]["retry"]["max_wait"]),
           stop=(stop_after_attempt(config["baidu"]["retry"]["max_attempt_number"])
                 | stop_after_delay(config["baidu"]["retry"]["stop_after_delay"])))
    def get_source_page(self, url: str) -> str:
        """
        Fetch a Baidu result page through a random proxy, with retries.

        :param url: URL to request
        :return: HTML source text
        :raises gradio.Error: empty proxy pool, proxy failure (407), non-200
            status, or Baidu's anti-bot verification page
        """
        # Request headers.
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/104.0',
            'accept': 'application/json, text/javascript, */*; q=0.01',
        }

        # Copy the shared query-param template so it is never mutated.
        params = copy.deepcopy(BAIDU_SEARCH_PARAMS)

        # Pick a random proxy from the Redis-backed pool.
        proxy_pool = ProxyPool()
        proxy_list = proxy_pool.get_proxy_list()
        if not proxy_list:
            # random.choice raises IndexError on an empty sequence (the pool
            # is empty whenever Redis errored); fail with an explicit,
            # user-visible error instead.
            raise gradio.Error("代理ip池为空，无法发起检索请求")
        proxy = random.choice(proxy_list)
        proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}

        # NOTE(review): this mutates a process-global requests setting
        # (connection retry count) and affects every requests call.
        requests.adapters.DEFAULT_RETRIES = 1

        # Fire the request (bounded by a 5s timeout).
        resp = requests.get(url, params=params, headers=headers, proxies=proxies, timeout=5)
        resp.encoding = resp.apparent_encoding  # avoid mojibake when reading e.g. the page title

        if resp.status_code == 407:  # page title would be "错误：代理服务器拒绝访问"
            raise gradio.Error(f"{proxy}代理失效")
        if resp.status_code != 200:
            raise gradio.Error(f"页面请求失败，http状态值不为200，为{resp.status_code}")

        html = resp.text

        # Detect Baidu's anti-bot interstitial by its page title.
        tree = HTML(html)
        title_list = tree.xpath("//title/text()")
        # Guard the lookup: the original indexed [0] unconditionally and
        # raised IndexError for pages without a <title>.
        if title_list and title_list[0] == "百度安全验证":
            raise gradio.Error("出现百度安全验证，可能遇到了反爬")

        # Optionally persist the raw page for debugging (watch disk usage).
        if config["baidu"]["save_webpage"]:
            timestamp = int(time.time() * 1000)  # millisecond timestamp
            with open(os.path.join(BASE_DIR, f"output/{timestamp}.html"), 'w', encoding='utf-8') as f:
                f.write(html)

        return html

    def get_similar_sentences(self, keyword: str, total_page: int, queue=None) -> List[str]:
        """
        Search Baidu and return the highlighted (<em>) snippets for a keyword.

        :param keyword: keyword to search for
        :param total_page: number of result pages to cover (10 hits per page)
        :param queue: optional queue the result is also published to
            (used by the multi-threaded driver)
        :return: list of similar sentences, e.g.
            ["全国多地出现小区业主自发", "对于市场的实际作用很弱",
             "需要慎重考虑市场环境和政策导向", "无论何种方式，市场的规律都无法打破"]
        """
        # rn = results per response, pn = offset: one request covers all pages.
        url = f"http://www.baidu.com/s?wd={keyword}&rn={10 * total_page}&pn=0"

        html = self.get_source_page(url)
        similar_sentences = self.get_texts_within_tag(html, tag_name="em")

        if queue:
            queue.put({"original_sentence": keyword, "similar_sentences": similar_sentences})

        return similar_sentences

    def get_similar_sentences_by_multi_thread(self, sentences: List[str], total_page: int = 5) -> List:
        """
        Fetch similar sentences for many sentences concurrently (one thread each).

        :param sentences: sentences to search for
        :param total_page: number of result pages per sentence
        :return: list of dicts {"original_sentence": ..., "similar_sentences": [...]}
        :raises gradio.Error: when no sentence produced any highlighted match
        """
        queue = Queue()  # worker-thread -> main-thread channel
        threads = []
        for sentence in sentences:
            t = threading.Thread(target=self.get_similar_sentences, args=(sentence, total_page, queue))
            threads.append(t)
            # Small jitter so the requests do not all fire at the same instant.
            time.sleep(random.uniform(0.1, 0.2))
            t.start()

        for t in threads:
            t.join()

        # NOTE(review): exceptions inside worker threads are not propagated;
        # a failed search simply contributes nothing to the queue.
        ls = list()
        while not queue.empty():
            ls.append(queue.get())

        if len(ls) == 0:
            raise gradio.Error(f"无法完成检测，原因是飘红的句子数为0")

        return ls


def get_originality_info(text: str, min_len: int = 10, max_len: int = 20, chunk_size: Union[int, None] = None,
                         k: int = 10, total_page: int = 5,
                         similarity_limit: int = 90) -> Dict[str, Union[float, List]]:
    """
    Compute the originality score of a text.

    Pipeline: split the text into sentences (or fixed-size chunks), pick k of
    them, search Baidu for each, then score each sentence by the fraction of
    fuzzy-matching results. Originality = 100 - mean similarity.

    :param text: text to check
    :param min_len: minimum word count of a checked sentence (ignored when chunk_size is set)
    :param max_len: maximum word count of a checked sentence (ignored when chunk_size is set)
    :param chunk_size: when given, split by fixed character length instead of by separators
    :param k: number of sentences to check
    :param total_page: number of Baidu result pages to search per sentence
    :param similarity_limit: fuzzy-ratio threshold above which two sentences count as matching
    :return: dict with keys "originality_score" (float),
        "similarity_score_ls" ([sentence, score] pairs) and
        "similarity_details_ls" (full per-sentence detail dicts)
    :raises gradio.Error: empty text, or max_len <= min_len
    """
    # Basic argument checks.
    if not isinstance(k, int):
        k = int(k)
    if not text:
        raise gradio.Error("正文不能为空")
    if max_len <= min_len:
        raise gradio.Error("句子的最大长度限制，不应该小于最小长度限制")

    # 1. Split the body.
    article = Article(content=text)
    if chunk_size:
        sentences = article.split_content_by_chunk_size(chunk_size=chunk_size)
    else:
        sentences = article.split_content()
    logger.debug(f"{sentences}")

    # 2. Pick the sentences to check.
    if chunk_size:
        sentences = article.choose_sentences_by_chunk_size(sentences, chunk_size=chunk_size, k=k)
    else:
        sentences = article.choose_sentences(sentences, min_len=min_len, max_len=max_len, k=k)
    logger.debug(f"{sentences}")

    # 3. Fetch similar sentences from Baidu.
    baidu_search_engine = BaiduSearchEngine()
    similarity_ls = baidu_search_engine.get_similar_sentences_by_multi_thread(sentences, total_page=total_page)
    logger.debug(f"{similarity_ls}")

    # 4. Similarity score per sentence: matching results / total results.
    for item in similarity_ls:
        original_sentence = item["original_sentence"]
        similar_sentences = item["similar_sentences"]

        total_count = len(similar_sentences)
        similar_count = sum(
            1 for sentence in similar_sentences
            if fuzz.ratio(original_sentence, sentence) > similarity_limit
        )
        if total_count:
            similarity_score = round(similar_count / total_count * 100, 2)
        else:
            # Explicit check instead of catching ZeroDivisionError.
            logger.warning(f"百度搜索结果没有相似句子，相似度赋值为0")
            similarity_score = 0
        item.update({"similarity_score": similarity_score})
    logger.info(f"{similarity_ls}")

    # 5. Originality score: 100 - mean of the similarity scores.
    similarity_score_ls = [[item["original_sentence"], item["similarity_score"]] for item in similarity_ls]
    similarity_average_score = sum(item["similarity_score"] for item in similarity_ls) / len(similarity_ls)
    originality_score = round(100 - similarity_average_score, 2)
    logger.info(f"{originality_score}")

    originality_info = {
        "originality_score": originality_score,
        "similarity_score_ls": similarity_score_ls,
        "similarity_details_ls": similarity_ls,
    }

    return originality_info


def main():
    """Demo entry point: run an originality check on a sample article."""
    sample_text = """
        深圳房价近一年来的趋势如何？
        深圳一直以来都是房价高企的城市之一。但是，从过去一年来的房价数据来看，深圳房价似乎出现了一些变化。
        2022年5月，深圳房价为每平方米56408元。接下来几个月，房价持续上涨，一直到2022年12月，达到了每平方米62209元的峰值。但是，自2023年1月起，深圳房价开始下跌，到4月底已经降至每平方米59659元。
        总体来看，深圳房价在过去一年中经历了上涨和下跌两个阶段。尽管整体的房价依然高昂，但是这种小幅度的波动也可能意味着深圳房价的增长已经达到了一个瓶颈，市场正在逐渐趋于平稳。
        对于购房者，现在是入手深圳房产的好时机吗？
        对于购房者而言，现在是入手深圳房产的好时机吗？这需要我们从多个方面进行分析。从房价的走势来看，目前的深圳房价相对于去年同期已经有了一定的下跌。这对于一些有购房意愿但是一直被高昂房价所压制的人来说，无疑是一个好消息。
        从政策层面来看，国家鼓励房地产市场稳健健康发展，各地也出台了相应的房地产调控政策，遏制了房价过快上涨的趋势。如果这种政策能够继续得到执行，那么房价的稳定性将更有保障。
        然而，我们也需要意识到，深圳的房价依然属于较高水平，尤其是对于一些刚需购房者而言。此外，由于深圳是一个国际化城市，人口流动大，供需关系也相对比较复杂，一些热门区域的房价可能仍然会持续上涨，这需要购房者谨慎考虑。
        总体来看，对于购房者而言，现在虽然有一定的购买机会，但是也需要根据自身的经济实力和需求来进行仔细的考虑，避免因为盲目跟风而导致经济压力过大。
    """

    # Check with 40-character chunks and log the resulting report.
    result = get_originality_info(sample_text, chunk_size=40)
    logger.info(f"{result}")


if __name__ == '__main__':
    # Run the demo only when executed as a script, not when imported.
    main()
