import json
import random
import re
import traceback

import requests
from lxml import html
from lxml.html import HtmlElement

from setting import HEADERS, PROXIES, ANWSER_PATH, LOCK, PLATFORM_LIS
from .cookie_utils import getCookie
from .exceptions import RiskControlError, NotLoginError
from .platform_utils import fetch_platform_data, extract_title_img_name_url,concat_url
from .request_utils import ownRequest
from .time_utils import getCurTime
from .url_utils import get_redirect_url, extract_or_return_url, parse_url_get_info, get_platform


def fillter_text(content: str, filter_set):
    """Tokenize raw text into an ordered list of typed segments.

    Product URLs and 7-20 digit SKU codes embedded in *content* are resolved
    through the platform helpers; each match is swapped for a placeholder
    token inside *content*, and the text is finally split on those tokens so
    the returned list preserves the original reading order.

    Args:
        content: Raw text that may embed URLs and/or numeric SKU codes.
        filter_set: Set of already-visited article URLs, forwarded to
            get_article_data to stop recursive article expansion.

    Returns:
        list[dict]: Segments such as {"type": "text", ...},
        {"type": "link"/"link2", ...} or {"type": "extra_article", ...}.
    """
    ################################################ 1. Try to match product links  ################################################
    # Sample texts the pattern must handle:
    # 【京东】https://3.cn/2ihfHx-C 「滴露健康沐浴露薄荷950g」
    # 3.cn/2j-gx91F        (URL with the scheme omitted)
    # 310.78-10fl=300.78   (numeric expression that must NOT match)
    # The regex should match the first two and reject the third.

    link_pattern = r'((?:https?://)?[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}(?:/[^\s]*)?)'

    urls = re.findall(link_pattern, content)

    temp_list1 = []  # link/article segments, consumed at "占位符1" tokens below
    temp_list2 = []  # SKU segments, consumed at "占位符2" tokens below

    for url in urls:
        # The regex may capture scheme-less URLs; normalize before redirecting.
        if "http" not in url:
            url = "https://" + url
        temp_url = get_redirect_url(extract_or_return_url(url).strip())

        # Article link?
        if "group/topic" in temp_url:
            # NOTE(review): no placeholder is substituted into `content` for
            # this branch, so article segments may not line up with the
            # "占位符1" tokens consumed below — confirm article URLs never
            # co-occur with product URLs in one text.
            temp_list1.insert(0, {"type": "extra_article", "text": get_article_data(temp_url, filter_set)})
        # Not an article link
        else:
            # Extract the product ID first, then call the product-detail API
            # for the full record.
            productId, platform_name = parse_url_get_info(temp_url)

            product_info = fetch_platform_data(productId, platform_name)

            # Placeholder. insert(0, ...) here plus pop() during reassembly
            # yields FIFO consumption, matching the text order.
            content = content.replace(url, "endend占位符1endend")

            extra = extract_title_img_name_url(product_info, platform_name) if product_info else {}

            if productId:
                temp_list1.insert(0, {
                    "type": "link",
                    "text": extra.get("productUrl", temp_url),
                    "title": extra.get("title", None),
                    "productImage": extra.get("productImage", None),
                    "productName": extra.get("productName", None),
                    "product_id": productId,
                    "platform_name": platform_name,
                    "link_text": url})
            else:
                temp_list1.insert(0,
                                {"type": "link2", "text": url, "platform_name": get_platform(url), "link_text": url})

    ################################################ 2. Try to match SKU codes  ################################################
    sku_pattern = r'(\d{7,20})'
    sku_matchs = re.findall(sku_pattern, content)

    for sku in sku_matchs:
        for platform in PLATFORM_LIS:
            product_info = fetch_platform_data(sku, platform)  # usually JD

            # if not product_info:
            #     continue

            content = content.replace(sku, "endend占位符2endend")

            extra = extract_title_img_name_url(product_info, platform) if product_info else {}

            temp_list2.insert(0,
                            {
                                "type": "link",
                                "text": extra.get("productUrl", concat_url(sku,platform)),
                                "title": extra.get("title", None),
                                "productImage": extra.get("productImage",None),
                                "productName": extra.get("productName",None),
                                "product_id": sku,
                                "platform_name": platform,
                                "link_text": "网页链接"}
                            )
            # NOTE(review): this break is unconditional, so only the FIRST
            # platform in PLATFORM_LIS is ever tried — confirm intended.
            break
    result = []

    # Reassemble: split on the "endend" markers and interleave plain text
    # with the resolved segments collected above.
    content_list = content.split("endend")

    for i in range(len(content_list)):
        if content_list[i] == "":
            continue
        if content_list[i] == "占位符1":
            result.append(temp_list1.pop())
        elif content_list[i] == "占位符2":
            result.append(temp_list2.pop())
        else:
            result.append(
                {
                    "type": "text",
                    "text": content_list[i]
                }
            )
    return result


def processQuestionId(questionId, tp="question", filter_set=None):
    """Resolve a Douban Q&A question or poll.

    First checks whether the entry is already answered (returning its content
    directly); otherwise submits a canned answer / the first vote option and
    returns the parsed response.

    Args:
        questionId: Question / poll ID.
        tp: Entry type, "question" or "poll".
        filter_set: Visited-article set forwarded to fillter_text.

    Returns:
        tuple: (title, parsed correct answer) for "question";
               (title, [{"option_contents", "is_correct"}, ...]) for "poll".

    Raises:
        NotLoginError: When a response is not valid JSON (cookie most likely
            expired) or a poll returns no options to vote on.
        ValueError: When *tp* is neither "question" nor "poll".
    """
    cookies = getCookie()

    # Pick a random canned answer to submit for unanswered questions.
    with open(ANWSER_PATH, mode="r", encoding="utf-8") as f:
        answer = random.choice(f.read().split('\n'))

    # Extract the anti-CSRF "ck" token from the cookie string (search once;
    # the original ran the same regex twice).
    ck_match = re.search(r'ck=([a-zA-Z0-9]+)', cookies)
    ck_value = ck_match.group(1) if ck_match else None

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        "host": "m.douban.com",
        "Cookie": cookies,
    }

    # BUG FIX: an unknown tp used to fall through to an UnboundLocalError on
    # `result`; fail fast with a clear message instead.
    if tp not in ("question", "poll"):
        raise ValueError(f"unknown question type: {tp!r}")

    # LOCK serializes question answering across worker threads.
    with LOCK:
        vote_id = None

        # 1. Check whether this question/poll has already been answered.
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}?ck={ck_value}"
            res = ownRequest(url, method="get", headers=headers)
            data = res.json()
            if data.get("correct_answer"):
                return data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
        else:  # poll
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}?ck={ck_value}"
            res = ownRequest(url, method="get", headers=headers)
            data = res.json()

            if data.get("options"):
                vote_id = data.get("options")[0].get("id")
                options = [
                    {"option_contents": fillter_text(opt.get('title'), filter_set), "is_correct": opt.get("is_correct")}
                    for opt in data.get("options")]
                if options:
                    return data.get("title"), options

        # 2. Not answered yet: submit an answer / vote.
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}/answer"
            payload = {
                "answer": answer,
                "ck": ck_value
            }
            res = ownRequest(url, "post", headers=headers, data=payload)
            try:
                data = res.json()
                result = data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
            except Exception as exc:
                # BUG FIX: the old message called res.json() again, which
                # re-raised the decode error and masked NotLoginError.
                raise NotLoginError(f"Cookie可能失效，提交答案的响应内容：{res.text}") from exc
        else:  # poll
            # BUG FIX: when the first GET returned no options, vote_id was
            # undefined and the code below crashed with a NameError.
            if vote_id is None:
                raise NotLoginError(f"Cookie可能失效，投票 {questionId} 未返回 options")

            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}/vote"
            body = f'option_ids={vote_id}&ck={ck_value}'
            res = ownRequest(url, "post", headers=headers, data=body)
            try:
                data = res.json()
                result = data.get("title"), [
                    {"option_contents": fillter_text(opt.get('title'), filter_set), "is_correct": opt.get("is_correct")}
                    for opt in data.get("options")]
            except Exception as exc:
                raise NotLoginError(f"Cookie可能失效，提交答案的响应内容：{res.text}") from exc

        return result


def parseQuoteItem(item, filter_set):
    """Parse a quoted reply (div.reply-quote-content) into a comment dict.

    Args:
        item: lxml element wrapping the quote block.
        filter_set: Visited-article set forwarded to extractDataFromContents.

    Returns:
        dict: userName/avatar (always None for quotes), the quoted user's ID
        and reply ID, and a "comments" list of typed segments.
    """
    quote_node = item.xpath("./div[@class='reply-quote-content']")[0]

    parsed = {
        "comments": [],
        # A quote carries no avatar or nickname for the quoted user.
        "userName": None,
        "avatar": None,
        # ID of the quoted user.
        "refUserId": quote_node.xpath("./@data-author-id")[0].strip(),
        # ID of the quoted reply.
        "refReplyId": quote_node.xpath("./@data-ref-cid")[0].strip(),
    }

    # Child classes seen here: quote-img / all ref-content / pubdate.
    # (fillter_text could be used instead, but would be more cumbersome.)
    for node in quote_node:
        cls = node.get("class") or ""

        # Quoted image.
        if "quote-img" in cls:
            img_srcs = node.xpath(".//div[@class='cmt-img']/img/@src")
            # Some quotes carry the tag but no actual image URL.
            if img_srcs:
                parsed["comments"].append({
                    "type": "image",
                    "text": img_srcs[0]  # TODO image handling
                })

        # Quoted text content.
        elif "all ref-content" in cls:
            texts = [t.strip() for t in node.xpath("./text()")]
            parsed["comments"].extend(extractDataFromContents(texts, filter_set))

        # @-mentioned user inside the quote.
        elif "pubdate" in cls:
            href = node.xpath("./a/@href")[0]
            parsed["comments"].append({
                "type": "@",
                "link": href,
                "text": node.xpath("./a/text()")[0],
                "userId": href.strip().split("/")[-2]
            })

    return parsed


def pareseReplyItem(item, filter_set):
    """Parse one top-level reply <li> element into a dict.

    Args:
        item: lxml element for li.comment-item.reply-item.
        filter_set: Visited-article set forwarded down to the content parsers.

    Returns:
        dict: Author fields, reply ID, a "comments" list of typed segments,
        and (when present) publicTime / province from the footer line.
    """
    face_node = item.xpath("./div[@class='user-face']")[0]
    doc_node = item.xpath("./div[@class='reply-doc content']")[0]

    parsed = {
        "userName": face_node.xpath("./a/img/@alt")[0],
        "avatar": face_node.xpath("./a/img/@src")[0],  # TODO image handling
        "userId": face_node.xpath("./a/@href")[0].strip().split("/")[-2],
        "replyId": item.xpath("./@id")[0].strip(),
        "comments": [],
    }

    # Child classes seen here: bg-img-green / reply-quote / comment-photos /
    # reply-content. (fillter_text could be used instead, but is clumsier.)
    for node in doc_node:
        cls = node.get("class") or ""

        # Quoted reply.
        if "reply-quote" in cls:
            parsed["comments"].append({
                "type": "quote",
                "text": parseQuoteItem(node, filter_set)
            })

        # Attached photos.
        elif "comment-photos" in cls:
            img_srcs = node.xpath("./div/div/img/@src")
            # Some replies carry the tag but no actual image URL.
            if img_srcs:
                parsed["comments"].append({
                    "type": "image",
                    "text": img_srcs[0]  # TODO image handling
                })

        # Reply body text / links.
        elif "reply-content" in cls:
            pieces = [t.strip() for t in node.xpath("./div/text() | ./div/a/@href | ./div/p/text()")]
            parsed["comments"].extend(extractDataFromContents(pieces, filter_set))

        # Timestamp + IP footer, e.g. "2025-06-02 11:19:34 上海".
        # Checked independently of the branches above (not elif), matching
        # the original control flow.
        if "bg-img-green" in cls:
            stamp_parts = node.xpath(".//span[@class='pubtime']/text()")[0].strip().split(" ")
            # Date + time.
            parsed["publicTime"] = " ".join(stamp_parts[:2])
            # Province / region.
            parsed["province"] = stamp_parts[-1]

    return parsed


def extractDataFromContents(contents, filter_set):
    """Normalize a mixed list of text strings and lxml elements in place.

    Iterates index-wise in REVERSE so items can be deleted, replaced, or
    expanded into multiple entries without shifting the indices still to be
    visited. Plain strings are expanded via fillter_text; <a>, question/poll
    <div> and <img> elements are replaced by typed dicts.

    Args:
        contents: List of str and lxml HtmlElement items (mutated in place).
        filter_set: Visited-article set forwarded to get_article_data /
            processQuestionId to break recursion cycles.

    Returns:
        list: The same list object, now containing typed dicts (elements
        matching no branch are left untouched).
    """
    # Walk backwards so del/insert below never disturbs unvisited indices.
    for _ in range(len(contents))[::-1]:
        element = contents[_]
        # Drop empty entries.
        if element == "":
            del contents[_]
            continue

        # Plain text: expand into one or more typed segments.
        if not isinstance(element, HtmlElement):
            """
            [[{"type":"text","text":"有用请d"},{"type":"text","text":"test"}]]
            处理后
            [{"type":"text","text":"有用请d"},{"type":"text","text":"test"}]
            """
            del contents[_]
            # Re-insert in reverse so the expanded segments keep text order.
            for j in fillter_text(element, filter_set)[::-1]:
                contents.insert(_, j)

            continue

        # <a> tag
        if element.tag == "a":

            url = element.get("href").strip()
            temp_url = get_redirect_url(extract_or_return_url(url).strip())

            # Fall back to a generic label when the anchor has no text.
            if element.text is None:
                link_text = "网页链接"
            else:
                link_text = element.text.strip()

            # Article link?
            if element.get("class") == "topic-main" or "group/topic" in temp_url:
                contents[_] = {"type": "extra_article", "text": get_article_data(temp_url, filter_set)}
                continue

            # Ordinary link: try to extract a product ID.
            productId, platform_name = parse_url_get_info(temp_url)

            # No product ID: coupon link or some other non-product URL.
            if not productId:
                # return {"type": "link2", "text": temp_url,"platform":get_platform(url),"link_text":link_text }
                contents[_] = {"type": "link2", "text": temp_url,
                            "platform": get_platform(temp_url) if not platform_name else platform_name,
                            "link_text": link_text}
            # Has a product ID: it is a product link.
            else:
                productInfo = fetch_platform_data(productId, platform_name)

                extra = extract_title_img_name_url(productInfo, platform_name) if productInfo else {}




                # return {"type": "link", "text": temp_url,"product_id":productId,"platform_name":platform_name,"link_text":link_text}
                contents[_] = {
                    "type": "link",
                    "text": extra.get("productUrl", temp_url),
                    "title": extra.get("title", None),
                    "productImage": extra.get("productImage", None),
                    "productName": extra.get("productName", None),
                    "product_id": productId,
                    "platform_name": platform_name,
                    "link_text": link_text
                }

        # Embedded question / poll widget.
        elif element.tag == "div" and "data-entity-type" in element.attrib:
            if element.attrib["data-entity-type"] == "question":
                question, answer = processQuestionId(element.get("data-id"), "question", filter_set)
                contents[_] = {"type": "question", "question": question, "answer": answer}
            elif element.attrib["data-entity-type"] == "poll":
                question, answer = processQuestionId(element.get("data-id"), "poll", filter_set)
                contents[_] = {"type": "poll", "question": question, "answer": answer}

        # Image.
        elif element.tag == "img":
            contents[_] = {"type": "image", "text": element.get("src").strip()}

    return contents


def fetch_data(group_name, unique_id, url):
    """Scrape one article and stamp it with group/task metadata.

    Args:
        group_name: Name of the Douban group the article belongs to.
        unique_id: Caller-supplied task identifier.
        url: Article URL handed to get_article_data.

    Returns:
        dict: The parsed article (or failure marker) plus group_name,
        unique_id and extractTime fields.
    """
    return {
        **get_article_data(url),
        "group_name": group_name,
        "unique_id": unique_id,
        "extractTime": getCurTime(),
    }


def get_article_data(url, filter_set=None):
    """Fetch and parse a single Douban group-topic page.

    Notes:
        * URLs carrying an _spm_id parameter return page data without a Cookie.
        * https://www.douban.com/doubanapp/dispatch?uri=/group/topic/328900637
          returns the article for that ID directly, with no Cookie or other
          parameters required.

    Args:
        url: Article URL to scrape.
        filter_set: URLs already visited during this crawl; breaks the
            article -> embedded link -> article recursion. A fresh set is
            created when None.

    Returns:
        dict: Parsed article payload on success, otherwise a
        {"message": ..., "flag": "__失败N__"} marker (0 = recursion,
        1 = login/risk control, 2 = IndexError from XPath, 3 = anything else).
    """
    # BUG FIX: identity comparison for None (was `filter_set == None`).
    if filter_set is None:
        filter_set = set()
    if url in filter_set:
        return {"message": "循环递归 ", "flag": "__失败0__"}

    filter_set.add(url)

    try:
        response = ownRequest(url, headers=HEADERS, proxies=PROXIES)
        # BUG FIX: a None response previously slipped past the risk-control
        # check and crashed later with AttributeError on response.text; fail
        # explicitly (still routed to the "__失败3__" handler below).
        if response is None:
            raise ValueError(f"请求无响应 => {url}")
        if "你没有权限访问这个页面" in response.text or "到别处去看看" in response.text:
            raise RiskControlError(f"网页提示没有权限 => {url}")

        tree = html.fromstring(response.text)

        # Author / meta fields.
        avatar = tree.xpath('//div[@class="user-face"][1]/a/img/@src')[0].strip()
        userName = tree.xpath('//div[@class="user-face"][1]/a/img/@alt')[0].strip()
        publicTime = tree.xpath('//span[@class="create-time"]/text()')[0].strip()
        userId = tree.xpath('//div[@class="user-face"][1]/a/@href')[0].strip().split("/")[-2]

        # Title.
        title = tree.xpath("//*[@class='article']/h1/text()")[0].strip()

        # 1. Body content: text nodes, inline images, plain/topic links, and
        # embedded question / poll widgets, normalized into typed dicts.
        contents = tree.xpath(
            '//div[@id="link-report"]//p/text() | \
            //*[@id="link-report"]//div[@class="image-wrapper"]/img[@src] | \
            //div[@class="topic-doc"]//a[@class="link"] | \
            //div[@class="topic-doc"]//a[@class="topic-main"] |\
            //div[@data-entity-type="question"] |\
            //div[@data-entity-type="poll"]'
        )
        contents = extractDataFromContents(contents, filter_set)

        # 2. Replies.
        replyItems = tree.xpath('//li[@class="clearfix comment-item reply-item"]')
        replys = [pareseReplyItem(item, filter_set) for item in replyItems]

        # 3. Large photo URLs, scraped from the inline page config JSON.
        # Backslashes are stripped because the URLs are JSON-escaped.
        photos_url = [
            m.replace("\\", "")
            for m in re.findall(
                r'"large":{"height":.*?,"size":.*?,"url":"(?P<url>.*?)","width":.*?}',
                response.text,
            )
        ]

        return {
            "avatar": avatar,
            "article_url": url,
            "userId": userId,
            "userName": userName,
            "publicTime": publicTime,
            "title": title,
            "content": contents,
            "replys": replys,
            "photos_url": photos_url,
        }

    except (NotLoginError, RiskControlError) as e:
        print(getCurTime(), e, f"文章URL：{url}")
        return {"message": str(e), "flag": "__失败1__"}
    except IndexError as e:
        # Usually a proxy/blocked page missing the expected nodes.
        print(getCurTime(), "报错索引超出范围，代理问题，采集下一个...", e, f"文章URL：{url}")
        traceback.print_exc()
        return {"message": str(e), "flag": "__失败2__"}
    except Exception as e:
        print(getCurTime(), f"出错=> {e}", f"文章URL：{url}")
        traceback.print_exc()
        return {"message": str(e), "flag": "__失败3__"}


def toDatabase(data):
    """Validate scraped article data and push it to the receive endpoints.

    Args:
        data: Parsed article dict (must contain "article_url" when pushed).

    Returns:
        bool: True when pushed successfully (or deliberately skipped because
        of recursion), False when the data is incomplete or the push failed.
    """
    # Round-trip through JSON to get a plain serializable structure and a
    # single string we can scan for failure markers.
    temp_str_data = json.dumps(data, ensure_ascii=False)
    data = json.loads(temp_str_data)

    # Incomplete data: do not push.
    for flag in ("__失败1__", "__失败2__", "__失败3__"):
        if flag in temp_str_data:
            print(getCurTime(), f"数据不完整，不推送，失败原因 {flag}: {str(data)[:300]}...")
            return False

    # Recursive collection: skip the push but treat the item as handled.
    if "循环递归" in temp_str_data:
        print(getCurTime(), f"数据采集 循环递归，不推送: {str(data)[:300]}...")
        return True

    urls = ["http://xdrj.lottefuture.com/zxy/V1/Gen2/ed1XF/lineReportController/receive/douban",
            "https://forward.lottefuture.com/api/receive/douban"]

    req = None
    for url in urls:
        # NOTE(review): verify=False disables TLS certificate checks —
        # confirm this is intentional for the forward endpoint.
        req = requests.post(url, json=data, verify=False)

    # Only the LAST endpoint's response decides success (original behavior,
    # kept deliberately). BUG FIX: the body was previously decoded twice, and
    # an empty URL list would have raised NameError on `req`.
    body = req.content.decode() if req is not None else ""
    if "success" in body or "接收成功" in body:
        print(getCurTime(), f"推送成功:  URL = {data['article_url']} 完成采集,{str(data)[:300]}...")
        return True
    else:
        print(getCurTime(), "推送请求失败: ", data['article_url'])
        return False