import json
import random
import re
import time
from http.cookies import SimpleCookie

from setting import *

from .cookie_utils import getCookie
from .data_utils import extractDataFromContents, fillter_text
from .exceptions import NotLoginError
from .request_utils import ownRequest

#from .data_utils import extractDataFromContents,fillter_text



#读取cookie 文件
def parse_cookie(cookie_str):
    """Parse a raw Cookie header string into a plain {name: value} dict."""
    jar = SimpleCookie()
    jar.load(cookie_str)
    parsed = {}
    for name, morsel in jar.items():
        parsed[name] = morsel.value
    return parsed
# 豆瓣问题
def _parse_api_response(res, url):
    """Decode a submit-answer response, raising NotLoginError on failure.

    FIX: the original accessed data.code / data.msg on a dict, which raised
    AttributeError; its bare `except:` then reported EVERY submission —
    including successful ones — as a login failure, and also re-wrapped the
    deliberately raised NotLoginError with a misleading message.
    """
    try:
        data = res.json()
    except Exception:
        # Only JSON-decode failures land here now.
        msg = f"Cookie可能失效，提交答案的响应内容无法解析: {url}"
        print(msg)
        raise NotLoginError(msg)

    code = data.get("code")
    # Treat only an explicitly non-200 code as an error; a success response
    # may omit the field entirely, so `None` must not trigger the raise.
    if code is not None and str(code) != '200':
        msg = f"Cookie可能失效,问题接口为: {url},接口报错{data.get('msg')}"
        print(msg)
        raise NotLoginError(msg)
    return data


def processQuestionId(questionId, tp="question", filter_set=None):
    """Answer a Douban Q&A question or poll and return its content.

    The question is fetched first; if it was already answered the API returns
    the correct answer directly. Otherwise a random canned answer (read from
    ANWSER_PATH) is submitted and the API response is parsed.

    Args:
        questionId: question/poll ID.
        tp: question type, "question" (Q&A) or "poll" (vote).
        filter_set: forwarded to fillter_text() when cleaning answer text.

    Returns:
        tuple: (title, correct_answer) for "question";
               (title, [{"option_contents", "is_correct"}, ...]) for "poll".

    Raises:
        NotLoginError: the API reported an error / the cookie seems expired.
    """
    # Read the stored cookie and pull out the values the API needs.
    cookies = parse_cookie(getCookie())
    ck_value = cookies.get("ck")
    dbcl2 = cookies.get("dbcl2")

    # Random canned answer, used only when the question is still unanswered.
    with open(ANWSER_PATH, mode="r", encoding="utf-8") as f:
        answer = random.choice(f.read().split('\n'))

    headers = {
        'Referer': f'https://m.douban.com/poll/question/{questionId}/',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
        'X-Requested-With': 'XMLHttpRequest'
    }

    # FIX: the original dict listed 'dbcl2' twice (a hard-coded token plus the
    # parsed one); Python silently keeps only the last duplicate key, so only
    # the parsed session cookie ever mattered — send just that.
    request_cookies = {'dbcl2': f'{dbcl2}'}

    with LOCK:  # FIX: context manager replaces manual acquire()/release()
        answer_data = None
        vote_id = None

        # --- step 1: check whether the question was already answered ---
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}?ck={ck_value}"
            data = ownRequest(url, method="get", headers=headers, cookies=request_cookies).json()
            if data.get("correct_answer"):
                answer_data = data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
        elif tp == "poll":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}?ck={ck_value}"
            data = ownRequest(url, method="get", headers=headers, cookies=request_cookies).json()
            if data.get("options"):
                vote_id = data.get("options")[0].get("id")
                options = [
                    {
                        "option_contents": fillter_text(opt.get('title'), filter_set),
                        "is_correct": opt.get("is_correct")
                    }
                    for opt in data.get("options")
                ]
                if options:
                    answer_data = data.get("title"), options

        if answer_data:
            return answer_data

        # --- step 2: not answered yet — submit an answer / vote ---
        result = {}
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}/answer"
            payload = {
                'answer': answer,
                # FIX: was a hard-coded foreign token 'eGpL'; use the ck from
                # this session's cookie, matching the GET URLs above.
                'ck': ck_value,
                'question_id': f'{questionId}'
            }
            res = ownRequest(url, "post", headers=headers, data=payload, cookies=request_cookies)
            data = _parse_api_response(res, url)
            result = data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
        elif tp == "poll":
            # FIX: the original referenced vote_id unconditionally and crashed
            # with NameError when the first GET returned no options.
            if vote_id is None:
                raise NotLoginError(f"Cookie可能失效,投票 {questionId} 未返回可投的选项")
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}/vote"
            body = f'option_ids={vote_id}&ck={ck_value}'
            res = ownRequest(url, "post", headers=headers, data=body, cookies=request_cookies)
            data = _parse_api_response(res, url)
            result = data.get("title"), [
                {
                    "option_contents": fillter_text(opt.get('title'), filter_set),
                    "is_correct": opt.get("is_correct")
                }
                for opt in data.get("options")
            ]
        # TODO
        # time.sleep(random.randint(2, 5))
        return result


def parseQuoteItem(item, filter_set):
    """Parse a quoted-reply element into a dict of comment fragments.

    Extracts the quoted author's id, the quoted reply's id, and the quoted
    content (images, text fragments, @-mentions) from a
    <div class='reply-quote-content'> subtree.
    """
    parsed = {
        "comments": [],
        # The quoted author's nickname and avatar are not present in this markup.
        "userName": None,
        "avatar": None,
    }

    quote_node = item.xpath("./div[@class='reply-quote-content']")[0]

    # ID of the quoted author.
    parsed["refUserId"] = quote_node.xpath("./@data-author-id")[0].strip()

    # ID of the quoted reply.
    parsed["refReplyId"] = quote_node.xpath("./@data-ref-cid")[0].strip()

    # Child classes seen here: bg-img-green / reply-quote / comment-photos /
    # reply-content. fillter_text could be used instead, but is clumsier here.
    for node in quote_node:
        classes = node.attrib.get('class', '')

        # Quoted image.
        if 'quote-img' in classes:
            img_urls = node.xpath(".//div[@class='cmt-img']/img/@src")
            # Some comments carry this tag without an actual image link.
            if img_urls:
                parsed["comments"].append({
                    "type": "image",
                    "text": img_urls[0]  # TODO image handling
                })

        # Quoted text content.
        elif 'all ref-content' in classes:
            fragments = list(map(str.strip, node.xpath("./text()")))
            parsed["comments"].extend(extractDataFromContents(fragments, filter_set))

        # User @-mentioned inside the quote.
        elif 'pubdate' in classes:
            link = node.xpath("./a/@href")[0]
            parsed["comments"].append({
                "type": "@",
                "link": link,
                "text": node.xpath("./a/text()")[0],
                "userId": link.strip().split("/")[-2]
            })
    return parsed



def pareseReplyItem(item, filter_set):
    """Parse a single reply element into a dict.

    Collects the author (name/avatar/id), reply id, publication time and
    province, plus the reply body as a list of comment fragments (quotes,
    images, text).
    """
    face_node = item.xpath("./div[@class='user-face']")[0]
    doc_node = item.xpath("./div[@class='reply-doc content']")[0]

    profile_href = face_node.xpath("./a/@href")[0]
    parsed = {
        "userName": face_node.xpath("./a/img/@alt")[0],
        "avatar": face_node.xpath("./a/img/@src")[0],  # TODO image handling
        "userId": profile_href.strip().split("/")[-2],
        "replyId": item.xpath("./@id")[0].strip(),
        "comments": [],
    }

    # Child classes seen here: bg-img-green / reply-quote / comment-photos /
    # reply-content. fillter_text could be used instead, but is clumsier here.
    for node in doc_node:
        classes = node.attrib.get('class', '')

        # Quoted reply.
        if 'reply-quote' in classes:
            parsed['comments'].append({
                "type": "quote",
                "text": parseQuoteItem(node, filter_set)
            })

        # Attached photos.
        elif 'comment-photos' in classes:
            photo_urls = node.xpath("./div/div/img/@src")
            # Some comments carry this tag without an actual image link.
            if photo_urls:
                parsed['comments'].append({
                    "type": "image",
                    "text": photo_urls[0]  # TODO image handling
                })

        # Reply text content.
        elif 'reply-content' in classes:
            fragments = list(map(
                str.strip, node.xpath("./div/text() | ./div/a/@href | ./div/p/text()")))
            parsed["comments"].extend(extractDataFromContents(fragments, filter_set))

        # Time and IP province — a separate `if` (not elif), as in the
        # original control flow; e.g. "2025-06-02 11:19:34 上海".
        if 'bg-img-green' in classes:
            pubtime = node.xpath(".//span[@class='pubtime']/text()")[0].strip()
            parsed["publicTime"] = " ".join(pubtime.split(" ")[:2])
            parsed["province"] = pubtime.split(" ")[-1]
    return parsed

