import json
import random
import re
import time

from .exceptions import NotLoginError
from .cookie_utils import getCookie
from .request_utils import ownRequest
from .data_utils import extractDataFromContents,fillter_text

from setting import *

def processQuestionId(questionId, tp="question",filter_set=None):
    """Handle a Douban Q&A ("question") or vote ("poll") item.

    First GETs the item to check whether it was already answered; if so the
    cached result is returned.  Otherwise an answer/vote is submitted and the
    response is parsed.

    Args:
        questionId: the question/poll ID.
        tp: item type, either "question" or "poll".
        filter_set: forwarded to fillter_text for text sanitising.

    Returns:
        tuple: ``(title, correct_answer)`` for "question", or
        ``(title, [option dicts])`` for "poll".

    Raises:
        NotLoginError: when the cookie appears to be invalid (submission
            response is not JSON / cannot be parsed, or a poll GET returned
            no options to vote on).
        ValueError: when ``tp`` is neither "question" nor "poll".
    """
    cookies = getCookie()

    # Pick a random canned answer for the free-text question type.
    with open(ANWSER_PATH, mode="r", encoding="utf-8") as f:
        answer = random.choice(f.read().split('\n'))

    # Extract the ck CSRF token from the cookie string.
    # (Run the regex once; the original executed the same search twice.)
    ck_match = re.search(r'ck=([a-zA-Z0-9]+)', cookies)
    ck_value = ck_match.group(1) if ck_match else None

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        "host": "m.douban.com",
        "Cookie": cookies,
    }

    # ``with`` releases the lock on every exit path (return/raise alike),
    # replacing the manual acquire()/try/finally/release() dance.
    with LOCK:
        vote_id = None  # first option id of a poll; set by the GET below

        # Step 1: check whether the item has already been answered.
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}?ck={ck_value}"
            data = ownRequest(url, method="get", headers=headers).json()
            if data.get("correct_answer"):
                return data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
        elif tp == "poll":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}?ck={ck_value}"
            data = ownRequest(url, method="get", headers=headers).json()
            raw_options = data.get("options")
            if raw_options:
                vote_id = raw_options[0].get("id")
                return data.get("title"), [
                    {"option_contents": fillter_text(opt.get('title'), filter_set),
                     "is_correct": opt.get("is_correct")}
                    for opt in raw_options
                ]
        else:
            # Originally an unknown tp fell through and died with a
            # NameError on ``result``; fail fast and explicitly instead.
            raise ValueError(f"unknown tp: {tp!r} (expected 'question' or 'poll')")

        # Step 2: not answered yet — submit an answer / vote.
        if tp == "question":
            url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/question/{questionId}/answer"
            payload = {
                "answer": answer,
                "ck": ck_value
            }
            res = ownRequest(url, "post", headers=headers, data=payload)
            try:
                data = res.json()
                result = data.get("title"), fillter_text(data.get("correct_answer"), filter_set)
            except Exception as e:
                # Use res.text here, NOT res.json(): the original re-called
                # .json() inside the handler, which re-raised the decode
                # error and masked the NotLoginError entirely.
                raise NotLoginError(f"Cookie可能失效，提交答案的响应内容：{res.text}") from e
            return result

        # tp == "poll"
        if vote_id is None:
            # The GET above returned no options, so there is nothing to vote
            # on — most likely an invalid cookie.  The original code reached
            # this point with ``vote_id`` unbound and crashed with NameError.
            raise NotLoginError(f"Cookie可能失效，投票 {questionId} 未返回任何选项")
        url = f"https://m.douban.com/rexxar/api/v2/ceorl/poll/{questionId}/vote"
        res = ownRequest(url, "post", headers=headers, data=f'option_ids={vote_id}&ck={ck_value}')
        try:
            data = res.json()
            result = data.get("title"), [
                {"option_contents": fillter_text(opt.get('title'), filter_set),
                 "is_correct": opt.get("is_correct")}
                for opt in data.get("options")
            ]
        except Exception as e:
            raise NotLoginError(f"Cookie可能失效，提交答案的响应内容：{res.text}") from e
        return result


def parseQuoteItem(item,filter_set):
    """Parse a quoted-reply block (``div.reply-quote-content``) of a comment.

    Args:
        item: lxml element wrapping the quote node.
        filter_set: forwarded to fillter_text / extractDataFromContents.

    Returns:
        dict with ``comments`` (list of parsed fragments), ``userName`` /
        ``avatar`` (always None — quotes carry neither), ``refUserId`` and
        ``refReplyId`` of the quoted reply.
    """
    quote_node = item.xpath("./div[@class='reply-quote-content']")[0]

    parsed = {
        "comments": [],
        # The quote markup contains no avatar or nickname of the quoted
        # author — only the two data-* ids below.
        "userName": None,
        "avatar": None,
        "refUserId": quote_node.xpath("./@data-author-id")[0].strip(),
        "refReplyId": quote_node.xpath("./@data-ref-cid")[0].strip(),
    }

    # Child markers seen here: quote-img / all ref-content / pubdate
    # (could be funnelled through fillter_text, but that is messier).
    for node in quote_node:
        cls = node.attrib.get('class')
        if cls is None:
            continue

        if 'quote-img' in cls:
            # Quoted image; the tag sometimes exists without a real link.
            img_links = node.xpath(".//div[@class='cmt-img']/img/@src")
            if img_links:
                parsed["comments"].append({
                    "type": "image",
                    "text": img_links[0]  # TODO image handling
                })
        elif 'all ref-content' in cls:
            # Quoted text content.
            fragments = [t.strip() for t in node.xpath("./text()")]
            parsed["comments"].extend(extractDataFromContents(fragments, filter_set))
        elif 'pubdate' in cls:
            # A user @-mentioned inside the quote.
            link = node.xpath("./a/@href")[0]
            parsed["comments"].append({
                "type": "@",
                "link": link,
                "text": node.xpath("./a/text()")[0],
                "userId": link.strip().split("/")[-2]
            })
    return parsed


def pareseReplyItem(item,filter_set):
    """Parse one reply element into a structured dict.

    Args:
        item: lxml element of a single reply (holds ``div.user-face`` and
            ``div.reply-doc content`` children).
        filter_set: forwarded to fillter_text / extractDataFromContents.

    Returns:
        dict with author info (userName, avatar, userId), replyId, the parsed
        ``comments`` list, and — when present — publicTime and province.
    """
    face_node = item.xpath("./div[@class='user-face']")[0]
    doc_node = item.xpath("./div[@class='reply-doc content']")[0]

    parsed = {
        "userName": face_node.xpath("./a/img/@alt")[0],
        "avatar": face_node.xpath("./a/img/@src")[0],  # TODO image handling
        "userId": face_node.xpath("./a/@href")[0].strip().split("/")[-2],
        "replyId": item.xpath("./@id")[0].strip(),
        "comments": [],
    }

    # Child markers seen here: bg-img-green / reply-quote /
    # comment-photos / reply-content
    # (could be funnelled through fillter_text, but that is messier).
    for node in doc_node:
        cls = node.attrib.get('class')
        if cls is None:
            continue

        if 'reply-quote' in cls:
            # Nested quoted reply.
            parsed['comments'].append({
                "type": "quote",
                "text": parseQuoteItem(node, filter_set)
            })
        elif 'comment-photos' in cls:
            photo_links = node.xpath("./div/div/img/@src")
            # The tag sometimes exists without a real image link; note the
            # ``continue`` also skips the time/IP check below, matching the
            # original control flow.
            if not photo_links:
                continue
            parsed['comments'].append({
                "type": "image",
                "text": photo_links[0]  # TODO image handling
            })
        elif 'reply-content' in cls:
            fragments = [t.strip() for t in node.xpath("./div/text() | ./div/a/@href | ./div/p/text()")]
            parsed["comments"].extend(extractDataFromContents(fragments, filter_set))

        # Time and IP footer — deliberately a separate ``if``, not part of
        # the chain above. Example text: "2025-06-02 11:19:34 上海".
        if 'bg-img-green' in cls:
            stamp_parts = node.xpath(".//span[@class='pubtime']/text()")[0].strip().split(" ")
            parsed["publicTime"] = " ".join(stamp_parts[:2])  # date + time
            parsed["province"] = stamp_parts[-1]              # trailing region
    return parsed

