import requests
from bs4 import BeautifulSoup
import json
import time
import random
from fake_useragent import UserAgent
from datetime import datetime
from config.config import Config
class ZhihuCrawler:
    """Crawler for a Zhihu topic's "essence" feed via the v4 web API.

    Requires a logged-in cookie (``Config.ZHIHU_COOKIE``); the essence
    endpoint rejects anonymous requests.
    """

    def __init__(self):
        # Randomized User-Agent plus the logged-in cookie for each session.
        self.headers = {
            'User-Agent': UserAgent().random,
            'Accept': 'application/json',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'cookie': Config.ZHIHU_COOKIE  # cookie captured after login
        }
        self.base_url = "https://www.zhihu.com/api/v4/topics/{topic_id}/feeds/essence"

    def get_topic_content(self, topic_id, page):
        """Fetch one page (20 items) of the topic's essence feed.

        Args:
            topic_id: Zhihu topic id interpolated into the API URL.
            page: zero-based page number; translated to ``offset = page * 20``.

        Returns:
            The parsed JSON response dict on HTTP 200, otherwise ``None``.
        """
        params = {
            # BUG FIX: the original dict declared 'include' twice; the second,
            # pre-percent-encoded value silently overwrote the first and would
            # have been double-encoded by requests. Keep the plain value once
            # and let requests handle the URL encoding.
            'include': 'data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.content,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp;data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.is_normal,comment_count,voteup_count,content,relevant_info,excerpt.author.badge[?(type=best_answerer)].topics;data[?(target.type=topic_sticky_module)].target.data[?(target.type=article)].target.content,voteup_count,comment_count,voting,author.badge[?(type=best_answerer)].topics',
            'limit': 20,
            # BUG FIX: was a set literal ``{page*20}``; pass the plain integer.
            'offset': page * 20,
        }

        url = self.base_url.format(topic_id=topic_id)
        try:
            # timeout keeps the crawler from hanging forever on a stalled
            # connection (the original call had none).
            response = requests.get(url, headers=self.headers, params=params,
                                    timeout=10)
            # Debug output: status code and a preview of the body.
            print(f"响应状态码: {response.status_code}")
            print(f"响应内容: {response.text[:200]}...")  # first 200 chars only

            if response.status_code == 200:
                return response.json()
            print(f"请求失败，状态码：{response.status_code}")
            return None

        except requests.exceptions.RequestException as e:
            print(f"请求发生错误: {e}")
            return None
        except json.JSONDecodeError as e:
            # response is always bound here: .json() can only raise after the
            # GET succeeded.
            print(f"JSON解析错误: {e}")
            print(f"响应内容: {response.text}")
            return None

    def parse_answer(self, item):
        """Extract the fields we keep from one feed item.

        Returns:
            A flat dict of answer fields, or ``None`` when any expected key is
            missing (e.g. the item is not an answer).
        """
        try:
            return {
                'question': item['target']['question']['title'],
                'answer_id': item['target']['id'],
                'author': item['target']['author']['name'],
                'content': item['target']['content'],
                'vote_count': item['target']['voteup_count'],
                'comment_count': item['target']['comment_count'],
                'created_time': item['target']['created_time'],
            }
        except KeyError:
            # Deliberate best-effort: malformed items are skipped by callers.
            return None

    def save_to_file(self, answers, filename):
        """Dump the collected answers to *filename* as pretty-printed JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(answers, f, ensure_ascii=False, indent=2)

    def crawl_topic(self, topic_id, max_pages=5):
        """Crawl up to *max_pages* pages of answers for one topic.

        Returns:
            A list of parsed answer dicts (possibly empty).
        """
        print(f"开始爬取话题,topic_id:{topic_id}")
        all_answers = []

        for page in range(max_pages):
            try:
                print(f"正在爬取第 {page + 1} 页...")

                data = self.get_topic_content(topic_id, page)
                if not data or 'data' not in data:
                    break

                # Keep only answer-type items; parse_answer returns None for
                # anything it cannot read.
                for item in data['data']:
                    if 'target' in item and item['target'].get('type') == 'answer':
                        answer = self.parse_answer(item)
                        if answer:
                            all_answers.append(answer)

                # A missing 'paging' key is treated as "last page" (default
                # True). Otherwise pause with a random delay to avoid
                # triggering rate limiting. (The original also kept a dead
                # ``offset`` counter here; paging is driven by ``page``.)
                if data.get('paging', {}).get('is_end', True):
                    break
                time.sleep(random.uniform(1, 3))

            except Exception as e:
                # Top-level boundary for the crawl loop: report and stop with
                # whatever was collected so far.
                print(f"爬取过程中出现错误: {e}")
                break

        return all_answers

    def filter_answers(self, answers, min_votes=1000):
        """Return only the answers with at least *min_votes* upvotes."""
        return [ans for ans in answers if ans['vote_count'] >= min_votes]

    def format_answer(self, answer):
        """Format one parsed answer for display/storage.

        Converts the epoch timestamp to a human-readable local time string and
        strips HTML tags from the content.
        """
        created_time = datetime.fromtimestamp(answer['created_time']).strftime('%Y-%m-%d %H:%M:%S')
        return {
            'question': answer['question'],
            'author': answer['author'],
            'vote_count': answer['vote_count'],
            'comment_count': answer['comment_count'],
            'created_time': created_time,
            'content': BeautifulSoup(answer['content'], 'html.parser').get_text()  # strip HTML tags
        }
