import json
import re
from concurrent.futures import ProcessPoolExecutor  # process-based parallelism
from urllib.parse import quote

import numpy as np
import pandas as pd
import pymongo
import requests
from lxml import etree

class MyProcessPool:
    """Thin wrapper around ProcessPoolExecutor: submit one task per item
    of a parameter list, then wait for every task to finish."""

    def __init__(self, work_num, func, li, *args):
        """
        work_num -- number of worker processes running concurrently.
        func     -- task callable, invoked as func(item) for each element of li.
        li       -- iterable of per-task arguments.
        *args    -- extra payload kept on the instance (not used by go()).
        """
        self.pool = ProcessPoolExecutor(max_workers=work_num)
        self.func = func      # task callable
        self.func_ls = li     # one submission per element
        # BUG FIX: the original bound self.other only when extra args were
        # passed, so reading it could raise AttributeError; always bind it.
        self.other = args

    def go(self):
        """Submit func(item) for every item, then block until the pool has
        drained and shut down."""
        for item in self.func_ls:
            self.pool.submit(self.func, item)
        self.pool.shutdown()  # wait=True by default: joins all workers


class Spider:
    """Scraper for jiandanxinli.com Q&A: collects question links from a
    listing page, then fetches each question's text and its answers."""

    # Seconds before a hung HTTP request is abandoned.  BUG FIX: the
    # original called requests.get() with no timeout, so one stalled
    # server response would block the worker process forever.
    TIMEOUT = 10

    def get_bighref(self, url, info_ls):
        """Fetch one listing page, follow every question link on it, and
        append the cleaned Q&A records to *info_ls* (mutated in place)."""
        response = requests.get(url, timeout=self.TIMEOUT)
        xml = etree.HTML(response.text)
        # Links to the individual question detail pages.
        Bighref_ls = xml.xpath('//a[@class="counseling-list-item--reading"]/@href')
        for href in Bighref_ls:
            response = requests.get('https://www.jiandanxinli.com' + href,
                                    timeout=self.TIMEOUT)
            xml = etree.HTML(response.text)
            # The question body is present in the static HTML, so plain
            # XPath extraction is enough here.
            question = ''.join(xml.xpath('//div[@class="common-detail-article__main"]/p/text()'))
            self.get_answer(href, info_ls, question)

    def get_answer(self, href, info_ls, question):
        """Fetch all answers for the question at *href* from the JSON API,
        keep only substantive up-voted ones, and append the record to
        *info_ls*.  Questions with no surviving answer are skipped."""
        # Answers are not in the page source: they come from this JSON
        # endpoint, keyed by the numeric question id taken from the URL.
        question_id = re.findall(r'/(\d.+)', href)[0]
        answer_url = f'https://www.jiandanxinli.com/api/v1/answers?filter[question_id]={question_id}&filter[promoted]=false'
        response = requests.get(answer_url, timeout=self.TIMEOUT)
        # Unwrap the nested response structure: the answer objects live
        # under the top-level 'data' key.
        answer_ls = response.json()['data']
        answer_clean = []
        for dic in answer_ls:
            attrs = dic['attributes']
            text = attrs['content'].replace('<br>', '\n')
            # Pitfall: anonymous users have no profile, so attrs['user']
            # is not subscriptable (TypeError) — fall back to the raw value.
            try:
                job_tit = attrs['user']['job_title']
            except TypeError:
                job_tit = attrs['user']
            vote = attrs['votes_count']
            # Keep only answers with some substance and at least one vote.
            if len(text) > 20 and vote > 0:
                answer_clean.append({'内容': text,
                                     '身份': job_tit,
                                     '感谢数': vote})
        # Questions without any qualifying answer are not recorded.
        if answer_clean:
            info_ls.append({'问题': question,
                            '回答': answer_clean})
# 保存至数据库
def save_to_mongo(collect, data):
    # 传入data类型为字典
    try:
        collect.insert(data)
    except Exception as e:
        print(e, '存储到MongoDb失败', data)

# 主循环函数
def loop(key):
    # 中文先编码再拼接到url上
    s_encode = str(key.encode('utf-8'))[2:-1].replace(r'\x', '%').upper()
    info_ls = []
    spider = Spider()
    print(f'开始爬{key}')
    # 避免去请求页码，直接分析翻页规律
    for page in range(100):
        try:
            print(f'正在爬第{page + 1}页')
            if page == 0:  # 第一页
                url = f'https://www.jiandanxinli.com/questions?q={s_encode}'
            else:
                url = f'https://www.jiandanxinli.com/questions?page={page + 1}&q={s_encode}'
            spider.get_bighref(url, info_ls)
            if (page + 1) == 100:
                print('即将存储')
                # 连接数据库
                client = pymongo.MongoClient('localhost')
                collect = client['mydb'][key]
                for i in info_ls:
                    save_to_mongo(collect, i)

        except Exception as e:
            print('出了个错', e, '接着爬')

if __name__ == '__main__':
    # Keywords to crawl; each keyword is handled end-to-end by one worker.
    key_ls = [
        '痛苦', '压力', '迷茫', '抑郁', '自卑', '焦虑', '失眠', '工作', '消沉',
        '失恋', '暴躁', '紧张', '逆反', '孤独', '烦恼', '愤怒', '恐惧', '强迫症',
    ]
    # Crawl with 5 worker processes, i.e. up to 5 keywords in flight at once.
    MyProcessPool(5, loop, key_ls).go()
