import requests
from bs4 import BeautifulSoup
import re
import json
import time


class net39_ASK(object):
    """
    Scrape question listings from ask.39.net to build a medical Q&A dataset.

    ``get_url`` saves per-department listing pages of question URLs as JSON
    files under ``data/``; ``get_detail`` extracts one question page;
    ``spider_dept`` walks every department in ``all_dept``.
    """

    def __init__(self):
        # Browser-like headers so the site serves the normal HTML pages.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,"
                      "*/*;q=0.8, "
                      "application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Host": "ask.39.net",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/102.0.0.0 "
                          "Safari/537.36"}
        # Mapping: top-level department name -> {sub-department name -> listing id}.
        # The ids are the numeric path segments used in the listing URLs.
        self.all_dept = {
            '内科': {'心血管内科': '237', '神经内科': '35', '肾内科': '140', '消化内科': '36', '内分泌科': '279', '血液科': '281', '风湿免疫科': '39',
                   '呼吸内科': '280'},
            '外科': {'骨科': '47', '心胸外科': '21', '泌尿外科': '163', '肛肠外科': '283', '乳腺外科': '286', '肝胆外科': '284', '普外科': '304',
                   '神经外科': '232', '血管外科': '181', '烧伤科': '285', '胃肠外科': '201'},
            '妇产科': {'妇科': '44', '产科': '229', '不孕不育': '3157'},
            '儿科': {'小儿内科': '45', '小儿外科': '231', '新生儿科': '319', '小儿精神科': '3159'}, '传染病科': {'传染病科': '311'},
            '男科': {'男科': '322'}, '减肥': {'减肥': '319524693'}, '中医科': {'中医科': '3163'}, '五官科': {'五官科': '323'},
            '皮肤性病科': {'皮肤性病科': '319465592'}, '肿瘤科': {'肿瘤科': '3162'}, '精神心理科': {'精神心理科': '3166'},
            '整形美容': {'整形美容': '3165'}}

    @staticmethod
    def _clean(text):
        """Strip newlines, tabs, carriage returns and spaces from scraped text."""
        # Raw-string character class; equivalent to the alternation "\n|\t|\r| ".
        return re.sub(r"[\n\t\r ]", "", text)

    def get_url(self, data_id, dept, first, max_pages=500):
        """
        Fetch listing pages for one department and save the question URLs.

        Writes one JSON file per page to ``data/<data_id>_<page>url.json``.

        :param data_id: numeric department listing id used in the URL path
        :param dept: department name
        :param first: parent (top-level) department name
        :param max_pages: exclusive upper bound on listing pages to fetch
            (default 500, matching the original hard-coded range)
        :return: None
        """
        for page in range(1, max_pages):
            print("-----" + data_id + "-------完成" + str(page))
            url = "http://ask.39.net/news/" + data_id + "-" + str(page) + ".html"
            # Timeout so a single stalled connection cannot hang the whole crawl.
            response = requests.get(url, headers=self.headers, timeout=10).text
            soup = BeautifulSoup(response, "lxml")
            detail = [
                "http://ask.39.net" + anchor["href"]
                for listing in soup.find_all(class_="list_ask list_ask2")
                for anchor in listing.find_all("a")
            ]
            with open("data/" + data_id + "_" + str(page) + "url.json", "w", encoding="utf8") as f:
                json.dump({"url": detail, "dept": dept, "up_dept": first}, f, ensure_ascii=False, indent=2)

    def get_detail(self, url, depart, up_dept):
        """
        Fetch one question page and extract its fields.

        :param url: question page URL
        :param depart: department name to record with the question
        :param up_dept: parent department name to record with the question
        :return: dict with keys ``detail``, ``url``, ``answer``, ``other``,
            ``title``, ``depart``, ``up_dept``
        """
        title, other, detail, label = "", "", "", []
        response = requests.get(url, headers=self.headers, timeout=10).text
        soup = BeautifulSoup(response, "lxml")
        for element in soup.find_all(class_="ask_cont"):
            for element_one in element.find_all(class_="ask_tit"):
                title = self._clean(element_one.get_text())
            for element_two in element.find_all(class_="txt_ms"):
                detail = self._clean(element_two.get_text())
            for element_three in element.find_all(class_="txt_bc"):
                other = self._clean(element_three.get_text())
            for element_four in element.find_all(class_="txt_label"):
                for element_five in element_four.find_all("a"):
                    label.append(element_five.get_text())
        # Collect every answer text on the page.
        answer = [
            reply.get_text()
            for section in soup.find_all(class_="sele_all marg_top")
            for reply in section.find_all(class_="sele_txt")
        ]
        return {"detail": detail, "url": url, "answer": answer, "other": other, "title": title, "depart": depart,
                "up_dept": up_dept}

    def spider_dept(self):
        """
        Crawl the question listings of every department in ``all_dept``.

        Best-effort: a failure in one department is logged and the crawl
        moves on to the next, instead of being silently swallowed.

        :return: None
        """
        for first, sub_depts in self.all_dept.items():
            for dept, data_id in sub_depts.items():
                try:
                    self.get_url(data_id, dept, first)
                    # Small delay between departments to be polite to the server.
                    time.sleep(0.3)
                except Exception as exc:
                    # Log and continue: keep the original best-effort behavior,
                    # but no longer hide errors (the bare "except: pass" also
                    # swallowed KeyboardInterrupt).
                    print("failed for dept id " + data_id + ": " + repr(exc))


if __name__ == '__main__':
    # Entry point: crawl every department's question listings.
    # Bind the instance to its own name instead of shadowing the class
    # (the original did `net39_ASK = net39_ASK()`); dead commented-out
    # example calls removed.
    spider = net39_ASK()
    spider.spider_dept()
