import requests
import js2xml
import socket
from lxml import etree
from DAO.DAO_score_level import DAO_score_level
class DATA_score_level:
    """Scrape postgraduate admission score-line data from college.wendu.com
    and persist it through DAO_score_level.

    Pipeline: major_catalog_link (all schools) -> details_link (majors of one
    school) -> score_level (score lines of one major, inserted into the DB).
    """

    # Desktop-browser User-Agent so the site serves its normal HTML pages.
    HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}

    # Crawl the major-catalog link of every school
    def major_catalog_link(self):
        """Walk the paginated school search listing (pages 1-69) and crawl the
        major list of each school that is not yet in the database."""
        urls = [
            'http://college.wendu.com/index.php?m=university&c=search&a=search&page={}'.format(i)
            for i in range(1, 70)
        ]
        dao = DAO_score_level()  # loop-invariant: one DAO for the whole crawl
        for url in urls:
            print(url)
            socket.setdefaulttimeout(600)
            response = requests.get(url, headers=self.HEADERS, timeout=600)
            response_html = etree.HTML(response.content.decode())
            schools = response_html.xpath("//a[@class='school-list-itemLeft']")
            for school in schools:
                school_name = school.xpath(".//h3[@class='school-item-name']/text()")[0]
                # The school id is the second "-"-separated field of the href;
                # "-1-a-a" selects page 1 with no faculty/major filter.
                school_id = school.xpath("./@href")[0].split("-")[1]
                major_link = "http://college.wendu.com/major_list-" + school_id + "-1-a-a"
                print(school_name, major_link)
                # Skip schools already present in the database.
                if dao.not_uni_name(school_name):
                    self.details_link(major_link)

    # Crawl the "view details" links of the majors of one school
    def details_link(self, url):
        """Crawl one school's major-list page and follow each major's detail link.

        The page stores its data in an inline JavaScript `datas` array, so the
        relevant <script> is parsed with js2xml and queried via XPath.
        """
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=self.HEADERS, timeout=600)
        response_str = response.content.decode()
        response_html = etree.HTML(response_str)
        script_list = response_html.xpath("//script/text()")
        try:
            # The 12th <script> on the page holds the `datas` array.
            # NOTE(review): this positional index is brittle — it breaks if the
            # site adds or removes a script tag; confirm against the live page.
            script_xml = js2xml.parse(script_list[11], encoding='utf-8', debug=False)
            script_tree = js2xml.pretty_print(script_xml)
            script_data_html = etree.HTML(script_tree)
            div_all = script_data_html.xpath('//var[@name="datas"]/array/object')
            for div_one in div_all:
                unvid = div_one.xpath('.//property[@name="unvid"]/string/text()')[0]
                acadid = div_one.xpath('.//property[@name="acadid"]/string/text()')[0]
                majorid = div_one.xpath('.//property[@name="majorid"]/string/text()')[0]
                details_link = "http://college.wendu.com/major_detail-" + unvid + "-" + acadid + "-" + majorid
                maj_name = div_one.xpath('.//property[@name="major_name"]/string/text()')[0]
                maj_uni_name = response_html.xpath('//p[@class="where"]/a[2]/text()')[0]
                print(details_link, maj_name, maj_uni_name)
                self.score_level(details_link)
                # NOTE(review): only the FIRST major is processed — this looks
                # like leftover debug code (cf. the commented-out breaks in
                # major_catalog_link); confirm before removing.
                break
        except Exception as e:
            # Any scraping failure (missing script, changed markup, network
            # error) is logged and the school is skipped.
            print("爬取查看详情链接有错误》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》")
            print(e)

    # On the detail page, scrape the score-line data and insert it into the DB
    def score_level(self, url):
        """Scrape one major's detail page (year, re-examination line and four
        subject score lines) and insert a row via DAO_score_level.add."""
        socket.setdefaulttimeout(600)
        response = requests.get(url, headers=self.HEADERS, timeout=600)
        response_str = response.content.decode()
        response_html = etree.HTML(response_str)
        # Each field is "label：value" — keep the part after the full-width colon.
        sco_year = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[1]/text()")[0].split("：")[1]
        sco_maj_name = response_html.xpath("//p[@class='where']/text()")[2]
        sco_college_name = response_html.xpath("//ul[@class='clearfix']/li[2]/span/text()")[0]
        sco_uni_name = response_html.xpath("//ul[@class='clearfix']/li[1]/span/text()")[0]
        sco_reexamine_line = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[2]/text()")[0].split("：")[1]
        sco_subject1 = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[3]/text()")[0].split("：")[1]
        sco_subject2 = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[4]/text()")[0].split("：")[1]
        sco_subject3 = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[5]/text()")[0].split("：")[1]
        sco_subject4 = response_html.xpath("//ul[@class='jb_content']/li[4]/ul/li[6]/text()")[0].split("：")[1]
        # Concatenation used only to check that at least one score was scraped.
        sco_subjects = sco_subject1 + sco_subject2 + sco_subject3 + sco_subject4
        dao = DAO_score_level()
        # BUGFIX: the original used `is not ""` — an identity comparison on a
        # string literal, which is implementation-dependent and raises a
        # SyntaxWarning on modern CPython. A truthiness test is the intent.
        if sco_subjects:
            print(sco_year, sco_maj_name, sco_college_name, sco_uni_name, sco_reexamine_line, sco_subject1, sco_subject2, sco_subject3, sco_subject4)
            # Only insert when this (university, college, major) triple is new.
            if dao.not_uni_name_maj_name(sco_uni_name, sco_college_name, sco_maj_name):
                dao.add(sco_year, sco_maj_name, sco_college_name, sco_uni_name, sco_reexamine_line, sco_subject1, sco_subject2, sco_subject3, sco_subject4)
            else:
                print("数据重复")




if __name__ == '__main__':
    # Interactive menu: read a number from stdin and dispatch.
    print("1、爬取所有院校 分数线 数据")
    print("2、爬取一个专业 分数线 数据")
    print("3、清空数据库")
    print("4、输出所有数据")
    res = int(input())
    if res == 1:
        # Full crawl: every school, every major.
        score_level = DATA_score_level()
        score_level.major_catalog_link()
    elif res == 2:
        # Single-major crawl against a fixed sample detail page.
        score_level = DATA_score_level()
        score_level.score_level("http://college.wendu.com/major_detail-9-2751-715")
    elif res == 3:
        # Empty the database table.
        dao = DAO_score_level()
        dao.truncate()
    elif res == 4:
        # Dump all stored rows.
        dao = DAO_score_level()
        dao.sel_all()