# -*- coding: UTF-8 -*-
__author__ = 'tzq'
import time
import xlwt

from basetools import tool
from basetools.dbheper import *
from basetools.WebHelper import HttpHelper


class GetLianjiaData:
    """Scraper for per-college admission-score tables on college.gaokao.com.

    Walks the paginated college listing, follows each college's score-record
    link, and writes one Excel row per (college, year) score record via xlwt:
    lowest / highest / average score, admitted count, and admission batch.

    NOTE(review): the class name says "Lianjia" but the target site is
    college.gaokao.com -- presumably copied from an earlier spider; kept
    for backward compatibility.
    """

    def __init__(self):
        # The %s placeholder is the 1-based page index of the listing.
        self.siteUrl = 'http://college.gaokao.com/schpoint/a15/s1/d6/p%s/'
        self.tool = tool.Tool()
        self.HttpHelper = HttpHelper()
        self.DBHelper = DBHelper()
        # Browser-captured request headers (including a session cookie) so the
        # site serves the full listing instead of a bot page.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': '__utmc=243165661; __utmz=243165661.1595481781.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); UM_distinctid=1737a1ff6e97f6-0ffa55f52e950d-15366650-13c680-1737a1ff6ea9cb; pgv_pvi=1445339136; pgv_si=s9908822016; CNZZDATA1997329=cnzz_eid%3D270718232-1595479198-null%26ntime%3D1595668407; Hm_lvt_aa27487f630124a75eaf9c8ac900811c=1595481782,1595669547; __utma=243165661.1977187595.1595481781.1595666513.1595672071.3; stuarea=15; __utmb=243165661.20.10.1595672071; Hm_lpvt_aa27487f630124a75eaf9c8ac900811c=1595672212',
            'Host': 'college.gaokao.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
        }

    def getContents(self, pageindex):
        """Fetch one listing page and return its colleges.

        :param pageindex: 1-based page number of the listing.
        :return: list of ``{'name': college name, 'record_url': detail URL}``
                 dicts; empty list when the page could not be fetched.
        """
        contents = []
        url = self.siteUrl % str(pageindex)
        content = self.HttpHelper.getHtml(url, self.headers, 'gb2312')
        if content is not None:
            # Imported lazily, matching the file's existing style.
            import lxml.html.soupparser as soupparser
            dom = soupparser.fromstring(content)
            for item in dom.xpath("//*[@class='scores_List']/dl"):
                try:
                    # College name.
                    name = item.xpath('dt/strong/a')[0].text
                    # Link to the score-record detail page (4th list item).
                    link = item.xpath('dd/ul/li[4]/span/a')[0]
                    record_url = link.xpath("@href")[0]
                except IndexError:
                    # Entry without the expected markup -- skip it instead of
                    # silently swallowing every exception (original bare
                    # `except:` with a no-op bare `print`).
                    continue
                contents.append({'name': name, 'record_url': record_url})
        return contents

    def saveData(self, pageIndex, sh1, start_row=1):
        """Write the score records of every college on one listing page.

        :param pageIndex: 1-based listing page to process.
        :param sh1: xlwt worksheet receiving one row per score record.
        :param start_row: first worksheet row to write to.  The original
            implementation hard-reset the row counter to 1 on every call, so
            each page overwrote the previous page's rows; callers should pass
            the value returned by the previous call.
        :return: the next free row index.
        """
        contents = self.getContents(pageIndex)
        index = start_row
        if contents:
            import lxml.html.soupparser as soupparser
            for item in contents:
                if item is None or item.get('name') is None:
                    continue
                record_content = self.HttpHelper.getHtml(
                    item['record_url'], self.headers, 'gb2312')
                if record_content is None:
                    continue
                dom = soupparser.fromstring(record_content)
                for node in dom.xpath('//*[@id="pointbyarea"]/table/tr'):
                    if node is None:
                        continue
                    try:
                        # Columns 1..6: year, lowest, highest, average,
                        # admitted count, admission batch.
                        cells = [node.xpath('td[%d]' % col)[0].text
                                 for col in range(1, 7)]
                    except IndexError:
                        # Header or malformed row -- no data cells; skip it
                        # WITHOUT advancing the row counter (the original
                        # advanced on failure too, leaving blank rows).
                        continue
                    sh1.write(index, 0, item['name'])
                    for col, value in enumerate(cells, start=1):
                        sh1.write(index, col, value)
                    index += 1
        return index

    def StartSpider(self, start, end):
        """Crawl listing pages ``start``..``end`` and save to test_w.xls.

        The workbook is saved in a ``finally`` so partial results survive a
        crash.  (The original also opened 'f.txt' for writing but never used
        or closed it -- that leak has been removed.)
        """
        wb = xlwt.Workbook()
        sh1 = wb.add_sheet('专科')
        # Header row: school, year, lowest, highest, average, count, batch.
        titles = ('学校', '年份', '最低', '最高', '平均', '录取人数', '录取批次')
        for col, title in enumerate(titles):
            sh1.write(0, col, title)
        row = 1
        try:
            for i in range(start, end + 1):
                print(u"正在收集第", i, u"页的大学信息")
                # Thread the row counter through so pages append instead of
                # overwriting each other.
                row = self.saveData(i, sh1, row)
                # time.sleep(10)
        finally:
            wb.save('test_w.xls')



if __name__ == '__main__':
    # Guard the crawl so importing this module does not trigger a 67-page
    # scrape as a side effect.
    getLianjiaData = GetLianjiaData()
    getLianjiaData.StartSpider(1, 67)
