import scrapy
from bs4 import BeautifulSoup
from scrapy.spiders import Spider

from gaokao.items import ProvinceItem, CollegeItem, TimeItem, MajorItem


# Per-province score-line spider.
class ProvinceScoreLineSpider(Spider):
    """Crawl per-province score-line tables from gaokao.com.

    Starts at the Beijing page and follows the province navigation links,
    yielding one ProvinceItem (keyed by province URL slug) per page.
    """
    name = 'province'
    start_urls = ['http://www.gaokao.com/beijing/fsx/']
    custom_settings = {
        'ITEM_PIPELINES': {
            'gaokao.pipelines.ProvincePipeline': 302,
        }
    }

    def parse(self, response):
        """Extract the score table for the current province, then follow
        the other province links found on the same page.

        :param response: page at http://www.gaokao.com/<province>/fsx/
        :yields: one ProvinceItem, then Requests for sibling provinces.
        """
        item = ProvinceItem()
        cur_url = response.request.url
        # URL shape: http://www.gaokao.com/<province>/fsx/ -> split('/')[3]
        # is the province slug (e.g. 'beijing').
        base_name = cur_url.split('/')[3]
        alldata = response.xpath('/html/body/div[2]/div[1]/div[1]/div[3]/div[2]').extract()
        urls = response.xpath('/html/body/div[2]/div[1]/div[1]/div[3]/div[1]/a/@href').extract()
        item['table'] = {base_name: alldata}
        yield item
        for url in urls:
            # urljoin makes relative hrefs safe; absolute URLs pass through
            # unchanged. Scrapy's dupefilter stops re-crawl loops.
            yield scrapy.Request(response.urljoin(url), callback=self.parse)


# College score-line spider.
class CollegeScoreLineSpider(Spider):
    """Crawl the college score-line announcement index on gaokao.com,
    yielding one CollegeItem per announcement link and paginating via
    the '下一页' (next page) link.
    """
    name = 'college'
    start_urls = ['http://www.gaokao.com/baokao/lqfsx/dxfsx/index.shtml']
    custom_settings = {
        'ITEM_PIPELINES': {
            'gaokao.pipelines.CollegePipeline': 301,
        }
    }

    def parse(self, response):
        """Yield {title: url} items for announcement links ('公布' in the
        text) and follow pagination.

        :param response: an announcement index page.
        """
        alldata = response.xpath('/html/body/div[2]/div[2]/div[1]/div//a').extract()
        for fragment in alldata:
            soup = BeautifulSoup(fragment, 'html.parser')
            for anchor in soup.find_all('a'):
                text = anchor.string
                # .string is None when the <a> wraps nested tags; the
                # original `'公布' in anchor.string` raised TypeError here.
                if text is None:
                    continue
                if '公布' in text:
                    # Fresh item per yield: re-using one mutable item lets
                    # pipelines observe later mutations of the same object.
                    college = CollegeItem()
                    college['title'] = {text: anchor['href']}
                    yield college
                if text == '下一页':
                    # Explicit check replaces the bare `except:` that hid
                    # the missing-href case (and everything else).
                    if anchor.has_attr('href'):
                        yield scrapy.Request(response.urljoin(anchor['href']),
                                             callback=self.parse)
                    else:
                        print('-' * 30, '下一页无数据')


class MajorClassification(Spider):
    """Crawl the undergraduate major classification table from gaokao.com,
    yielding a single MajorItem holding the raw table HTML.
    """
    name = 'major'
    start_urls = ['http://www.gaokao.com/baokao/zydq/']
    custom_settings = {
        'ITEM_PIPELINES': {
            'gaokao.pipelines.MajorPipeline': 303,
        }
    }

    def parse(self, response):
        """Grab the whole classification <div> as raw HTML; parsing is
        deferred to the pipeline.

        (The previous unused `majors_table` xpath lookup — whose `[0]`
        index could raise IndexError on a layout change — and the
        commented-out exploration code were removed.)
        """
        majors = MajorItem()
        table = response.xpath('/html/body/div[3]/div[6]').extract()
        majors['major'] = {'table': table}
        yield majors


class TimeOfCollege(Spider):
    """Crawl the gaokao timetable page on eol.cn, yielding one TimeItem
    per region block ({region name: raw block HTML}).
    """
    name = 'time'
    start_urls = ['https://www.eol.cn/e_html/gk/gktime/index.shtml']
    custom_settings = {
        'ITEM_PIPELINES': {
            'gaokao.pipelines.TimePipeline': 304,
        }
    }

    def parse(self, response):
        """Walk the per-region containers and emit one item per region.

        :param response: the timetable index page.
        """
        area_con = response.xpath('/html/body/div[1]/div')
        for con in area_con:
            # Region names live in the block header spans.
            region_list = con.xpath('./div[@class="area-head"]//span/text()').extract()
            if region_list:
                # Fresh item per region: the original mutated and
                # re-yielded a single TimeItem, so pipelines could see
                # later regions' data in earlier items. Also renamed the
                # local away from `time` (shadows the stdlib module).
                item = TimeItem()
                item['time'] = {region_list[0]: con.extract()}
                yield item
