import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisSpider
from area_code_v2_redis.items import AreaCodeV2RedisItem
from bs4 import BeautifulSoup
import re
class AreaRedisSpider(RedisSpider):
    """Redis-driven spider that crawls China administrative-division codes
    from stats.gov.cn, descending year -> province -> city -> county ->
    town -> village and yielding one :class:`AreaCodeV2RedisItem` per village.

    Start URLs are pushed into the Redis list named by ``redis_key``.
    """

    name = 'area_redis'
    redis_key = "start_url"

    # Links to detail pages end in a literal ".html".  The dot is escaped:
    # the previous pattern ".html$" would also match e.g. "xhtml".
    _HTML_LINK = re.compile(r"\.html$")
    # Rows interleave a numeric-code <a> and a name <a>; this tells them apart.
    _HAS_DIGIT = re.compile(r"\d+")

    @staticmethod
    def _sibling_url(base_url, filename):
        """Return *base_url* with its last path segment replaced by *filename*."""
        parts = base_url.split("/")
        parts[-1] = filename
        return "/".join(parts)

    @classmethod
    def _code_links(cls, soup):
        """Extract ``(urls, codes, names)`` from a listing page's ``<a>`` tags.

        Each table row renders as a code link (digits) followed by a name
        link (no digits).  Anchors without an ``href`` are skipped so a bare
        ``<a>`` tag cannot raise ``KeyError``.
        """
        links = [a for a in soup.select("a")
                 if a.get("href") and cls._HTML_LINK.search(a["href"])]
        codes = [a.text for a in links if cls._HAS_DIGIT.search(a.text)]
        urls = [a["href"] for a in links if cls._HAS_DIGIT.search(a.text)]
        names = [a.text for a in links if not cls._HAS_DIGIT.search(a.text)]
        return urls, codes, names

    ### Collect the per-year entry pages
    def parse(self, response):
        """Index page: one <li> per census year; follow every year link."""
        soup = BeautifulSoup(response.body, "lxml")
        ul_list = soup.find("ul", class_="center_list_contlist")
        year_list = ul_list.find_all("li")
        # Every other <li> is a year entry (the rest are separators);
        # [0:23:2] caps the crawl at the first 12 years listed.
        for ele in year_list[0:23:2]:
            year = ele.text.strip()[:5]
            href = ele.find("a")["href"]
            # Original compared href[:3] != "http" (always true, since the
            # slice is 3 chars); use startswith so absolute URLs pass through.
            if href.startswith("http"):
                year_url = href
            else:
                year_url = "http://www.stats.gov.cn" + href
            yield scrapy.Request(year_url, callback=self.parse_first,
                                 meta={'year': year, 'year_url': year_url})

    ########## Parse province data
    def parse_first(self, response):
        """Province listing for one year; follow each province link."""
        print("crawling first_level:{}".format(response.url))
        soup = BeautifulSoup(response.text, "lxml")
        year = response.meta['year']
        year_url = response.meta['year_url']
        for link in soup.select(".provincetr a"):
            prov_url = year_url.replace("index.html", link["href"])
            yield scrapy.Request(prov_url, callback=self.parse_second,
                                 meta={'year': year,
                                       'year_url': year_url,
                                       'prov_url': prov_url,
                                       'prov_name': link.text})

    ### Parse city data
    def parse_second(self, response):
        """City listing under one province; follow each city link."""
        print("crawling second_level:{}".format(response.url))
        soup = BeautifulSoup(response.text, "lxml")
        urls, codes, names = self._code_links(soup)
        meta = response.meta
        for city_href, city_code, city_name in zip(urls, codes, names):
            # hrefs are relative to the province page's directory.
            city_url = self._sibling_url(meta['prov_url'], city_href)
            yield scrapy.Request(city_url, callback=self.parse_third,
                                 meta={'year': meta['year'],
                                       'year_url': meta['year_url'],
                                       'prov_url': meta['prov_url'],
                                       'prov_name': meta['prov_name'],
                                       'city_url': city_url,
                                       'city_code': city_code,
                                       'city_name': city_name})

    #### County / district data
    def parse_third(self, response):
        """County listing under one city; follow each county link."""
        print("crawling third_level:{}".format(response.url))
        soup = BeautifulSoup(response.text, "lxml")
        urls, codes, names = self._code_links(soup)
        meta = response.meta
        for county_href, county_code, county_name in zip(urls, codes, names):
            county_url = self._sibling_url(meta['city_url'], county_href)
            yield scrapy.Request(county_url, callback=self.parse_forth,
                                 meta={'year': meta['year'],
                                       'year_url': meta['year_url'],
                                       'prov_url': meta['prov_url'],
                                       'prov_name': meta['prov_name'],
                                       'city_url': meta['city_url'],
                                       'city_code': meta['city_code'],
                                       'city_name': meta['city_name'],
                                       'county_url': county_url,
                                       'county_code': county_code,
                                       'county_name': county_name})

    ### Parse town/township data
    def parse_forth(self, response):
        """Town listing under one county; follow each town link."""
        print("crawling forth_level:{}".format(response.url))
        soup = BeautifulSoup(response.text, "lxml")
        urls, codes, names = self._code_links(soup)
        meta = response.meta
        for town_href, town_code, town_name in zip(urls, codes, names):
            town_url = self._sibling_url(meta['county_url'], town_href)
            yield scrapy.Request(town_url, callback=self.parse_fifth,
                                 meta={'year': meta['year'],
                                       'year_url': meta['year_url'],
                                       'prov_url': meta['prov_url'],
                                       'prov_name': meta['prov_name'],
                                       'city_url': meta['city_url'],
                                       'city_code': meta['city_code'],
                                       'city_name': meta['city_name'],
                                       'county_url': meta['county_url'],
                                       'county_code': meta['county_code'],
                                       'county_name': meta['county_name'],
                                       'town_url': town_url,
                                       'town_code': town_code,
                                       'town_name': town_name})

    ### Parse village data (terminal level — yields items, no further requests)
    def parse_fifth(self, response):
        """Village rows under one town; emit one item per village.

        Village rows are plain <td> cells, not links: column 1 is the full
        statistical code, column 2 the urban-rural classification code
        (stored under the historical key ``village_url``), column 3 the name.
        """
        print("crawling fifth_level:{}".format(response.url))
        soup = BeautifulSoup(response.text, "lxml")
        codes = [td.text for td in soup.select(".villagetr td:nth-child(1)")]
        urls = [td.text for td in soup.select(".villagetr td:nth-child(2)")]
        names = [td.text for td in soup.select(".villagetr td:nth-child(3)")]
        meta = response.meta
        # Ancestry fields carried down through meta, copied verbatim.
        inherited = ('year', 'year_url', 'prov_url', 'prov_name',
                     'city_url', 'city_code', 'city_name',
                     'county_url', 'county_code', 'county_name',
                     'town_url', 'town_code', 'town_name')
        for village_url, village_code, village_name in zip(urls, codes, names):
            item = AreaCodeV2RedisItem()
            for key in inherited:
                item[key] = meta[key]
            item["village_url"] = village_url
            item["village_code"] = village_code
            item["village_name"] = village_name
            yield item
