#!/usr/bin/env python3 
# -*- coding: utf-8 -*- 
# @Author : Leo
# @File : tianjingov.py

import re
import scrapy
from urllib import parse
from scrapy.http import Request
from scrapy.http import FormRequest
from scrapy.selector import Selector
from TianjinGovScrapy.items import TianjingovItem


class AnnouncementSpider(scrapy.Spider):
    """Spider for Tianjin government procurement announcements (tjgp.gov.cn).

    Crawls both topic lists — municipal (id=1665) and district (id=1664) —
    reads the page count from page 1, requests every list page, follows each
    announcement link, and yields one ``TianjingovItem`` per announcement.
    """

    name = 'tianjingov'
    allowed_domains = ['gov.cn']
    list_url = 'http://www.tjgp.gov.cn/portal/topicView.do'
    content_base_url = 'http://www.tjgp.gov.cn/portal/documentView.do'
    # Base POST parameters for the list endpoint. NOTE: this dict is treated
    # as read-only; every request builds its own copy so that concurrent
    # requests for the two areas cannot clobber each other's 'id'/'page'.
    form_data = {
        'method': 'view',
        'page': '1',
        # 'id' is set per request: 1665 = municipal level, 1664 = district level
        'step': '1',
        'view': 'Infor',
        'st': '1'
    }

    def start_requests(self):
        """Request page 1 of each area's list to discover its page count."""
        # Tuple (not a set) for a deterministic crawl order.
        for area_id in ('1665', '1664'):
            # Fresh copy per request: mutating self.form_data here would race
            # with the other area's requests, since both are scheduled before
            # any response is processed.
            formdata = dict(self.form_data, id=area_id)
            yield FormRequest(url=self.list_url,
                              callback=self.parse,
                              formdata=formdata,
                              meta={'area_id': area_id})

    def parse(self, response):
        """Read the total page count and request every list page of this area."""
        selector = Selector(response)
        max_page_num = selector.xpath('//span[@class="countPage"]/b[1]/text()').extract_first()
        if not max_page_num:
            # Page layout changed or an error page came back; int(None) would
            # otherwise raise TypeError. Log and skip this area.
            self.logger.warning('No page count found at %s', response.url)
            return
        area_id = response.meta['area_id']
        for page in range(1, int(max_page_num) + 1):
            # Per-request formdata with the area id taken from meta, not from
            # shared state — the shared dict would hold the last area only.
            formdata = dict(self.form_data, id=area_id, page=str(page))
            yield FormRequest(url=self.list_url,
                              callback=self.get_news_list,
                              formdata=formdata,
                              meta={'area_id': area_id})

    def get_news_list(self, response):
        """Extract title/date/id/url for each announcement and follow it."""
        selector = Selector(response)
        news_items_tags = selector.xpath('//ul[@class="dataList"]/li')
        for each_news in news_items_tags:
            news_title = each_news.xpath('./a/@title').extract_first()
            news_date = each_news.xpath('./span[@class="time"]/text()').extract_first()
            # Rebuild an absolute detail URL from the query string of the
            # relative link on the list page.
            news_url = self.content_base_url + '?method=view&' + each_news.xpath('./a/@href').extract_first().split('?')[-1]
            # Default to [None] so a link without an 'id' param doesn't raise
            # TypeError on the subscript.
            news_id = parse.parse_qs(news_url.split('?')[-1]).get('id', [None])[0]
            yield Request(url=news_url,
                          callback=self.get_news_content,
                          meta={'news_title': news_title, 'news_date': news_date, 'news_id': news_id, 'area_id': response.meta['area_id']})

    def get_news_content(self, response):
        """Build the final item from the detail page plus metadata from meta."""
        selector = Selector(response)
        news_item = TianjingovItem()
        news_item['news_id'] = response.meta['news_id']
        news_item['news_title'] = response.meta['news_title']
        news_item['news_date'] = response.meta['news_date']
        news_item['news_url'] = response.url
        # Keep the whole <body> markup; strip non-breaking spaces only.
        news_item['news_content'] = selector.xpath('//body').extract_first().replace('\xa0', '')
        news_item['area_id'] = response.meta['area_id']
        return news_item
