# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, FormRequest
from time import *
import datetime

class TestSpider(scrapy.Spider):
    """Crawl weibo.cn advanced search results for a list of hashtag topics.

    Weibo's advanced search (date range + original-posts-only + sort) only
    works when the filters are submitted as POST form data against the
    search form, not as a GET query string -- see the note string at the
    bottom of this file.
    """

    name = 'test'
    allowed_domains = ['weibo.cn']
    # start_urls = ['http://weibo.cn/']
    base_url = "https://weibo.cn"
    search_url = 'https://weibo.cn/search/'

    # Hashtag topics to search for.
    topic_list = [
        # '#好物分享#',

        '#一琪美啊#',
    ]

    # Search window, formatted YYYYMMDD.  Weibo launched on 2009-08-16,
    # so the start date must not be earlier than that.
    start_time = "20190120"
    end_time = "20200120"
    # Upper bound on the number of result pages to crawl.
    max_page = 200

    def start_requests(self):
        """Yield one POST per (topic, page) against the advanced-search form.

        The filters must be sent as form data: 'advancedfilter'/'hasori'
        restrict to original posts, 'smblog' is the submit-button value,
        'mp' is the total page count of the result set and 'page' selects
        the page being requested.
        """
        for topic in self.topic_list:
            # Pages 1..4 ('mp' below is hard-coded to match this count).
            for page in range(1, 4 + 1):
                form_data = {
                    "advancedfilter": "1",
                    "keyword": topic,
                    "hasori": "1",
                    "starttime": self.start_time,
                    "endtime": self.end_time,
                    "sort": "time",
                    "smblog": "搜索",
                    'mp': "4",
                    'page': str(page),
                }
                yield FormRequest(self.search_url, callback=self.parse_index,
                                  formdata=form_data)

    def parse_index(self, response):
        """Print the comments-page URL of every weibo on a search-result page."""
        # print(response.body.decode())
        print(response.url)
        # Each weibo entry is a <div class="c" id="M_...">.
        weibo_list = response.xpath('//div[@class="c" and contains(@id, "M_")]')
        for weibo in weibo_list:
            # The leading '.' scopes the text match to the current node.
            detail_url = weibo.xpath('.//a[contains(., "评论[")]/@href').extract_first()
            print(detail_url)

"""
注意1：高级搜索，form表单必须这样提交！！！！
"""

'''源代码

class MakeupSpider(scrapy.Spider):
    name = 'MakeUp'
    allowed_domains = ['weibo.cn', 'm.weibo.cn',]

    topic_list = [
        # '#好物分享#',

        '#一琪美啊#',
    ]
    search_url = 'https://weibo.cn/search/'
    turn_url = 'https://weibo.cn/search/mblog'

    # 搜索的起始日期，自行修改   微博的创建日期是2009-08-16 也就是说不要采用这个日期更前面的日期了
    date_start = datetime.datetime.strptime("2019-01-20", '%Y-%m-%d')
    # 搜索的结束日期，自行修改
    date_end = datetime.datetime.strptime("2020-01-20", '%Y-%m-%d')

    time_spread = datetime.timedelta(days=1)

    # 允许爬取的最大页码数
    max_page = 200

    # 用户详情API（点击用户头像或者昵称进入的页面，其最上方显示的信息）
    user_url = 'https://m.weibo.cn/api/container/getIndex?uid={uid}&type=uid&value={uid}&containerid=100505{uid}'

 1.每个话题发送请求（原创+实时）
    def start_requests(self):
        for topic in self.topic_list:
            form_data = {
                "advancedfilter":"1",
                "keyword":topic,
                "hasori":"1",
                "starttime":self.start_time,
                "endtime":self.end_time,
                "sort":"time",
                "smblog":"搜索",
            }
            yield FormRequest(url=self.search_url, callback=self.parse_out, formdata=form_data, meta={"topic":topic})

    # 2.判断是否超过100页
    def parse_out(self,response):
        total_pieces = int(response.xpath('//span[@class="cmt"]/text()').re_first('共(\d*?)条'))
        print("长度", total_pieces)
        total_pages = int(response.xpath('//div[@id="pagelist"]/form/div/text()').re_first('\/(\d*?)页'))
        print("总页数", total_pages)
        item = {}
        item["topic"] = keyword = response.meta.get("topic")
        # 原创+实时
        url = '{url}?keyword={keyword}&advancedfilter=1&hasori=1&starttime={start_time}&endtime={end_time}&sort=time' \
            .format(url=self.turn_url, keyword=keyword, start_time=self.start_time, end_time=self.end_time)
        total_turn =  total_pages if total_pages < 100 and total_pieces<=1000 else self.max_page
        for page in range(1, total_turn + 1):
            data = {
                'mp': str(total_turn),
                'page': str(page),
            }
            yield FormRequest(url, callback=self.parse_index, formdata=data, meta={"item":item})
'''