# -*- coding: utf-8 -*-
# Import libraries
import csv
import scrapy
from lxml import html as htm

# Desktop Chrome User-Agent string sent with every request so the site
# serves the normal browser-facing HTML instead of blocking the crawler.
user_agent = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"

# Define the scrapy spider
class LifeSpider(scrapy.Spider):
    """Crawl upcoming Douban events in Guangzhou and append each event's
    details (title, times, address, fee, participation counts) to
    ``tongcheng.csv``.
    """

    name = 'life'

    def start_requests(self):
        """Write the CSV header, then yield one request per listing page.

        Douban paginates with a ``start`` offset of 10 items per page;
        this covers pages 1-25 (start = 0, 10, ..., 240).
        """
        # Build the list of listing-page URLs to crawl.
        urls = [
            'https://guangzhou.douban.com/events/future-1401?start={}'.format(i * 10 - 10)
            for i in range(1, 26)
        ]

        # CSV header row.
        header = [['title', 'start_time', 'end_time', 'address', 'fee', 'join', 'like']]

        # 'w' truncates any previous run's output; utf-8-sig writes a BOM so
        # spreadsheet apps detect the encoding. `with` guarantees the handle
        # is closed even if the write fails (original leaked it on error).
        with open('tongcheng.csv', 'w', encoding='utf-8-sig', newline="") as file:
            csv.writer(file).writerows(header)

        for url in urls:
            yield scrapy.Request(url=url, headers={"User-Agent": user_agent}, callback=self.parse)

    def save(self, data):
        """Append the given rows (list of lists) to ``tongcheng.csv``."""
        with open('tongcheng.csv', 'a+', encoding='utf-8-sig', newline="") as file:
            csv.writer(file).writerows(data)

    @staticmethod
    def _first(element, xpath, default=''):
        """Return the first XPath match on *element*, or *default* if none.

        Guards against IndexError so one event entry with a missing field
        no longer aborts parsing of the whole page (original used ``[0]``
        unconditionally).
        """
        found = element.xpath(xpath)
        return found[0] if found else default

    def parse(self, response):
        """Parse one listing page, extract every event, and persist them.

        Callback for the requests yielded by :meth:`start_requests`.
        """
        # Parse the raw HTML body with lxml.
        html = htm.fromstring(response.body)

        # Each <li> under the events list holds one event's info.
        li_list = html.xpath('//div[@id="db-events-list"]/ul/li')

        # Rows accumulated for this page.
        data = []

        for li in li_list:
            title = self._first(li, './div[@class="info"]/div[@class="title"]/a/@title')
            start_time = self._first(li, './div[@class="info"]/ul[@class="event-meta"]/li[1]/time[1]/@datetime')
            end_time = self._first(li, './div[@class="info"]/ul[@class="event-meta"]/li[1]/time[2]/@datetime')
            address = self._first(li, './div[@class="info"]/ul[@class="event-meta"]/li[2]/@title')
            fee = self._first(li, './div[@class="info"]/ul[@class="event-meta"]/li[3]/strong/text()')
            # Strip the Chinese suffixes ("people attending" / "people interested")
            # to keep bare numbers in the CSV.
            join = self._first(li, './div[@class="info"]/p/span[1]/text()').replace("人参加", "")
            like = self._first(li, './div[@class="info"]/p/span[3]/text()').replace("人感兴趣", "")

            row = [title, start_time, end_time, address, fee, join, like]
            print(row)
            data.append(row)

        # Append this page's rows to the CSV.
        self.save(data)

