# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import re
import time
from datetime import datetime
from meizhuang.items import *
import pymongo

class WeiboPipeline(object):
    """Normalize Weibo post timestamps and flag posts made on a weekend."""

    def parse_item(self, date_time):
        """Convert a raw Weibo timestamp string to '%Y-%m-%d %H:%M[:%S]' text.

        Handles three relative formats observed on Weibo:
        - 'M月D日 HH:MM'  : a date in the current year
        - 'N分钟前'       : N minutes before now
        - '今天 HH:MM'    : today at HH:MM
        Any other input is assumed to already start with a '%Y-%m-%d' date.

        Returns a (normalized_string, is_weekend) tuple where is_weekend is
        1 for Saturday/Sunday and 0 otherwise.
        """
        if re.match(r'\d+月\d+日', date_time):  # matches from the start, e.g. '5月1日 12:30'
            # Prepend the current year, then reformat into dash notation.
            date_time = time.strftime('%Y{y}', time.localtime()).format(y='年') + date_time
            array = time.strptime(date_time, u"%Y年%m月%d日 %H:%M")
            date_time = time.strftime("%Y-%m-%d %H:%M:%S", array)
        elif re.match(r'\d+分钟前', date_time):
            minute = re.match(r'(\d+)', date_time).group(1)
            # BUG FIX: the original emitted 'YYYY年MM月DD日 HH:MM' here, which
            # crashed the weekday parse below; emit the dash format instead.
            date_time = time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.localtime(time.time() - float(minute) * 60))
        elif re.match('今天.*', date_time):
            clock = re.match('今天(.*)', date_time).group(1).strip()
            # BUG FIX: same as above — use today's date in dash format so the
            # strptime below can parse it.
            date_time = time.strftime('%Y-%m-%d', time.localtime()) + ' ' + clock

        # weekday(): Monday == 0 ... Sunday == 6; +1 gives ISO-style 1..7.
        apply_week = datetime.strptime(date_time.split(" ")[0], "%Y-%m-%d").weekday() + 1
        is_weekend = 1 if apply_week > 5 else 0

        return date_time, is_weekend

    def process_item(self, item, spider):
        """Normalize 'posted_at' on WeiboItem and attach the 'is_weekend' flag."""
        if isinstance(item, WeiboItem):
            if item.get('posted_at'):
                item['posted_at'] = item['posted_at'].strip()
                item['posted_at'], item["is_weekend"] = self.parse_item(item['posted_at'])
        return item

# Stamp each item with the moment it was crawled.
class TimePipeline():
    def process_item(self, item, spider):
        """Attach the current local time to WeiboItem as 'crawled_at'."""
        if not isinstance(item, WeiboItem):
            return item
        item['crawled_at'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        return item

# Persist items to MongoDB, one collection per weibo topic.
class MongoPipeline():
    def __init__(self, mongo_uri, mongo_db):
        """Store connection settings; the client itself opens in open_spider."""
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's MONGO_URI / MONGO_DATABASE settings."""
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE'),
        )

    def open_spider(self, spider):
        """Connect to MongoDB and ensure a 'wid' index on every existing collection."""
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        # Index every current collection on 'wid' so the upsert filter in
        # process_item stays fast.
        for collection in self.db.list_collection_names():
            self.db[collection].create_index([('wid', pymongo.ASCENDING)])

    def close_spider(self, spider):
        """Release the MongoDB connection."""
        self.client.close()

    def process_item(self, item, spider):
        """Upsert a WeiboItem into the collection named after its topic.

        The collection name is the text between the '#...#' markers in
        item['topic']; a missing marker raises AttributeError on group().
        """
        if isinstance(item, WeiboItem):
            collection = re.search('#(.*?)#', item["topic"]).group(1)
            # BUG FIX: Collection.update() was deprecated and removed in
            # PyMongo 4; update_one(..., upsert=True) is the supported
            # equivalent. dict(item) makes the Scrapy Item BSON-encodable.
            self.db[collection].update_one(
                {'wid': item.get('wid')},
                {'$set': dict(item)},
                upsert=True,
            )
        return item

