# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import logging
import time

import requests
from scrapy.conf import settings


class SiteAnalysisPipeline(object):
    """Placeholder pipeline: forwards every item through unchanged."""

    def __init__(self):
        # Nothing to initialize; kept explicit for symmetry with the
        # other pipelines in this module.
        pass

    def process_item(self, item, spider):
        """Hand *item* straight to the next pipeline stage."""
        return item


class MongoPipeline(object):
    """Accumulate per-page statistics (pie/bar chart data) and POST them,
    together with the spider-built tree data, to a reporting API when the
    spider closes.

    NOTE(review): despite the name, this pipeline talks to an HTTP save
    API via ``requests`` — it does not write to MongoDB directly.
    """

    def __init__(self):
        # Bug fix: these accumulators used to be *class* attributes, so the
        # inner lists were shared by every instance and kept growing across
        # spider runs within one process. They are now per-instance state.
        self.pie_data = {
            'legendData': [],
            'seriesData': []
        }
        self.bar_data = {
            'name': '',
            'namelist': [],
            'datalist': []
        }
        # "SAVAAPI" looks like a typo for "SAVEAPI", but it must match the
        # key in the project settings file -- do not rename it here alone.
        self.saveurl = settings["SAVAAPI"]

    def process_item(self, item, spider):
        """Fold one crawled page into the pie and bar chart datasets.

        Expects the item to carry 'title', 'page_size' and 'download_time'
        fields; missing fields are recorded as None rather than raising.
        Returns the item unchanged for downstream pipelines.
        """
        title = item.get('title')

        # Pie chart: page size per page.
        self.pie_data["legendData"].append(title)
        self.pie_data["seriesData"].append({
            "name": title,
            "value": item.get("page_size")
        })

        # Bar chart: download time per page.
        self.bar_data["namelist"].append(title)
        self.bar_data["datalist"].append(item.get('download_time'))
        return item

    def close_spider(self, spider):
        """POST the accumulated chart data to the save API.

        Called once by Scrapy when the spider finishes.
        """
        domain = spider.allowed_domains[0]
        now = time.time()

        # Stamp each dataset with the crawled domain and the close time.
        # pie
        self.pie_data["url"] = domain
        self.pie_data["date"] = now
        # tree (built by the spider itself; per the original note, its
        # first two entries are duplicated upstream)
        spider.tree_data["url"] = domain
        spider.tree_data["date"] = now
        # bar
        self.bar_data["url"] = domain
        self.bar_data["date"] = now

        payload = {
            'pie': self.pie_data,
            'bar': self.bar_data,
            'tree': spider.tree_data
        }
        try:
            # json= serializes the payload and sets the Content-Type:
            # application/json header the original data=json.dumps(...)
            # call omitted.
            requests.post(self.saveurl, json=payload)
        except Exception as e:
            # Best-effort: a failed upload must not crash spider shutdown,
            # but it is an error, not an informational event.
            spider.logger.error("Failed to post analysis data: %s", e)
