#!/usr/bin/python
# -*- coding:utf-8 -*-

import json
import logging
import re
from collections import Iterable
from datetime import datetime

import scrapy
from scrapy import signals
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.xlib.pydispatch import dispatcher

from site_analysis.items import SiteAnalysisItem

try:
    # Python2
    from urlparse import urlparse
    from urlparse import urljoin
except ImportError:
    # Python3
    from urllib.parse import urljoin
    from urllib.parse import urlparse


class siteSpider(scrapy.spiders.Spider):
    """Depth-limited single-site crawler.

    For every page reached it yields a ``SiteAnalysisItem`` carrying the
    page title, URL, download time, size (KiB) and depth rank, and it
    grows ``tree_data`` — a nested ``{"name": ..., "children": [...]}``
    structure mirroring the link tree rooted at the start URL.
    """

    name = "siteSpider"
    download_delay = 1

    # Class-level declarations only; __init__ always rebinds fresh
    # per-instance objects so two spider instances never share state.
    allowed_domains = []
    start_urls = []
    depth_limit = 10
    all_urls = set()
    scheduled = 0
    received = 0
    this_url = ""
    tree_data = {}

    def __init__(self, s_url=None, depth=None, *args, **kwargs):
        """
        :param s_url: start URL; its netloc becomes the allowed domain.
        :param depth: maximum crawl depth. May arrive as a numeric string
            from ``scrapy crawl -a depth=N``; coerced to int. Defaults to 10.
        """
        super(siteSpider, self).__init__(*args, **kwargs)

        # Fresh mutable state per instance (never mutate the shared
        # class-level defaults above).
        self.allowed_domains = []
        self.start_urls = []
        self.all_urls = set()      # URLs already parsed; set for O(1) lookup
        self.tree_data = {}
        self.scheduled = 0
        self.received = 0
        self.this_url = ""
        self.download_time = 0.0   # safe default before any signal fires

        if s_url:
            self.allowed_domains.append(
                '{uri.netloc}'.format(uri=urlparse(s_url)))
            # Seed start_urls with the requested entry point.
            self.start_urls.append(s_url)
            # Crawl depth: "-a depth=N" is passed in as a string, so it
            # must be coerced before comparing against integer ranks.
            self.depth_limit = 10 if depth is None else int(depth)
            self.logger.info("spider depth_limit: %s", self.depth_limit)
            # Root node of the link tree.
            self.tree_data['name'] = self.start_urls[0]
            self.tree_data["children"] = []

        # NOTE(review): scrapy.xlib.pydispatch was removed in Scrapy 2.0;
        # when upgrading, connect via crawler.signals in from_crawler().
        dispatcher.connect(self.spider_request_scheduled,
                           signals.request_scheduled)

        dispatcher.connect(self.spider_response_received,
                           signals.response_received)

    def spider_request_scheduled(self):
        """Record the moment the engine schedules a request for download."""
        # NOTE(review): a single timestamp is overwritten by every scheduled
        # request, so download_time is only meaningful with one request in
        # flight at a time (download_delay mitigates, does not guarantee).
        self.scheduled = datetime.now()

    def spider_response_received(self):
        """Record response arrival and derive download time in seconds."""
        self.received = datetime.now()
        self.download_time = (self.received - self.scheduled).total_seconds()

    def parse(self, response):
        """Emit an item for this page and follow its in-domain links.

        ``response.meta`` may carry:
          * ``rank`` — depth of this page (0 for the start URL);
          * ``node`` — the parent node dict inside ``tree_data``.
        """
        rank = response.meta.get("rank", 0)
        father = response.meta.get("node")
        if father is None:
            # No parent recorded: either this is the root page, or an
            # orphan request — give it a detached node so appends succeed.
            if response.url == self.start_urls[0]:
                father = self.tree_data
            else:
                father = {
                    "name": response.url,
                    "children": []
                }

        self.all_urls.add(response.url)
        self.this_url = response.url

        href_a = None
        try:
            href_a = response.xpath("//a[@href]/@href")
            title = response.xpath("/html/head/title/text()").extract()[0]
        except Exception as e:
            self.logger.error(e)
            # Non-HTML responses (plain files) have no <title>; fall back
            # to the last path segment of the URL.
            title = self.this_url.split('/')[-1]

        item = SiteAnalysisItem()
        url = response.url
        length = len(response.body) / 1024.00   # page size in KiB
        item["title"] = title
        item["link"] = url
        item["download_time"] = self.download_time
        item["page_size"] = length
        # Depth rank, used downstream to rebuild parent/child relations.
        item["rank"] = rank

        # href_a is None only when the xpath itself failed (non-text
        # response), i.e. the page has no links to follow.
        if href_a is not None:
            this_node = {
                "name": title,
                "url": url,
                "children": []
            }
            father["children"].append(this_node)

            for href in href_a:
                # Resolve relative links against the current page URL.
                absolute = urljoin(response.url, href.extract())
                # Skip pages that were already parsed.
                if absolute in self.all_urls:
                    continue

                # Enforce the crawl-depth limit.
                if rank + 1 <= self.depth_limit:
                    yield Request(absolute,
                                  meta={"rank": rank + 1, "node": this_node},
                                  callback=self.parse)
        else:
            # Leaf resource (file): record its size instead of children.
            father["children"].append({
                "name": title,
                "url": url,
                "value": length
            })
        yield item
