#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Albert_Chen
# Copyright (c) 2017 陈超. All rights reserved by Chao.Chen.
# Created on 2017-01-22

from __future__ import absolute_import

import sys
sys.path.append('..')
import re
import time
import requests
from  urlparse import urljoin
from BeautifulSoup import BeautifulSoup
from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)

class WebCheck(object):
    """Fetch a web page and expose checks on its content and availability.

    The constructor performs a blocking HTTP GET of ``url`` and parses the
    response body into a BeautifulSoup tree; ``self.soup`` is ``None`` when
    the request did not return 200 or the body could not be parsed.
    """

    def __init__(self, url):
        self.url = url
        # NOTE(review): no timeout/error handling here — a slow or dead host
        # blocks the caller; requests.get may also raise. Confirm callers
        # expect that before hardening.
        self.resp = requests.get(url)
        self.soup = self.make_soup(self.resp)

    @staticmethod
    def make_soup(resp):
        """Parse a 200 response into a BeautifulSoup tree.

        :param resp: a ``requests.Response``
        :return: BeautifulSoup instance, or ``None`` on a non-200 status or
                 a parse failure (the failure is logged).
        """
        if resp.status_code != 200:
            return None
        try:
            # Force UTF-8 so pages with a mis-declared encoding decode
            # consistently before parsing.
            if resp.encoding != 'utf-8':
                resp.encoding = 'utf-8'
            return BeautifulSoup(resp.text)
        except Exception as e:  # was Py2-only "except Exception, e"
            logger.error("Beautiful soup parse error %s" % str(e))
            return None

    def three_elements(self):
        """Return the page's three SEO elements.

        :return: tuple ``(title, keywords, description)``; each element is
                 ``''`` when unavailable.
        """
        return self.get_title(), self.get_keywords(), self.get_description()

    def get_keywords(self):
        """Return the page's meta ``keywords`` content, or ``''``.

        Bug fix: previously returned ``None`` (implicit) when the soup
        existed but no keywords tag was found; now consistently ``''``.
        """
        if self.soup:
            tags = self.soup.findAll(attrs={"name": re.compile(r'keyword[s]?', re.I)})
            if tags:
                return tags[0]['content']
        return ''

    def get_title(self):
        """Return the page ``<title>`` text, or ``''``.

        Bug fix: guards against a page with no ``<title>`` tag, where
        ``self.soup.title`` is ``None`` and ``.text`` would raise
        ``AttributeError``.
        """
        if self.soup and self.soup.title is not None:
            return self.soup.title.text
        return ''

    def get_description(self):
        """Return the page's meta ``description`` content, or ``''``.

        Bug fix: previously returned ``None`` (implicit) when the soup
        existed but no description tag was found; now consistently ``''``.
        """
        if self.soup:
            tags = self.soup.findAll(attrs={"name": re.compile(r'description', re.I)})
            if tags:
                return tags[0]['content']
        return ''

    def calculate_avg_time(self, times=3, timeout=10):
        """Measure the average time to load ``self.url``.

        :param times: number of sequential GET requests to issue
        :param timeout: per-request timeout in seconds
        :return: human-readable summary string; on any request failure
                 (e.g. timeout) an error summary is returned instead.
        """
        start = time.time()
        for _ in range(times):
            try:
                requests.get(self.url, timeout=timeout)
            except Exception as e:  # was Py2-only "except Exception, e"
                logger.error(str(e))
                return 'Loading url: %s out of %d secs' % (self.url, timeout)
        delta = time.time() - start
        avg_time = delta / float(times)
        return 'Loading url: %s %d*time use %5.3f secs' % (self.url, times, avg_time)

    @classmethod
    def check_response(cls, url):
        """Check how ``url`` responds, reporting redirects (e.g. to 404.html).

        :param url: URL to probe with a GET request
        :return: human-readable summary of status code and redirect target.
                 NOTE(review): requests follows redirects by default, so
                 ``resp.is_redirect`` on the final response is rarely True —
                 the original flagged this branch as needing further work.
        """
        resp = requests.get(url)
        if resp.status_code in (200, 403, 301) and resp.is_redirect:
            if "/404.html" in resp.url:
                return 'loading url: %s, status code is: %s redirect url is 404.html' % (url, str(resp.status_code))
            else:
                return 'loading url: {url}, status code is: {code} redirect url is {r_url}'.format(
                    url=url,
                    code=resp.status_code,
                    r_url=resp.url
                )
        else:
            return 'loading url: %s, status code is %s no redirect' % (url, resp.status_code)

    def __repr__(self):
        return '%r @ url: %s' % (self.__class__, self.url)


def result_generator(data):
    """Compare expected page metadata against the live page for each item.

    :param data: iterable of dicts with keys ``url``, ``title``,
                 ``keywords``, ``description``; a falsy expected value
                 skips that comparison.
    :yield: ``{'url': url, 'result': result}`` where ``result`` is ``''``
            when every supplied expectation matches, otherwise a
            concatenation of error markers.
    """
    for item in data:
        expected_title = item.get('title')
        expected_keywords = item.get('keywords')
        expected_description = item.get('description')
        url = item.get('url')

        # Blocking fetch + parse of the live page.
        website_entity = WebCheck(url)
        title, keywords, description = website_entity.three_elements()

        errors = []
        # "x != y" replaces the original "not x == y" anti-idiom; only
        # fields the caller actually supplied are compared.
        if expected_title and expected_title != title:
            errors.append('title error; ')
        if expected_keywords and expected_keywords != keywords:
            errors.append('keywords error; ')
        if expected_description and expected_description != description:
            errors.append('desc error; ')
        yield {'url': url, 'result': ''.join(errors)}

if __name__ == "__main__":
    # No CLI entry point: this module is intended to be imported
    # (e.g. by Celery tasks, given the get_task_logger usage above).
    pass

