# -*- coding: utf-8 -*-
from YX_crawler.props.proxy_config import url_parse_dict, target_url, time_out
from lxml import etree
from YX_crawler.DAO.proxies import proxies, save_proxy
from YX_crawler.props import properties
import threading
import requests
import datetime
import random
import time
import re

# Convenience alias for the shared request-header factory defined in properties.
get_header = properties.get_header
# Module-level handle to the crawl worker thread; populated by get_craw_thread().
craw_thread = None


def craw():
    """Crawl every 'active' proxy website configured in ``url_parse_dict``.

    For each site: fetch every configured page URL, extract the proxy
    ``ip:port`` strings with the site's configured parser (xpath or re),
    wrap them in ``proxies`` records and hand the batch to a
    ``check_save_proxy_thread_help`` instance for parallel validation
    and persistence.
    """
    for website_name, site_conf in url_parse_dict.items():
        if site_conf.get('status') != 'active':
            continue
        # Per-site configuration (list of page URLs, parse layout, headers).
        website_urls = site_conf.get('url')
        ip_port_together = site_conf.get('ip_port_together')
        header = site_conf.get('header')
        header['User-Agent'] = random.choice(properties.agent_parser())

        # Choose the parser function and its site-specific parameters.
        get_data = None
        get_data_param = ''
        parse_type = site_conf.get('parse_type')
        if parse_type == 'xpath':
            get_data = get_data_by_xpath
            parse_method = site_conf.get('parse_method')
            get_data_param = (parse_method.get('ip_address'),
                              parse_method.get('ip_port'))
        elif parse_type == 're':
            get_data = get_data_by_re
            get_data_param = site_conf.get('parse_method').get('_pattern')
        if get_data is None:
            # BUG FIX: an unknown parse_type used to leave get_data as None
            # and crash below with "'NoneType' object is not callable".
            print('unknown parse_type %r for %s, site skipped'
                  % (parse_type, website_name))
            continue

        proxy_list = []
        for url in website_urls:
            try:
                page = requests.get(url, headers=header,
                                    timeout=10).content.decode('utf-8', 'ignore')
            except Exception as e:
                # BUG FIX: on a failed request `page` stayed undefined and
                # the parse call below raised NameError; skip this URL.
                print(e)
                continue
            # BUG FIX: the old loop variable `value` shadowed the outer
            # dict value; iterate the parsed list directly instead.
            for _ip in get_data(page, get_data_param, ip_port_together):
                proxy = proxies(_ip + '_' + target_url)
                proxy.source = website_name
                proxy.ip = _ip
                proxy.target_url = target_url
                proxy.insert_time = datetime.datetime.now()
                proxy_list.append(proxy)
            time.sleep(1)  # be polite between page fetches
        helper = check_save_proxy_thread_help(proxy_list, header)
        helper.check_save_thread(10)
    print('end proxy_craw')


def get_data_by_xpath(html_page_source, xpath_func, ip_port_together):
    """Extract proxy 'ip:port' strings from an HTML page via XPath.

    :param html_page_source: decoded HTML text of the crawled page
    :param xpath_func: a single XPath string when ``ip_port_together`` is
        truthy, otherwise an ``(ip_address_xpath, ip_port_xpath)`` pair
    :param ip_port_together: whether one expression yields the full
        "ip:port" string
    :return: list of "ip:port" strings
    """
    selector = etree.HTML(html_page_source)
    if ip_port_together:
        # One expression already yields complete "ip:port" strings.
        return list(selector.xpath(xpath_func))
    ip_address_xpath, ip_port_xpath = xpath_func
    addresses = selector.xpath(ip_address_xpath)
    ports = selector.xpath(ip_port_xpath)
    # BUG FIX: zip() stops at the shorter list, so a missing port cell no
    # longer raises IndexError as the old index-based loop did.
    return [addr + ':' + port for addr, port in zip(addresses, ports)]


# re
# re
def get_data_by_re(html_page_source, pattern, ip_port_together, flags=re.DOTALL):
    """Extract proxy 'ip:port' strings from an HTML page via regex.

    :param html_page_source: decoded HTML text of the crawled page
    :param pattern: regex; with ``ip_port_together`` falsy it must have two
        groups -- (ip, port) -- otherwise each whole match is the result
    :param ip_port_together: whether one match is already "ip:port"
    :param flags: regex flags, DOTALL by default so '.' spans newlines
    :return: list of "ip:port" strings; empty list when nothing matches
        or when matching fails
    """
    try:
        matches = re.findall(pattern, html_page_source, flags=flags)
        if ip_port_together:
            return list(matches)
        # Two-group patterns yield (ip, port) tuples; join them.
        return [m[0] + ':' + m[1] for m in matches]
    except Exception as e:
        print('Error msg: %s in [get_data_by_re]' % e)
        # BUG FIX: the old code implicitly returned None here, which every
        # caller then iterated and crashed on; an empty list is safe.
        return []


def get_craw_thread():
    """Build a fresh thread running :func:`craw`, cache it in the
    module-global ``craw_thread`` and return it."""
    global craw_thread
    worker = threading.Thread(target=craw)
    craw_thread = worker
    return worker


class check_save_proxy_thread_help:
    """Validate crawled proxies against ``target_url`` in parallel and
    persist the responsive ones via ``save_proxy``.

    NOTE(review): despite the name this is a plain helper class, not a
    ``Thread`` subclass -- it spawns its own worker threads.
    """

    # Immutable configuration shared by all instances (from proxy_config).
    target_url = target_url
    timeout = time_out

    def __init__(self, plist, header):
        """
        :param plist: list of ``proxies`` records still to be checked
        :param header: request headers to reuse while checking
        """
        # BUG FIX: proxy_list/header/thread used to be mutable *class*
        # attributes, so every instance shared and mutated the same
        # objects; they are per-instance state now.
        self.proxy_list = plist
        self.header = header
        self.thread = []  # worker threads of the current run

    def check_save_thread(self, thread_count):
        """Split ``self.proxy_list`` into ``thread_count`` slices, check
        each slice on its own thread and block until all workers finish."""
        param_proxy_list = self.proxy_list
        # BUG FIX: floor division -- plain '/' yields a float under
        # Python 3 and float slice bounds raise TypeError.
        task_count = len(param_proxy_list) // thread_count + 1
        for i in range(thread_count):
            param_list = param_proxy_list[i * task_count:(i + 1) * task_count]
            t = threading.Thread(target=self.check_save_proxy,
                                 args=(param_list,))
            self.thread.append(t)

        for t in self.thread:
            t.start()
        for t in self.thread:
            t.join()
        del self.thread[:]

    def check_save_proxy(self, proxies):
        """Validate every proxy record in ``proxies`` (the parameter
        shadows the imported ``proxies`` class, which this method does not
        use) and save the ones that answered."""
        for p in proxies:
            response_time = self.valid(p.ip)
            if response_time:
                p.response_time = response_time
                save_proxy(p)

    def valid(self, _ip):
        """Fetch ``target_url`` through proxy ``_ip`` and return the
        response time in seconds, or None when the proxy is unusable."""
        proxy = {
            'http': 'http://%s' % _ip,
            'https': 'http://%s' % _ip
        }
        try:
            print("is checking ip:", _ip)
            con = requests.get(self.target_url, headers=get_header(),
                               proxies=proxy, timeout=self.timeout)
            if con.status_code == 200:
                print(_ip + "can visit douban!")
                # BUG FIX: .microseconds is only the sub-second component
                # of the timedelta (wrong for responses >= 1s);
                # total_seconds() is the real elapsed time.
                return con.elapsed.total_seconds()
            return None
        except Exception:
            # Unreachable/slow proxies are expected; treat them as invalid.
            return None


