# -*- coding: utf-8 -*-
# 导入包

from openpyxl import load_workbook
from openpyxl import Workbook
import requests
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
import codecs
import json
import re
import pymongo
import multiprocessing
import os
import urllib


class Create_crawl():
    """Grab-bag of crawling helpers.

    Bundles HTTP fetching (requests / a PhantomJS-driven browser),
    BeautifulSoup parsing shortcuts, MongoDB inserts, multiprocessing
    fan-out, image download and Excel read/write.

    NOTE(review): ported from Python 2 to Python 3 syntax
    (print(), `except ... as e`, dict.items(), urllib.request).
    """

    def __init__(self):
        # Canned request-header presets for different user agents.
        # http://www.cnblogs.com/pzxbc/articles/2269011.html
        self.set_Headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }
        self.set_HeadersAndroid_QQBrowser = {
            'User-Agent': 'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }
        self.set_HeadersAndroid_UCBrowser = {
            'User-Agent': 'JUC (Linux; U; 2.3.7; zh-cn; MB200; 320*480) UCWEB7.9.3.103/139/999',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }
        self.set_Headers_iPhone = {
            'User-Agent': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }
        self.set_Headers_iPad = {
            'User-Agent': 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive'
        }

    # PhantomJS + BeautifulSoup, with custom request headers
    def get_pha_Bs4_Headers(self, url, headers=None):
        """
        :param url: target URL
        :param headers: request-header dict (may be None)
        :return: BeautifulSoup object of the rendered page
        """
        desired_capabilities = DesiredCapabilities.PHANTOMJS.copy()
        # BUG FIX: guard against headers=None (the documented default) and
        # use .items() (.iteritems() is Python 2 only).
        if headers:
            for key, value in headers.items():
                desired_capabilities['phantomjs.page.customHeaders.{}'.format(key)] = value
        # BUG FIX: the original created a second driver and leaked the first;
        # a single driver is created here and always quit to avoid leaking
        # the PhantomJS process.
        driver = webdriver.PhantomJS(desired_capabilities=desired_capabilities)
        try:
            driver.get(url)
            soup = BeautifulSoup(driver.page_source, 'html.parser')
        finally:
            driver.quit()
        return soup

    def get_pha_Bs4(self, url):
        """
        :param url: target URL
        :return: BeautifulSoup object of the rendered page
        """
        driver = webdriver.PhantomJS()
        try:
            driver.get(url)
            soup = BeautifulSoup(driver.page_source, 'html.parser')
        finally:
            driver.quit()  # do not leak the browser process
        return soup

    # Requests + BeautifulSoup
    def get_req_Bs4(self, url, headers=None, encode='utf-8'):
        """
        :param url: target URL
        :param headers: request-header dict
        :param encode: character encoding forced onto the response
        :return: BeautifulSoup object
        """
        response = requests.get(url, headers=headers)
        response.encoding = encode
        return BeautifulSoup(response.text, 'html.parser')

    def post_req_Bs4(self, url, headers=None, encode='utf-8', data=None):
        """
        :param url: target URL
        :param headers: request-header dict
        :param encode: character encoding forced onto the response
        :param data: POST form data
        :return: BeautifulSoup object
        """
        # BUG FIX: `headers` was accepted but never forwarded to requests.post
        response = requests.post(url=url, data=data, headers=headers)
        response.encoding = encode
        return BeautifulSoup(response.text, 'html.parser')

    def get_req_session(self, url, header):
        """
        :param url: target URL
        :param header: request-header dict
        :return: BeautifulSoup object (fetched on a persistent session)
        """
        session = requests.session()
        response = session.get(url, headers=header)
        response.encoding = 'utf-8'
        return BeautifulSoup(response.text, 'html.parser')

    def get_req_json(self, url):
        """GET `url` and decode the body as JSON."""
        data = requests.get(url)
        return json.loads(data.text)

    def post_req_json(self, url, postdata, file):
        """POST form data + files to `url` and decode the body as JSON."""
        response = requests.post(url, data=postdata, files=file)
        return json.loads(response.text)

    ################### BeautifulSoup result helpers ###############################
    def soup_find_one(self, soup, tag, cla=None, id=None, text=None):
        """
        :param soup: parent node
        :param tag: target tag name
        :param cla: class attribute to match
        :param id: id attribute to match
        :param text: regex pattern the node text must match
        :return: first matching node (or None)
        """
        if not text:
            return soup.find(tag, class_=cla, id=id)
        return soup.find(tag, class_=cla, id=id, text=re.compile(text))

    def soup_find_all(self, soup, tag, cla=None, id=None, text=None):
        """
        :param soup: parent node
        :param tag: target tag name
        :param cla: class attribute to match
        :param id: id attribute to match
        :param text: regex pattern the node text must match
        :return: list of matching nodes
        """
        if not text:
            return soup.find_all(tag, class_=cla, id=id)
        return soup.find_all(tag, class_=cla, id=id, text=re.compile(text))

    # Extract text from a node
    def text(self, object):
        """
        :param object: soup node (anything with .get_text())
        :return: its text, or the literal string 'NULL' on failure
        """
        # Narrowed from a bare `except:`; AttributeError is what a missing /
        # unusable node actually raises here.
        try:
            return object.get_text()
        except AttributeError:
            return 'NULL'

    # Extract digit runs
    def number_pro(self, text):
        """
        :param text: any string
        :return: list of decimal digit runs found in `text`
        """
        # BUG FIX: the original built the list but never returned it
        return re.findall(r'\d+', text)

    def pymongo_insert(self, data, database, collection, host='localhost', prot='27017', **kwargs):
        """Insert `data` (a document, or a list of documents) into MongoDB.

        :param prot: server port (misspelled name kept for backward
                     compatibility with existing callers)
        """
        # BUG FIX: MongoClient has no `prot` kwarg and the port must be an int;
        # the original call raised TypeError unconditionally.
        client = pymongo.MongoClient(host=host, port=int(prot))
        db = client[database]
        coll = db[collection]
        # Replaces the removed/deprecated Collection.insert()
        if isinstance(data, (list, tuple)):
            coll.insert_many(list(data))
        else:
            coll.insert_one(data)
        print(u'添加到数据库')

    def pool_pro(self, func, processes, **kwargs):
        """Run `func` over every item of `processes` on a CPU-sized pool."""
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        for process in processes:
            # BUG FIX: each task gets its own item (and kwargs as kwds);
            # the original passed the whole list and the kwargs dict as
            # two positional arguments to every task.
            pool.apply_async(func, (process,), kwargs)
        pool.close()
        pool.join()

    def saveImg(self, imgurl, filename, number=0, **kwargs):
        """Download `imgurl` into folder `filename` as <number>.jpeg."""
        # Local import: Python 3 location of urlopen
        import urllib.request
        # Create the target folder when missing
        path = filename.strip()
        if not os.path.exists(path):
            print('create new folder')
            os.makedirs(path)
        target = path + '/' + str(number) + '.jpeg'
        # Context managers close both handles even on error
        with urllib.request.urlopen(imgurl) as obj:
            data = obj.read()
        with open(target, 'wb') as f:
            f.write(data)
        print(u'保存 ' + target + u' 完毕')

    # Read an Excel sheet
    def excel_read(self, filename, sheetname):
        """
        :param filename: workbook file name
        :param sheetname: worksheet name
        :return: flat list of all cell values (None on error)
        """
        wb = load_workbook(filename=filename, read_only=True)
        ws = wb[sheetname]
        xlsx_list = []
        try:
            for row in ws.rows:
                for cell in row:
                    xlsx_list.append(cell.value)
            return xlsx_list
        except Exception as e:
            print(u'错误提示', e)

    # Save to Excel
    def excel_save(self, filename, list):
        """
        :param filename: workbook file name
        :param list: rows to store (iterable of row iterables)
        :return: success message, or '' on failure
        """
        success = ''
        try:
            wb = Workbook(write_only=True)
            ws = wb.create_sheet()
            # BUG FIX: append whole rows. The original appended each *cell
            # value* as its own row (a string value became one row per
            # character, since openpyxl iterates the value).
            for irow in list:
                ws.append(irow)
            wb.save(filename=filename)
            success = u'保存成功'
        except Exception as e:
            print(u'错误提示', e)
        return success

    def errorULR(self, url):
        """Append a failed URL to the error log."""
        # `with` guarantees the handle is closed (the original leaked it)
        with codecs.open('error.text', 'a', 'utf-8') as file:
            file.write(url + '\n')
