# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import requests
import hashlib
import threading
import subprocess
import pymysql
import random
import time
import uuid
import requests
from lxml import etree
from requests import request
# from retrying import retry
from threading import Thread
from queue import Queue
from requests.exceptions import *
from datetime import datetime
# from pymongo import ReturnDocument
# from pymongo import InsertOne, UpdateOne
from abc import ABCMeta, abstractmethod

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

from conf.database import DATABASE
from conf.get_ip import get_proxy_ip, judge_ip_time  # 获取IP，判断IP是否过期
from conf.dber import MongoDBer
from conf.conn_pool import get_connection


class BaseSpider(metaclass=ABCMeta):
    """爬虫基类"""

    def __init__(self, thread_num=3):
        """Initialise shared spider state.

        :param thread_num: number of worker threads started per entry of
            ``more_thread_name_list`` (see :meth:`run`).
        """
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36",
        }
        self.dir_path = self._need_dir('log')  # directory for log files
        self.proxy_flag = False  # subclasses flip this to route requests via proxies
        # Read by get_one_proxy()/product_ip_proxy() but was never initialised,
        # which made those methods raise AttributeError unless a subclass set it.
        self.finished = False
        self.thread_num = int(thread_num)
        self.proxy_que = Queue(maxsize=500)  # queue of proxy-IP dicts
        self.proxy_set = set()  # https addresses of proxies seen as banned (dedup)
        self.unique_id = uuid.getnode()  # machine identifier (MAC-derived)
        self.thread_list = []
        self.re_rule = re.compile("[\u4e00-\u9fa5a-zA-Z0-9]")  # CJK + ASCII alnum

    def gener_file(self, file_name):
        f = open(os.path.join(self.dir_path, file_name), 'a+', encoding='utf-8')
        return f

    def read_gener_file(self, file_name):
        f = open(os.path.join(self.dir_path, file_name), 'r', encoding='utf-8')
        return f

    def _need_dir(self, path_name):
        """Ensure ``BASE_DIR/path_name`` exists and return its absolute path."""
        target = os.path.join(BASE_DIR, path_name)
        os.makedirs(target, exist_ok=True)
        return target

    def send_rquest_get(self, url, headers=None, proxy_flag=False):
        """Fetch *url* with an HTTP GET and return the decoded body text.

        Retries on network-level exceptions (up to 20 times) and on
        non-200 status codes (up to 3 times).  When ``self.proxy_flag``
        is set, each attempt goes through a proxy taken from the proxy
        queue; a proxy that returns a "banned" page is remembered in
        ``self.proxy_set`` so it is not reused.

        :param url: target URL.
        :param headers: optional header dict; defaults to ``self.headers``.
        :param proxy_flag: kept for backward compatibility; the proxy
            decision actually follows ``self.proxy_flag``.
        :return: response body decoded as text.
        :raises Exception: when either retry budget is exhausted.
        """
        if not headers:
            headers = self.headers
        # Separate counters: the original shared one counter across all
        # outcomes, which raised on the very first non-200 (despite printing
        # a "retrying" message) and could raise even after a successful fetch.
        status_retries = 0  # non-200 responses
        error_retries = 0   # network-level exceptions
        ban_retries = 0     # "IP banned" responses
        while True:
            proxies_dict = None  # so the ban branch is safe without a proxy
            try:
                if self.proxy_flag:
                    proxies_dict = self.get_one_proxy()
                    res = request(method='get', url=url, headers=headers, timeout=30, verify=False,
                                  proxies=proxies_dict)
                else:
                    res = request(method='get', url=url, headers=headers, timeout=30, verify=False)
            except (ProxyError, SSLError, HTTPError, TooManyRedirects, ConnectionError, Timeout):
                error_retries += 1
                if error_retries > 20:
                    raise Exception('more request Exception unknow')
                continue
            if res.status_code != 200:
                status_retries += 1
                print(f"重新发起请求第{status_retries}次,{res.url}")
                if status_retries >= 3:
                    raise Exception('多次请求状态码异常')
                time.sleep(1)
                continue
            # Try the usual encodings for Chinese sites; GB18030 (a superset)
            # is the uncaught last resort, matching the original fallback chain.
            for encoding in ('utf-8', 'GBK', 'GB2312'):
                try:
                    html_str = res.content.decode(encoding=encoding)
                    break
                except UnicodeDecodeError:
                    continue
            else:
                html_str = res.content.decode(encoding='GB18030')

            if "该IP被禁用一段时间" in html_str:
                print('..........IP被封!!!.......... %s ' % datetime.now())
                if proxies_dict:  # only known when a proxy was actually used
                    self.proxy_set.add(proxies_dict['https'])
                    if len(self.proxy_set) > 50:
                        self.proxy_set.clear()
                ban_retries += 1
                if ban_retries <= 15:
                    continue
            return html_str

    def get_myself_ip(self):
        """Return this machine's public IP as reported by httpbin.org.

        Falls back to ``"0.0.0.0"`` on any failure (timeout, network
        error, unexpected payload).
        """
        try:
            res = requests.get(
                url="http://httpbin.org/ip",
                headers={
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36",
                    'Connection': 'keep-alive'
                },
                timeout=5,
            )
            # httpbin returns {"origin": "<ip>"}; Response.json replaces the
            # manual apparent_encoding + json.loads dance.
            return res.json()["origin"]
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            return "0.0.0.0"

    def judge_coll_exits2(self, db_name, db_m):
        """Return True when collection *db_name* does NOT exist in *db_m*.

        :param db_name: collection name to look for.
        :param db_m: database handle exposing ``list_collection_names()``.
        """
        return db_name not in db_m.list_collection_names()

    def send_rquest_post(self, url, data, headers=None, proxy_flag=True):
        """Send an HTTP POST to *url* and return the decoded body text.

        Retries on network-level exceptions (up to 20 times) and on
        non-200 status codes (one retry, then raise).  When
        ``self.proxy_flag`` is set, each attempt goes through a proxy
        from the proxy queue; banned proxies are recorded in
        ``self.proxy_set``.

        :param url: target URL.
        :param data: form body passed through to ``requests``.
        :param headers: optional header dict; defaults to ``self.headers``.
        :param proxy_flag: kept for backward compatibility; the proxy
            decision actually follows ``self.proxy_flag``.
        :return: response body decoded as text.
        :raises Exception: when either retry budget is exhausted.
        """
        if not headers:
            headers = self.headers
        # Separate counters: the original shared one counter, which could
        # raise "more request Exception" even after a successful response.
        status_retries = 0  # non-200 responses
        error_retries = 0   # network-level exceptions
        ban_retries = 0     # "IP banned" responses
        while True:
            proxies_dict = None  # so the ban branch is safe without a proxy
            try:
                if self.proxy_flag:
                    proxies_dict = self.get_one_proxy()
                    res = request(method='post', url=url, data=data, headers=headers, timeout=30, verify=False,
                                  proxies=proxies_dict)
                else:
                    res = request(method='post', url=url, data=data, headers=headers, timeout=30, verify=False)
            except (ProxyError, SSLError, HTTPError, TooManyRedirects, ConnectionError, Timeout):
                error_retries += 1
                if error_retries > 20:
                    raise Exception('more request Exception unknow')
                continue
            if res.status_code != 200:
                status_retries += 1
                if status_retries >= 2:
                    raise Exception('多次请求状态码异常, url是%s' % url)
                time.sleep(5)
                continue
            # Usual encodings for Chinese sites; GB18030 is the uncaught
            # last resort, matching the original fallback chain.
            for encoding in ('utf-8', 'GBK', 'GB2312'):
                try:
                    html_str = res.content.decode(encoding=encoding)
                    break
                except UnicodeDecodeError:
                    continue
            else:
                html_str = res.content.decode(encoding='GB18030')

            if "该IP被禁用一段时间" in html_str:
                print('..........IP被封!!!.......... %s ' % datetime.now())
                if proxies_dict:  # only known when a proxy was actually used
                    self.proxy_set.add(proxies_dict['https'])
                    if len(self.proxy_set) > 50:
                        self.proxy_set.clear()
                ban_retries += 1
                if ban_retries <= 15:
                    continue
            return html_str

    def get_one_proxy(self):
        """Block until a usable proxy is taken from ``self.proxy_que``.

        Skips entries that ``judge_ip_time`` reports as expired and
        entries whose https address is already in ``self.proxy_set``
        (previously seen as banned).  Returns a
        ``{"http": ..., "https": ...}`` proxies dict, or ``None`` once
        ``self.finished`` is truthy.

        NOTE(review): assumes ``self.finished`` has been set before this
        runs — it is not assigned in the visible ``__init__``; confirm,
        otherwise the first check raises AttributeError.  Also assumes
        each queue entry is shaped ``{'data': [{'ip': ..., 'port': ...}]}``
        — verify against ``get_proxy_ip``.
        """
        while True:
            if self.finished:
                return
            origin_ip_dict = self.proxy_que.get()
            ip = origin_ip_dict['data'][0]['ip']
            port = origin_ip_dict['data'][0]['port']
            one_proxies_dict = {
                "https": "https://%s:%s" % (ip, port),
                "http": "http://%s:%s" % (ip, port)
            }
            if judge_ip_time(origin_ip_dict):  # True when the proxy has expired
                self.proxy_que.task_done()
                print('代理过期。。。。。')
                continue
            if one_proxies_dict["https"] not in self.proxy_set:
                self.proxy_que.task_done()
                print('获取代理成功::: %s ' % datetime.now())
                return one_proxies_dict
            self.proxy_que.task_done()

    def product_ip_proxy(self, flag=False):
        """Producer that keeps ``self.proxy_que`` filled with proxy dicts.

        :param flag: False (default) runs forever as a producer-thread
            loop, refilling the queue whenever it drains; True performs
            a single refill, retrying until ``get_proxy_ip()`` returns
            something.
        """
        if not flag:
            while True:
                try:
                    if self.proxy_que.qsize():
                        # Queue still has entries: sleep instead of the
                        # original bare `continue`, which busy-spun a core.
                        time.sleep(1)
                        continue
                    poxy_dict = get_proxy_ip()
                    if not poxy_dict:
                        print('获取代理多次为空..........')
                        continue
                    if self.finished:
                        break
                    print('change IP success ..........')
                    # Fill the whole queue with copies of the fresh proxy.
                    for _ in range(self.proxy_que.maxsize):
                        self.proxy_que.put(poxy_dict)
                except KeyboardInterrupt:
                    break
                except Exception:
                    print('生产代理异常。。。。')
        else:
            # Single refill: loop (instead of the original unbounded
            # recursion) until a proxy is obtained.
            while True:
                poxy_dict = get_proxy_ip()
                if poxy_dict:
                    print('change IP success ..........')
                    for _ in range(self.proxy_que.maxsize):
                        self.proxy_que.put(poxy_dict)
                    return

    @abstractmethod
    def run_thread_list(self):
        """Register thread targets; subclasses must override.

        Implementations fill ``self.thread_name_list`` (callables run in
        one thread each) and ``self.more_thread_name_list`` (callables
        run in ``self.thread_num`` threads each); see :meth:`run`.
        """
        self.thread_name_list, self.more_thread_name_list = [], []

    def run(self):
        """Build all worker threads, start them as daemons, and wait.

        ``run_thread_list`` (subclass-provided) must fill
        ``self.thread_name_list`` (one thread per callable) and
        ``self.more_thread_name_list`` (``self.thread_num`` threads per
        callable).  When ``self.proxy_flag`` is set, a proxy-producer
        thread is started as well.  Prints start/end timestamps.
        """
        if self.proxy_flag:
            proxy_t = Thread(target=self.product_ip_proxy)
            self.thread_list.append(proxy_t)

        self.run_thread_list()  # subclasses register their targets here
        for t in self.thread_name_list:
            self.thread_list.append(Thread(target=t))

        start_time = datetime.now()
        for t in self.more_thread_name_list:
            for _ in range(self.thread_num):
                self.thread_list.append(Thread(target=t))

        for t in self.thread_list:
            # setDaemon() is deprecated since 3.10 and removed in 3.13;
            # assign the attribute instead.
            t.daemon = True
            t.start()
        for t in self.thread_list:
            t.join()
        end_time = datetime.now()
        print('采集开始时间：：：%s' % start_time)
        print('采集结束时间：：：%s' % end_time)
