from re import S
import requests
from bs4 import BeautifulSoup
from faker import Factory
from sqlalchemy.orm import session
from do.ip_pool import IpPool
from util.StoreUtil import DBbase


class SpiderBase(DBbase):
    """Base proxy-pool crawler.

    Fetches pages, extracts "ip:port" proxies driven by a nested rule dict,
    probes which proxies actually work, and persists the survivors through
    the DB layer inherited from DBbase.
    """

    def __init__(self, url=""):
        """:param url: default base URL used when a method gets no explicit url."""
        super(SpiderBase, self).__init__()
        self.base_url = url
        self.base_header = {}    # extra headers merged into every request
        self.ip_pool = []        # proxies that passed the availability probe
        self.proxies_objs = []   # raw scraped proxies, e.g. {"http": "1.2.3.4:80"}
        self.conf_list = []      # site configs consumed by batch_get_proxies_objs

    def __get_soup(self, url="", coding="utf8", timeout=5):
        """Fetch *url* (or self.base_url) and return it parsed with lxml.

        :param coding: response encoding forced before parsing
        :param timeout: request timeout in seconds
        :raises RuntimeError: on timeout or connection failure
        :return: BeautifulSoup of the response body
        """
        if url == "":
            url = self.base_url
        # Build the request headers on a copy so self.base_header is not
        # mutated.  (The old code passed dict.update()'s return value --
        # which is None -- as the headers argument, so no headers and no
        # User-Agent were ever sent.)
        headers = dict(self.base_header)
        headers["User-Agent"] = self.get_user_agent()
        try:
            resp = requests.get(url=url, headers=headers, timeout=timeout)
        except requests.exceptions.Timeout:
            # Timeout covers both ConnectTimeout and ReadTimeout; the old
            # code let ReadTimeout escape uncaught.
            raise RuntimeError("请求超时")
        except requests.exceptions.ConnectionError:
            raise RuntimeError("请求失败")
        resp.encoding = coding
        return BeautifulSoup(resp.text, "lxml")

    def read_conf_excel(self):
        """Placeholder: load site configs from an Excel file."""
        pass

    def read_conf_yaml(self, filename):
        """Placeholder: load site configs from a YAML file *filename*."""
        pass

    def get_page_links(self, url="", text="?page=", num=10):
        """Build paginated URLs: url + text + page number for pages 1..num.

        :param url: base URL (defaults to self.base_url)
        :param text: pagination query fragment appended to the URL
        :param num: number of pages to generate
        :return: list of page URLs
        """
        if url == "":
            url = self.base_url
        return [url + text + str(i) for i in range(1, num + 1)]

    def get_soups_for_rule(self, url="", rule=None):
        """Walk the nested *rule* dict against the page at *url*.

        Rule shape: {"name": tag, "attrs": {...}, "child": {...}}.  A child
        rule carrying an "end" key switches from find() to find_all(), and
        its optional "child_one" rule is then applied to every match.

        :return: list of find_all() results from the "child_one" stage, or
                 [] when the rule chain never reaches one.
        """
        rule = rule if rule is not None else {}  # avoid shared mutable default

        def is_child(r):
            # A usable child rule must be a non-empty dict.
            return isinstance(r.get("child"), dict) and r["child"] != {}

        if url == "":
            url = self.base_url
        soup = self.__get_soup(url)

        # Previously `rest` was only bound inside the "child_one" branch,
        # so rules without that stage raised UnboundLocalError on return.
        rest = []

        soup_obj = soup.find(rule.get("name", ""), rule.get("attrs", {}))
        while is_child(rule):
            rule = rule["child"]
            if 'end' in rule:
                # Terminal stage: collect every match at this level.
                soup_objs = soup_obj.find_all(
                    rule["name"], rule.get("attrs", {}))
                if 'child_one' in rule:
                    # Drill one level into each matched element.
                    rule = rule["child_one"]
                    rest = [obj.find_all(rule["name"], rule.get("attrs", {}))
                            for obj in soup_objs]
                    # TODO: recurse for deeper child rules
                    # TODO: handle nested "end" stages
                break
            # Non-terminal stage: narrow to the single matching child.
            soup_obj = soup_obj.find(rule["name"], rule.get("attrs", {}))

        return rest

    def get_proxies_objs(self, url="", rule=None):
        """Scrape one page and accumulate {"http": "ip:port"} proxy dicts.

        Each rule match is expected to yield a cell list whose first two
        entries hold the IP and the port (e.g. the <td>s of a table row).

        :return: the accumulated self.proxies_objs list.
        """
        print("正在爬取ip代理")
        for cells in self.get_soups_for_rule(url=url, rule=rule):
            self.proxies_objs.append(
                {"http": cells[0].text + ":" + cells[1].text})
        return self.proxies_objs

    def filter_available_proxies(self, timeout=1, proxies_objs=None):
        """Probe each proxy against baidu and keep the ones that answer.

        Working proxies are appended to self.ip_pool and also returned.

        :param timeout: per-probe timeout in seconds
        :param proxies_objs: proxies to test (defaults to self.proxies_objs)
        :return: list of proxies that answered with HTTP 200
        """
        print("正在过滤可用代理")
        rest = []
        connect_timeout_num = 0
        proxy_error_num = 0
        read_timeout_num = 0

        if not proxies_objs:
            proxies_objs = self.proxies_objs
        for proxies_obj in proxies_objs:
            try:
                resp = requests.get(url="http://www.baidu.com",
                                    timeout=timeout, proxies=proxies_obj)
            # Order matters: ConnectTimeout and ProxyError are both
            # ConnectionError subclasses, so they must be caught first.
            except requests.exceptions.ConnectTimeout:
                connect_timeout_num += 1
            except requests.exceptions.ProxyError:
                proxy_error_num += 1
            except requests.exceptions.ConnectionError:
                proxy_error_num += 1
            except requests.exceptions.ReadTimeout:
                read_timeout_num += 1
            else:
                if resp.status_code == 200:
                    print(proxies_obj)
                    self.ip_pool.append(proxies_obj)
                    # Previously `rest` was never filled, so callers always
                    # received an empty list.
                    rest.append(proxies_obj)
                else:
                    proxy_error_num += 1

        print("总数{},连接超时数{},代理错误数{},读取超时数{}".format(len(proxies_objs),
              connect_timeout_num, proxy_error_num, read_timeout_num))

        return rest

    def batch_get_proxies_objs(self):
        """Run the full pipeline for every config in self.conf_list.

        Each config must provide: url, page_text, page_num, rule.  For every
        generated page link the proxies are scraped and then filtered.
        """
        def valid_conf(conf):
            return {'url', 'page_text', 'page_num', 'rule'} <= conf.keys()

        for conf in self.conf_list:
            if not valid_conf(conf):
                print("配置")  # invalid config entry, skip it
                continue
            for url in self.get_page_links(url=conf["url"],
                                           text=conf["page_text"],
                                           num=conf["page_num"]):
                try:
                    print(url)
                    # Was `demo.get_proxies_objs(...)`: it depended on a
                    # module-level global instead of this instance.
                    proxies_objs = self.get_proxies_objs(
                        url=url, rule=conf["rule"])
                    self.filter_available_proxies(
                        proxies_objs=proxies_objs)
                    print(proxies_objs)
                except RuntimeError:
                    print("请求超时")

    def get_user_agent(self):
        """Return a random User-Agent string generated by faker."""
        return Factory.create().user_agent()

    def read_db(self):
        """Placeholder: load a previously saved proxy pool from the DB.

        (The original signature was missing *self* and would have crashed
        if ever called on an instance.)
        """
        pass

    def save_db(self):
        """Persist every proxy in self.ip_pool through the IpPool DO."""
        session = self.get_session()
        for proxies_obj in self.ip_pool:
            IpPool(address=proxies_obj["http"]).add(session)

class Demo(SpiderBase):
    """Concrete spider; inherits every behaviour from SpiderBase unchanged."""

    def __init__(self, *args):
        # Forward all positional arguments straight to the base class.
        super().__init__(*args)


if __name__ == "__main__":
    # Demo run: scrape one page of the jiangxianli free-proxy listing,
    # keep the proxies that actually respond, and persist them.
    demo = Demo()
    # Single site config; the rule drills tbody -> tr (find_all, "end")
    # and then reads the <td> cells of each row ("child_one").
    demo.conf_list = [
        {"url": "https://ip.jiangxianli.com/", "page_text": "?page=", "page_num": 1, "rule": {"name": "tbody", "child": {"name": "tr", "end": 1, "child_one": {"name": "td"}}}}]
    demo.batch_get_proxies_objs()
    # Write the surviving proxies (demo.ip_pool) through the IpPool DO.
    demo.save_db()
    # close_db comes from DBbase (outside this file) -- presumably releases
    # the DB session/connection; confirm against util.StoreUtil.
    demo.close_db()
