# coding=utf-8
import os
import random

import requests
from bs4 import BeautifulSoup

"""描述：

通过爬取西刺免费代理IP网站，获取代理服务器列表，并编写接口供商品爬虫调用
"""


class Proxies:
    """Harvest free proxy servers from xicidaili.com, cache them in a
    text file, and hand out random proxies to other spiders."""

    # Historical hard-coded cache location, kept as the default so
    # existing callers (``Proxies()``) keep working.
    DEFAULT_FILENAME = "H:\\project\\python\\spider\\spider\\proxy_list.txt"

    def __init__(self, filename=None):
        """Load the cached proxy list from disk.

        :param filename: path of the proxy-list file; ``None`` falls back
            to :data:`DEFAULT_FILENAME` for backward compatibility.
        """
        self.filename = filename or self.DEFAULT_FILENAME
        self.proxies_list = self.get_ip_list()

    def get_html(self, url):
        """Fetch one proxy-list page and return it decoded as UTF-8.

        The headers/cookies below mimic a real browser session so the
        site does not reject the request.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'www.xicidaili.com',
            'If-None-Match': 'W/"bdf8360c9f3c3c1f64ea412a211cd6b5"',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/68.0.3440.106 Safari/537.36 '
        }
        cookies = {
            "_free_proxy_session": "BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWQ1NGJlZDk0MTI3N2Q0NjFhMTIzYWZhMWY5MWU"
                                   "0MDRhBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMXdXbmRMWUhzK2oyRVRRTTdpanRBRGVzWnBOSEh0"
                                   "S2RyVU9uUEdUQXdEN0U9BjsARg%3D%3D--18a3ec30fc929b0861d11671c7b55cf764ae4f03",
            "Hm_lvt_0cf76c77469e965d2957f0553e6ecf59": "1544508603,1544508996",
            "Hm_lpvt_0cf76c77469e965d2957f0553e6ecf59": "1544509532"

        }
        # timeout added so a slow/unreachable host cannot hang the spider.
        proxy_html = requests.get(url, headers=headers, cookies=cookies,
                                  timeout=10).content.decode("utf-8")
        return proxy_html

    def parse_proxy(self, proxy_html):
        """Parse the proxy-list HTML table and return the live proxies.

        :param proxy_html: page HTML as produced by :meth:`get_html`.
        :return: list of ``"host:port"`` strings that passed a
            connectivity check against the site itself.
        """
        bs = BeautifulSoup(proxy_html, "html.parser")
        print(bs.title)
        ips = bs.find_all('tr')
        ip_list = []
        test_url = "https://www.xicidaili.com/nn/"

        # The first two <tr> rows are table headers; data starts at index 2.
        for ip_info in ips[2:]:
            tds = ip_info.find_all('td')
            if not tds:
                continue
            # Column 1 is the IP, column 2 is the port.
            ip = tds[1].text + ":" + tds[2].text
            proxy_test = {'http': "http://" + ip}
            try:
                # Keep only proxies that can actually reach the site.
                requests.get(test_url, proxies=proxy_test, timeout=5)
            except requests.RequestException as exc:
                # BUG FIX: the original printed `res` here, but `res` is
                # unbound when the request raises — that masked the real
                # error with a NameError. Print the exception instead.
                print(exc)
                continue
            ip_list.append(ip)
        return ip_list

    def get_ip_list(self):
        """Read the cached proxy list from :attr:`filename`.

        :return: list of ``"host:port"`` strings; ``[]`` when the cache
            file does not exist yet (instead of crashing on first run).
        """
        if not os.path.exists(self.filename):
            return []
        with open(self.filename) as f:
            return [line.rstrip('\n') for line in f]

    def get_random_ip(self):
        """Return a ``proxies`` dict for requests, picking one cached proxy
        at random, e.g. ``{'http': 'http://1.2.3.4:80'}``.

        BUG FIX: the key used to be ``'http:'`` (trailing colon), which
        requests does not recognize — the proxy was silently never used.
        """
        return {'http': 'http://' + random.choice(self.proxies_list)}

    def spider(self):
        # Placeholder for the crawl loop; intentionally unimplemented.
        pass

    def update_ip_list(self):
        """Re-validate every cached proxy against httpbin.org and append
        the still-working ones to ``proxies.txt``; prints a summary count."""
        with open(self.filename, 'r') as f:
            ip_list = f.readlines()
        new_ip_list = []
        # Open the output file once, instead of reopening it for every
        # working proxy as the original did.
        with open('proxies.txt', 'a') as write_f:
            for ip in ip_list:
                proxy = {'http': 'http://' + ip.rstrip()}
                try:
                    # timeout so one dead proxy cannot stall the whole scan.
                    res = requests.get(url="http://httpbin.org/ip",
                                       proxies=proxy, timeout=5)
                except requests.RequestException:
                    print(proxy, "已经失效")
                else:
                    print(proxy, "可用", res)
                    write_f.write(ip)
                    new_ip_list.append(ip)

        print(len(new_ip_list))


if __name__ == '__main__':
    # Script entry point: re-check every cached proxy and append the
    # ones that still work to proxies.txt.
    proxy_pool = Proxies()
    proxy_pool.update_ip_list()
