# -*- coding:utf-8 -*-

#No.001
#chromesite 意义：在同一脚本中开启不同属性设置的chromedriver 在爬虫遇到阻碍时自动切换参数
#chromesite 参数：
# useragent-
#ip-ip代理 默认为空 格式为str “http*://IP:port”
#headless-可视化页面 默认为True 格式为bool
#imge-显示图片 默认为False 格式为bool
#linuxenv-Linux环境 默认为False 格式为bool

#返回一个selenium窗口
#使用方法：driver= chromesite(**，**，**)
#        driver.get(url)
#
#        driver.quit()
#爬虫应根据实际

from selenium import webdriver

def chromesite(useragent = "",ip = "",headless = True,imge = False,linuxenv = False):
    """Build and return a configured selenium Chrome driver.

    Parameters
    ----------
    useragent : str
        Custom User-Agent string; when empty, a default desktop Chrome UA
        is substituted.
    ip : str
        Proxy address such as "http://IP:port"; empty string disables the proxy.
    headless : bool
        Run Chrome without a visible window when True (default).
    imge : bool
        Load images when True; disabled by default to speed up crawling.
    linuxenv : bool
        When True, launch the chromedriver binary at "./chromedriver"
        (for a Linux environment without chromedriver on PATH).

    Returns
    -------
    selenium.webdriver.Chrome
        A ready-to-use driver; caller is responsible for driver.quit().
    """
    chrome_options = webdriver.ChromeOptions()

    # BUG FIX: the original tested `useragent == False`, which is never true
    # for the default value "" (in Python, "" == False is False), so the
    # fallback UA was never applied and an empty "user-agent:" argument was
    # passed instead.  Also use the proper Chrome switch form
    # `--user-agent=<value>` rather than "user-agent:<value>".
    if not useragent:
        useragent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                     'AppleWebKit/537.36 (KHTML, like Gecko) '
                     'Chrome/91.0.4472.124 Safari/537.36')
    chrome_options.add_argument('--user-agent=' + useragent)

    if headless:
        chrome_options.add_argument('--headless')

    if not imge:
        # Content-settings value 2 = block images.
        prefs = {'profile.default_content_setting_values': {'images': 2}}
        chrome_options.add_experimental_option('prefs', prefs)

    if ip:
        chrome_options.add_argument('--proxy-server=' + ip)

    # Needed when Chrome runs as root / inside containers.
    chrome_options.add_argument('--no-sandbox')

    if linuxenv:
        return webdriver.Chrome("./chromedriver", options=chrome_options)
    return webdriver.Chrome(options=chrome_options)

#No.002
#requestsurl 意义：根据爬虫断续情况更换身份识别参数，如useragent，session，cookie和其他提交表单
#requestsurl 参数：
#url-请求网址
#cookie-更换session
#pagenum-当前页数（网页特例）属于其他提交表单
#columnId-当前项目数（网页特例）属于其他提交表单
#useragent-
#ip-IP代理 格式{'httpX': 'httpX://ip:port'}

#网页特例应随网页变化而变化，不可照搬
#函数中header params应随网页变化而变化
#爬虫应根据实际

#返回requests获取的网页数据，具体查看requests官网 https://docs.python-requests.org/zh_CN/latest/


import requests

def requestsurl(url, cookie, pagenum, columnId, useragent = "", ip=""):
    """Fetch one paginated listing page, rotating identity parameters.

    Site-specific: the Host/Cookie headers and the currentPageNo/columnId
    query params target hnyzy.hicourt.gov.cn and must be adapted per site.

    Parameters
    ----------
    url : str        request URL
    cookie : str     JSESSIONID fragment spliced into the Cookie header
    pagenum          current page number (site-specific form field)
    columnId         current column id (site-specific form field)
    useragent : str  optional UA override; empty selects a default desktop UA
    ip : dict        optional proxies mapping like {'httpX': 'httpX://ip:port'}

    Returns
    -------
    requests.Response
    """
    # Substitute the default desktop UA only when the caller left it empty.
    useragent = useragent if useragent != "" else 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'

    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'td_cookie=2570783928; _gscu_1223948176=286460757ybvdr86; _gscbrs_1223948176=1; JSESSIONID=' + cookie + '; Hm_lvt_7030841eb7908f4f3779770e4a018fa8=1628646077,1628662825,1628735732,1628735734; Hm_lvt_04a971ecbbd7d7ba3d296a1439b6e74b=1628646075,1628662825,1628735732,1628735734; Hm_lpvt_04a971ecbbd7d7ba3d296a1439b6e74b=1628735741; Hm_lpvt_7030841eb7908f4f3779770e4a018fa8=1628735741; _gscs_1223948176=t28735735lx5spa20|pv:3',
        'Host': 'hnyzy.hicourt.gov.cn',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': useragent,
    }

    params = {
        'currentPageNo': str(pagenum),
        'columnId': str(columnId),
        'pageSize': '10',
        '_': '1628735740{:0=3}'.format(int(pagenum) + 19),
    }

    # Route through a proxy only when one was supplied.
    extra = {'proxies': ip} if ip else {}
    return requests.get(url=url, headers=header, params=params, **extra)



#No.003-1
#IP获取 来自小幻ip https://ip.ihuan.me/ 使用前确定该网站能够正常浏览
#参数：pagenumber-当前页页码 格式str
#返回值：
# pagenumber-下一页代码，用于连续爬取 外部需要添加中间参数传递

# iplist-获取的IP列表 格式：list<dict> [{'httpX': 'httpX://ip:port'},{'httpX': 'httpX://ip:port'}]


from lxml import etree
import time

def openIpihuan(pagenumber):
    """Scrape one page of http proxies from https://ip.ihuan.me/.

    pagenumber : str
        Path fragment of the current page, appended to the site root.

    Returns
    -------
    (str, list[dict])
        The href of the next-page pagination link (feed it back in for
        continuous crawling) and a list of {'http': 'http://ip:port'} dicts.

    NOTE(review): assumes the site is reachable and that the pagination
    list holds at least 8 <li> entries — confirm before relying on it.
    """
    browser = chromesite(linuxenv = True)
    browser.get("https://ip.ihuan.me/" + pagenumber)
    time.sleep(2)  # give the page time to finish rendering before scraping
    tree = etree.HTML(browser.page_source)
    # Columns 1 (IP, inside <a>) and 2 (port) interleave in document order:
    # [ip0, port0, ip1, port1, ...]
    cells = tree.xpath("//tr/td[1]/a/text() | //tr/td[2]/text()")
    next_page = tree.xpath("//ul[@class='pagination']/li[8]/a/@href")[0]
    iplist = [{"http": "http://" + cells[i] + ":" + cells[i + 1]}
              for i in range(0, len(cells), 2)]
    browser.quit()
    return next_page, iplist


#No.003-2
#验证ip可用性
#参数：
#url：目标网站
#ip：被验证ip 格式{'httpX': 'httpX://ip:port'}

#返回：若IP可用则返回，不可用则不返回

# headers应随目标网站变化而变化，

def trydaili(url,ip):
    """Probe a candidate proxy against *url*; return it if it answers 200.

    url : str   target site used for the availability check
    ip : dict   candidate proxy, format {'httpX': 'httpX://ip:port'}

    Returns the same *ip* mapping when the proxied GET comes back with
    HTTP 200; otherwise returns None (including on timeout / any error).

    NOTE: the headers below are tailored to www.hshfy.sh.cn and should be
    adjusted to match the actual target site.
    """
    headers = {
        "Host": "www.hshfy.sh.cn",
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": ""
    }
    try:
        response = requests.get(url=url,headers = headers,proxies=ip,timeout = 15)
    except Exception as err:
        # Best-effort probe: log the failure and report the proxy as unusable.
        print(err)
        return None
    if response.status_code == 200:
        return ip

#No.003-3
#使用调用 openIpihuan() 和trydaili()
#参数
#pageip-openIphuan()返回结果-小幻ip网页单页的全部IP代理 格式：list<dict> [{'httpX': 'httpX://ip:port'},{'httpX': 'httpX://ip:port'}]
#usenum-当可用ip总数等于usenum时，退出函数并返回可用ip列表 格式 int 建议为3
#返回 IP列表 格式：list<dict> [{'httpX': 'httpX://ip:port'},{'httpX': 'httpX://ip:port'}]

# Target site used for proxy verification; replace with the real URL.
openurl = "url"

def okip(pageip,usenum):
    """Collect up to *usenum* proxies from *pageip* that work against ``openurl``.

    pageip : list[dict]
        Candidate proxies, each {'httpX': 'httpX://ip:port'} (from openIpihuan).
    usenum : int
        Stop and return as soon as this many working proxies are found.

    Returns
    -------
    list[dict]
        Verified proxies; may be shorter than *usenum* if candidates run out.
    """
    verified = []
    for candidate in pageip:
        # Early exit once enough working proxies have been collected.
        if len(verified) == usenum:
            return verified
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        print(candidate)
        usable = trydaili(openurl, candidate)
        if usable is not None:
            verified.append(usable)
            print(usable, "ok")
    return verified

#No.004
#检查文件编码

import chardet

def detect(file,encodinglist=None,insert=None):
    """Detect a text file's encoding by trial decoding.

    Fully reads *file* with each candidate encoding in order and returns
    the first one that decodes without error.

    Parameters
    ----------
    file : str
        Path of the file to probe.
    encodinglist : list[str] | None
        Candidate encodings tried in order.  Defaults to a broad list headed
        by gb18030 (which decodes most byte sequences, so it usually wins).
    insert : iterable[tuple[int, str]] | None
        Extra (index, encoding) pairs spliced into the candidate list before
        probing; each successive index is shifted by the number of prior
        insertions, matching the original call convention.

    Returns
    -------
    str | None
        The first encoding that decodes the whole file, or None if all fail
        (or the file cannot be opened).
    """
    # BUG FIX: the original used a mutable default list and inserted into it,
    # so encodings passed via `insert` leaked into the shared default and
    # affected every later call.  Build a fresh list per call instead (and
    # copy caller-supplied lists so they are never mutated either).
    if encodinglist is None:
        encodinglist = ['gb18030','utf-8','gbk','gb2312','utf-16','big5',
                        'euc-jp','euc-kr','latin1','utf-16','utf-8-sig',
                        'utf-16-sig']
    else:
        encodinglist = list(encodinglist)
    if insert:
        pos=0
        for index,insertcode in insert:
            encodinglist.insert(index+pos,insertcode)
            pos+=1
    for e in encodinglist:
        try:
            # readlines() forces the whole file through the decoder, so a
            # wrong candidate raises and we move on to the next one.
            with open(file,'r',encoding=e) as f:
                f.readlines()
            return e
        except Exception:  # narrowed from bare `except:` (keeps Ctrl-C alive)
            continue
    return None

def chardetect(file):
    """Detect *file*'s encoding with chardet's statistical detector.

    Returns the encoding name chardet guesses (may be None when the data
    is empty or unrecognizable).
    """
    # BUG FIX: the original leaked the file handle
    # (`open(file,'rb').read()` with no close); use a context manager so
    # the handle is always released.
    with open(file,'rb') as f:
        raw = f.read()
    return chardet.detect(raw)['encoding']

#打开文件,输入地址返回全部文本
def get_encoding(filename):
    """Return a usable text encoding for *filename*.

    Resolution order: trial decoding via detect(), then chardet via
    chardetect(), then a final 'utf-8' fallback.  The file is re-read with
    the chosen encoding as a sanity check before the name is returned
    (raises UnicodeDecodeError if even the chosen encoding fails).
    """
    # BUG FIX: the original called detect()/chardetect() twice each (once in
    # the condition, once for the assignment), and each detect() call may
    # re-read the file once per candidate encoding.  Chaining with `or`
    # calls each detector at most once while preserving the fallbacks.
    encoding = detect(filename) or chardetect(filename) or 'utf-8'
    with open(filename, 'r', encoding=encoding) as f:
        f.readlines()  # verification read; contents intentionally discarded
    return encoding



