import random
from time import sleep

import requests
from bs4 import BeautifulSoup
from lxml import html
from rich import print as rprint
import cloudscraper

from sys import path as path2
path2.append('c:/users/tian/desktop')
from Service import ips, ua

class Req:
    """HTTP fetcher that rotates through a pool of HTTP proxy addresses.

    Each request is retried forever until a 200 response arrives.  A 403
    response is treated as a banned proxy and removes that address from
    the pool; any other status or exception is logged and retried.
    """

    def __init__(self, ipsl=None):
        """Initialise the proxy pool.

        Args:
            ipsl: optional list of proxy addresses.  When omitted, ``None``
                or empty, the pool is loaded from ``Service.ips.ips()``.

        Note: the original signature used the mutable default ``ipsl=[]``;
        ``None`` avoids the shared-mutable-default pitfall (the stored list
        is mutated later via ``remove``).  Passing ``[]`` explicitly still
        behaves the same as before (pool is loaded from ``ips.ips()``).
        """
        if not ipsl:
            self.ipsl = ips.ips()
        else:
            self.ipsl = ipsl

    def req(self, url):
        """Fetch *url* through a random proxy and return a BeautifulSoup tree.

        Loops until a 200 status is received.  On 403 the offending proxy is
        dropped from the pool; other statuses are logged and retried, and
        exceptions pause 2 s before retrying.

        Returns:
            BeautifulSoup: the response body parsed with the ``lxml`` backend.
        """
        while True:
            try:
                # Refill the pool if every proxy has been banned; otherwise
                # random.choice([]) raises IndexError and we would spin
                # forever inside the except branch without ever succeeding.
                if not self.ipsl:
                    self.ipsl = ips.ips()
                ip = random.choice(self.ipsl)
                r = requests.get(url, headers={'user-agent': ua.ua}, timeout=3, proxies={'http': ip})
                r.encoding = 'utf-8'
                code = r.status_code
                if code == 200:
                    break
                elif code == 403:
                    rprint('[red]IP 被禁，切换代理')
                    self.ipsl.remove(ip)
                else:
                    rprint(f'[red]状态码 {code}，请求异常，重连中。。。, url: {url}')
            except Exception as e:
                rprint('报错：', e)
                sleep(2)
        soup = BeautifulSoup(r.text, 'lxml')
        return soup


def req2(url):
    """Fetch *url* directly (no proxy) and return the parsed page.

    Retries indefinitely until a 200 response is obtained; other status
    codes are reported and the request is repeated, and exceptions pause
    for 2 seconds before the next attempt.

    Returns:
        BeautifulSoup: response body parsed with the ``html.parser`` backend.
    """
    response = None
    while response is None:
        try:
            attempt = requests.get(url, headers={'user-agent': ua.ua}, timeout=3)
            status = attempt.status_code
            if status == 200:
                response = attempt
            elif status == 403:
                rprint('[red]IP 被禁，切换代理')
            else:
                rprint(f'[red]状态码 {status}，请求异常，重连中。。。, url: {url}')
        except Exception as err:
            rprint('报错：', err)
            sleep(2)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')

def req3(url):
    """Fetch *url* directly (no proxy) and return an lxml element tree.

    Retries indefinitely until a 200 response is obtained; other status
    codes are reported and the request is repeated, and exceptions pause
    for 2 seconds before the next attempt.

    Returns:
        lxml.html.HtmlElement: root of the parsed document.
    """
    response = None
    while response is None:
        try:
            attempt = requests.get(url, headers={'user-agent': ua.ua}, timeout=3)
            status = attempt.status_code
            if status == 200:
                response = attempt
            elif status == 403:
                rprint('[red]IP 被禁，切换代理')
            else:
                rprint(f'[red]状态码 {status}，请求异常，重连中。。。, url: {url}')
        except Exception as err:
            rprint('报错：', err)
            sleep(2)
    response.encoding = 'utf-8'
    # NOTE (2023-08-23): feeding r.text.encode('utf-8') to html.fromstring
    # produced garbled titles on the GCBT site; passing the decoded str
    # directly works.  Root cause not yet identified.
    return html.fromstring(response.text)


def reqSehuatang(url):
    """Fetch a sehuatang page via cloudscraper and return the parsed soup.

    Uses cloudscraper to get past the Cloudflare challenge, sending a
    hard-coded session cookie.  Retries indefinitely until a 200 response
    is obtained; other status codes are reported and the request is
    repeated, and exceptions pause for 2 seconds before the next attempt.

    Returns:
        BeautifulSoup: response body parsed with the ``html.parser`` backend.
    """
    request_headers = {
        'user-agent': ua.ua,
        'cookie': 'cPNj_2132_saltkey=AFCyyipl; _safe=vqd37pjm4p5uodq339yzk6b7jdt6oich; cPNj_2132_lastvisit=1680128297; cPNj_2132_atarget=1; cPNj_2132_visitedfid=2; cPNj_2132_lastfp=97e340d0d01aa44c7affd122338a562b; cPNj_2132_lastact=1680218188%09forum.php%09forumdisplay; cPNj_2132_st_t=0%7C1680218188%7C4e750b5fce3d2b87e5c1c8682a38e170; cPNj_2132_forum_lastvisit=D_2_1680218188',
    }
    response = None
    while response is None:
        try:
            # A fresh scraper per attempt, as the original code did.
            attempt = cloudscraper.create_scraper().get(url, headers=request_headers, timeout=3)
            status = attempt.status_code
            if status == 200:
                response = attempt
            elif status == 403:
                rprint('[red]IP 被禁，切换代理')
            else:
                rprint(f'[red]状态码 {status}，请求异常，重连中。。。, url: {url}')
        except Exception as err:
            rprint('报错：', err)
            sleep(2)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')