# coding: utf-8

from const import *  # 自定义包
from os import system, name
from sys import version, argv, version_info

# Guard: this script requires Python 3.  Compare version_info tuples instead
# of the raw sys.version banner string — lexicographic string comparison is
# wrong in general (e.g. a '10.x' banner does not compare sanely to '3').
assert version_info >= (3,), '请使用Python3版本运行该脚本'

# Maps importable module name -> pip distribution name for the third-party
# packages this script depends on (checked/installed in the loop below).
used_packages = {'bs4': 'beautifulsoup4', 'requests': 'requests'}

# Verify internet connectivity with a single ping, then clear the console.
# ping exits 0 on success, so `not system(...)` is truthy when reachable.
if name == 'nt':  # Windows
    assert not system("ping -n 1 www.baidu.com"), "无法访问互联网, 请检查网络连接"
    system('cls')
elif name == 'posix':  # Linux / macOS
    assert not system("ping -c 1 www.baidu.com"), "无法访问互联网, 请检查网络连接"
    system('clear')

# Keep probing until every required third-party package imports cleanly,
# installing any missing one via pip and re-checking on the next pass.
while True:
    missing_found = False
    for module_name, pip_name in used_packages.items():
        if name == 'posix':
            # A non-zero exit from `python3 -c "import X"` means X is missing.
            if system('python3 -c "import {}"'.format(module_name)):
                missing_found = True
                assert not system('sudo python3 -m pip install {}'.format(pip_name)), \
                    '安装package: {} 失败'.format(pip_name)
                system('clear')
        elif name == 'nt':
            if system('python -c "import {}"'.format(module_name)):
                missing_found = True
                assert not system('python -m pip install {}'.format(pip_name)), \
                    '安装package: {} 失败'.format(pip_name)
                system('cls')
    if not missing_found:
        break

import csv
from urllib.parse import quote
from requests import Session as S
from random import choice, randint
from bs4 import BeautifulSoup as bs
from time import sleep, strftime, localtime

def process(a, b, char='#'):
    """Draw a 50-cell in-place text progress bar on stdout.

    a    -- number of finished steps
    b    -- total number of steps (must be non-zero)
    char -- fill character; only its first character is used

    Rewinds the line with backspaces so repeated calls animate in place;
    prints a trailing newline once a == b.
    """
    ratio = a / b
    filled = int(ratio * 50)
    bar = char[0] * filled + '.' * (50 - filled)
    # Rewind far enough to overwrite whatever was printed last time.
    print('\b' * 200, flush=True, end='')
    print('Process: [ ' + bar, flush=True, end='')
    print(' ]  %.2f%%' % (ratio * 100), flush=True, end='')
    if a == b:
        print('')

class MainProcess:
    """Crawl Bing search-result pages for a domain and write the unique site
    roots found in the results to ``<url>.csv``."""

    def __init__(self, url):
        """Prepare the HTTP session, the output CSV file and the result set.

        url -- the search term / domain, e.g. 'baidu.com'; a leading 'www.'
               is stripped before the term is percent-encoded.
        """
        self.url = quote(url.replace('www.', ''))
        # Cut an absolute URL down to scheme + host: the first '/' after
        # index 8 skips the '//' of 'http://' / 'https://'.
        self.c = lambda x: x[:x.find('/', 8)]
        self.child_web = set()  # unique site roots collected so far
        # Keep the file object on its own attribute so the handle is not
        # lost (the original rebound self.csv_file to the writer, leaking
        # the handle and making flush/close impossible).
        self._fh = open('./' + self.url + '.csv', 'w+', newline='', encoding='gbk')
        self.csv_file = csv.writer(self._fh)
        self.s = S()  # one HTTP session reused across all page fetches

    def start(self):
        """Fetch up to 102 result pages, collect site roots, then dump them
        to the CSV file with a header row (count + timestamp)."""
        print("爬取中...")
        print('Target Url: {}'.format(self.url))
        char = choice(['*', '%', '$', '@', '#'])
        # Bing paginates with &first=N: page 1, then 9, 19, ... 1009.
        cycle = [1, *[i * 10 + 9 for i in range(0, 101)]]
        for page in cycle:
            n = abs(page - 9) // 10  # 0..100, drives the progress bar
            try:
                info = self.s.get(
                    CONST_URL + self.url + '&first={}&FORM=PERE2'.format(page),
                    headers=HEADERS)
                assert (info.status_code == 200), "第{}页爬取错误".format(page)
                soup = bs(info.content, 'html.parser')
                for headline in soup.findAll('h2'):
                    # Headlines without a usable <a href> are skipped; only
                    # these two errors are expected here (was a bare except,
                    # which also hid genuine bugs).
                    try:
                        self.child_web.add(self.c(headline.a.get('href')))
                    except (AttributeError, TypeError):
                        pass
                sleep(2.5)  # be polite: throttle consecutive requests
            except Exception as e:
                print('\b' * 200, end='', flush=True)
                print("Error! {} {}".format(e.__repr__(), ' ' * 10))
            finally:
                process(n, 100, char)
                if page != cycle[-1]:
                    print('  Total: {}'.format(len(self.child_web)), flush=True, end='')
        count = len(self.child_web)
        self.child_web = sorted(self.child_web)
        print('写入数据中...')
        print('File Name: {}, Total: {}'.format(self.url + '.csv', count))
        self.csv_file.writerow(['总计:', count, '时间:',
                                strftime('%Y-%m-%d %H:%M:%S', localtime()),
                                DATE[strftime('%a', localtime())]])
        char = choice(['*', '%', '$', '@', '#'])
        for i in range(count):
            self.csv_file.writerow(
                ['No.%s' % str(i + 1).zfill(len(str(count))), self.child_web[i]])
            # process(i + 1, count, ...) reaches 100% on the last row and,
            # unlike the original process(i, count - 1, ...), does not divide
            # by zero when exactly one URL was collected (count == 1).
            process(i + 1, count, char)
        self._fh.flush()  # make sure all rows reach disk

def main():
    """Command-line entry point.

    Usage:
        python main.py baidu.com
    """
    assert len(argv) == 2, "输入有误, 请检查输入"
    MainProcess(argv[1]).start()

# Run the crawler only when the file is executed directly, not on import.
if __name__ == '__main__':
    main()
