# -*- coding: utf-8 -*-
#!/usr/bin/env python
# @ author = 'zoushunli'
# @ author_email = '409358374@qq.com'
from bs4 import BeautifulSoup
import bs4
import requests
import random
import codecs
import ffmpeg
import re
# import pprint
# from multiprocessing import Pool
import scrapy
# ast.literal_eval() safely converts a string representation to a dict
import ast
from selenium import webdriver
# from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver import ActionChains
# from pyquery import PyQuery as pq
# from time import sleep
# import you_get
import os

# Pool of desktop-browser User-Agent strings; one is picked at random so
# the scraper's requests look less uniform.
user_agent = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0'
]
# Fix: the HTTP request header is named 'User-Agent'; the previous key
# 'user_agent' is not a recognized header, so the value was never honored.
# random.choice generalizes to any list length (was randint(0, 1)).
headers = {'User-Agent': random.choice(user_agent)}

'''
URL percent-encoding reference (for decoding scraped hrefs):
%3A -> :
%2F -> /
%3F -> ?
%3D -> =
%26 -> &
'''


def getHtmlText(url):
    """Fetch *url* and return the decoded page text.

    Returns the sentinel string '产生异常' ("exception occurred") on any
    request failure, so callers never see an exception propagate.
    """
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        # Guess the real charset from the body; response headers are not
        # always labelled correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:  # narrowed from broad Exception
        print('Exception:-->', e)
        return "产生异常"


def getHtmlUrls(htmlText, lst, parser='lxml'):
    """Collect CSDN article URLs from *htmlText* into *lst* (mutated and returned).

    An article link has 7 path segments when split on '/'
    (e.g. https://blog.csdn.net/<user>/article/details/<id>); anything
    after the leading digits of the last segment is stripped.
    """
    soup = BeautifulSoup(htmlText, parser)
    for anchor in soup.find_all('a'):
        try:
            # Normalize escaped separators sometimes found in scraped hrefs.
            href = anchor.attrs['href'].replace('\\', '/')
        except KeyError:  # <a> tag without an href attribute
            continue
        if len(href.split('/')) != 7:
            continue
        head, _, tail = href.rpartition('/')
        ret = re.match(r'[\d]+', tail)
        if ret:
            url = head + '/' + ret.group(0)
            if url not in lst:  # de-duplicate
                lst.append(url)
    return lst


def praseHtmlText(lst, storeText, parser):
    """Fetch and store the first article URL in *lst*.

    Bug fix: storeHtmlText expects a list of URLs and fetches each page
    itself; previously this function passed the raw page *text* of lst[0],
    so storeHtmlText iterated over single characters instead of URLs.
    """
    storeHtmlText(lst[:1], storeText, parser)


def storeHtmlText(uLst, storeText=False, parser='lxml'):
    """Fetch each article URL in *uLst*, print its metadata, and optionally
    save the article body as a UTF-8 text file.

    storeText -- when True, write the body to
                 C:\\Users\\zsl\\Desktop\\temp\\<title>.txt
    parser    -- BeautifulSoup parser name (default 'lxml').
    """
    for each in uLst:
        soup = BeautifulSoup(getHtmlText(each), parser)

        uTitle = None  # stays None when the page cannot be parsed
        try:
            rawTitle = soup.find_all('h1', class_="title-article")[0].text
            # Drop characters illegal in Windows file names; findall yields
            # the legal fragments, so join them back into one string.
            # (Previously the findall *list* itself was kept and later used
            # with .replace/.format, which raised AttributeError.)
            uTitle = ''.join(re.findall(r'[^\*"/:?\\|<>]+', rawTitle, re.S))
            uName = soup.find_all('a', class_="follow-nickName")[0].text
            uRead = soup.find_all('span', class_="read-count")[0].text
            uCollection = soup.find_all('span', class_="get-collection")[0].text.strip()
            uRecommend = soup.find_all('span', class_="recommend")
            print(each)
            print(uTitle)

            if uRecommend:
                print(uRecommend[0])
                print(uRecommend[0].text.strip())
                print('uTitle-->', uTitle)
        except (IndexError, AttributeError) as e:
            # Missing element / changed page layout; skip metadata but
            # report it instead of silently passing.
            print('parse error:-->', e)

        # Guard: uTitle could previously be unbound here (NameError) when
        # parsing failed; now we simply skip writing for unparsed pages.
        if storeText and uTitle:
            # Remove real tab characters (the old "\\t" removed the literal
            # two-character sequence backslash-t, which never occurs).
            fname = r'C:\Users\zsl\Desktop\temp\{0}.txt'.format(uTitle.replace('\t', ''))
            with codecs.open(fname, 'w+', 'utf-8') as uTextContent:
                uText = soup.find_all('div', id="content_views")[0]
                for text in uText.children:
                    if isinstance(text, bs4.element.Tag):
                        uTextContent.write(' \r\n' + text.text)


def main():
    """Crawl the CSDN front page and print metadata for each article found."""
    start_url = 'https://blog.csdn.net/'
    page_text = getHtmlText(start_url)
    article_urls = getHtmlUrls(page_text, [])
    storeHtmlText(article_urls)


if __name__ == '__main__':
    main()
