import requests
from bs4 import BeautifulSoup
import pymysql.cursors
import configparser
import uuid
from pymysql import OperationalError
import os

# Build a MySQL connection from the local config file.
def get_connection():
    """Create a pymysql connection using settings from ./config/db.ini.

    Expects a ``[mysql]`` section with ``host``, ``port``, ``user``,
    ``password`` and ``db`` options.

    Returns:
        An open ``pymysql`` connection with a DictCursor and utf8mb4 charset.

    Raises:
        FileNotFoundError: if ./config/db.ini is missing or unreadable.
            (``config.read`` would otherwise silently return an empty
            parser and the ``['mysql']`` lookup would fail with a
            confusing KeyError.)
        KeyError: if the [mysql] section or a required option is absent.
    """
    config = configparser.ConfigParser()
    if not config.read('./config/db.ini'):
        raise FileNotFoundError('./config/db.ini not found or unreadable')
    mysql_db = config['mysql']
    connection = pymysql.connect(host=mysql_db['host'],
                                 port=int(mysql_db['port']),
                                 user=mysql_db['user'],
                                 password=mysql_db['password'],
                                 db=mysql_db['db'],
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    return connection

# Request a page and return its HTML.
def download_htm(url):
    """Download *url* and return the response body as text.

    Returns:
        The page HTML on HTTP 200, otherwise ``False`` (non-200 status,
        timeout, connection or proxy error) so callers can truth-test
        the result.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
    }
    # NOTE(review): hard-coded free HTTP proxy — these rotate/die quickly;
    # confirm it is still alive (or still needed) before relying on it.
    proxies = {
        "http": "222.189.144.219:9999",
    }
    try:
        r = requests.get(url,
                         proxies=proxies,
                         timeout=10,
                         headers=headers)
    except requests.RequestException:
        # A timeout or connection error used to propagate and kill the
        # whole crawl loop; treat it like any other failed download.
        return False
    if r.status_code == 200:
        return r.text
    return False
    
# Parse the topic HTML and extract its fields.
def parse_content(html, cursor, sql):
    """Extract topic metadata and gallery image URLs from a topic page.

    Args:
        html: page HTML as returned by download_htm().
        cursor: DB cursor — currently unused (the execute call below is
            commented out); kept for interface compatibility.
        sql: INSERT statement for the cursor — currently unused as well.

    Side effects:
        Prints the parsed fields (including the module-level ``urlId``
        set by the main loop) and downloads every gallery image via
        download_save_image().
    """
    soup = BeautifulSoup(html, 'html.parser')
    topic = soup.find(id='topic')
    topic_article = topic.find('div', class_='topic-article')
    topic_meta = topic_article.find('div', class_='topic-meta')

    # The profile href looks like "/xxx/<code>/" — strip the 4-char
    # prefix and the trailing slash to get the bare user code.
    user_code = topic_meta.find('a', class_='topic-username').attrs['href']
    user_code = user_code[4:][:-1]
    user_name = topic_meta.find('a', class_='topic-username').string
    topic_title = topic_article.find('h1').string
    article = topic_article.find('div', class_='article').string
    topic_date = topic_meta.find('span', class_='topic-date').string

    # Image URLs are embedded in a Vue "v-for" attribute as a
    # ';'-separated list; strip the fixed wrapper characters first.
    lightgallery = topic_article.find(id='lightgallery')
    imgs = []
    for a_img in lightgallery.find_all('a'):
        parts = a_img.attrs['v-for'][20:-2].split(";")
        # str.split always yields at least one element, so this holds
        # whenever the anchor carries a v-for attribute at all.
        if parts:
            # NOTE(review): each matching anchor OVERWRITES imgs, so only
            # the last gallery's list survives — confirm that is intended.
            imgs = parts

    print(urlId, user_code, user_name, topic_title, article, topic_date)
    for img in imgs:
        print(img)
        download_save_image(img, user_code)
    # cursor.execute(sql, (topic_title,topic_date, article,imgs,user_code ))

# Download one image and save it to the local spider folder.
def download_save_image(imgUrl, user):
    """Fetch *imgUrl* and write it to C:/Yw-Test/spider/.

    Args:
        imgUrl: absolute image URL; empty/None values are skipped.
        user: user code — currently unused (the per-user subfolder was
            disabled); kept for interface compatibility.

    Failures (empty URL, network error, non-200 response) are skipped
    silently: saving is best-effort and one bad image must not abort
    the crawl.
    """
    if not imgUrl:
        return
    fileName = os.path.basename(imgUrl)
    try:
        r = requests.get(imgUrl, stream=True, timeout=10)
    except requests.RequestException:
        return
    if r.status_code != 200:
        return
    # savePath = "C:/Yw-Test/spider/" + user
    savePath = "C:/Yw-Test/spider/"
    # Create the target folder up front instead of failing in open()
    # on a machine where it does not exist yet.
    os.makedirs(savePath, exist_ok=True)
    fullSavePath = os.path.join(savePath, fileName)
    with open(fullSavePath, 'wb') as f:
        f.write(r.content)

# (The former module-level "global urlId" statement was a no-op — the
# keyword only has effect inside a function; the assignment in the loop
# below creates the module global that parse_content() reads.)

if __name__ == '__main__':
    # Crawl topic pages for every id in [id_start, id_end].
    id_start = 97700
    id_end = 97790
    # connection = get_connection();
    # BUG FIX: the id placeholder was "$s" — pymysql's paramstyle is
    # "format", so every placeholder must be "%s".
    insertTopicSQL = "insert into `py_xzb_topic` (`id`, `topic_title`, `topic_date`, `article`,`light_gallery`,`user_id`)" \
                     " values (%s, %s, %s, %s, %s, %s)"
    import time
    while id_start <= id_end:
        time.sleep(10)  # be polite: throttle to one request per 10 s
        urlId = id_start  # module global read by parse_content's print
        xzbUrl = 'https://www.imxingzhe.com/xzb/' + str(id_start)
        html = download_htm(xzbUrl)
        if html:
            # with connection.cursor() as cursor:
            #     parse_content(html, cursor, insertTopicSQL.format(id_start))
            parse_content(html, "", "")
        else:
            print('请求失败')
            break
        id_start += 1
    # connection.close();