import requests
import json
from bs4 import BeautifulSoup
import pymysql.cursors
import configparser
import uuid
from pymysql import OperationalError
import os

# Browser-like User-Agent shared by the list-API requests in __main__,
# so the site does not reject the default python-requests UA.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
}


# 创建目录
def make_dirs(dir):
    basePath = "C:/Yw-Test/spider"
    fullPath = basePath + "/" + dir.replace("|","丨")
    if not os.path.exists(fullPath):
        os.makedirs(fullPath)
    return fullPath

#  解析html, 读取其中的信息
def parse_content(html, title):
    soup = BeautifulSoup(html,'html.parser')
    article_main = soup.find('div', class_='article-main')
    imgs = article_main.find_all('img')
    for img in imgs:
        img_src = img.attrs['src']
        save_image(img_src, title)

# NOTE(review): removed a block of dead, commented-out experiment code here
# (a 'v-for' image-attribute extraction loop and a DB insert) — it referenced
# variables (urlId, user_code, topic_title, cursor, ...) that were never
# defined in this file.
# 请求页面, 返回html
def download_htm(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
    }
    proxies = {
        "http": "222.189.144.219:9999",
    }
    r = requests.get(url,
                     proxies=proxies,
                     timeout=10,
                     headers=headers)
    if r.status_code == 200:
        return  r.text
    else:
        return False

# 保存图片
def save_image(imgUrl, title):
    if imgUrl == '':
        return
    r = requests.get(imgUrl, stream=True)
    if r.status_code != 200:
        return
    img_name = os.path.basename(imgUrl)
    dir_path = make_dirs('美骑网-单车美人')

    with open(dir_path + "/" + img_name, 'wb') as f:
        f.write(r.content)

# ip proxy


import time  # kept: file-level import, used by the optional throttle below

if __name__ == '__main__':
    # Column id 33 is the photo-album list endpoint of biketo.com.
    apiUrl = "http://www.biketo.com/app.php?m=info&a=getNewsList&type=column&id=33"
    page = 0
    # Only the first page is crawled for now; raise the bound to fetch more.
    while page < 1:
        page = page + 1
        params = {"page": page}
        r = requests.get(apiUrl, headers=headers, params=params)
        result = r.json()
        if result['status']:
            for d in result['data']:
                # time.sleep(0.5)  # optional politeness delay between articles
                # BUG FIX: the URL used to be built with a leading space
                # (" https://..."), producing an invalid request target.
                newsurl = "https://www.biketo.com" + d['newsurl']
                html = download_htm(newsurl)
                if html:
                    parse_content(html, d['title'])
                else:
                    print('请求失败')
                    break

