import os

import requests
from bs4 import BeautifulSoup as soup
import re
# import sqlite3

import time

from concurrent.futures import ThreadPoolExecutor

# Crawler configuration (module-level globals, g-prefixed per file convention).
gUrlRoot = "https://www.lesmao.co/"  # site root (not referenced in this chunk)
gSaveRootDir=u"F://个人 - meitu/www.lesmao.co"  # local directory images are saved under
# NOTE(review): both log files are opened for append at import time and never
# explicitly closed; they rely on interpreter exit to flush — TODO confirm.
gLogFile = open('runlog.txt','a')  # error log ("downError"/"openError" records)
gPageFile = open('downpagelog.txt','a')  # page-progress log (unused in this chunk)

gSubThreadNum=1   # unused in this chunk — presumably for another crawl stage
gPageThreadNum=5  # unused in this chunk — presumably for page-listing workers
gDownImgThreadNum=4  # worker count for the image-download ThreadPoolExecutor below
def save_to_mongo(img_url, title, talbe):
    """Record an image URL and its title in table *talbe*.

    The database connection (``gconn``) is commented out project-wide, so
    this is currently a no-op kept for when persistence is re-enabled.

    Args:
        img_url: direct URL of the image.
        title: gallery/post title the image belongs to.
        talbe: target table name (misspelling kept for caller compatibility).

    Returns:
        None. Persistence failures are swallowed on purpose so they never
        abort a download run.
    """
    try:
        # SECURITY: bind the values with ?-placeholders instead of the
        # original %-interpolation (SQL-injection prone once gconn returns).
        # The table name cannot be a placeholder, so it stays interpolated.
        sql = "INSERT INTO %s (title,url) VALUES (?, ?)" % (talbe,)
        # gconn.execute(sql, (title, img_url))
        _ = sql  # keep the statement referenced until execute() is restored
    except Exception:
        # BUG FIX: original body was the bare expression `None`, a no-op
        # that only *looked* like `pass`; make the intent explicit.
        pass
def save_to_dir(img_url, out_fname):
    """Download *img_url* and write the body to *out_fname*.

    Retries once, after a 10 second pause, when the first attempt fails
    with a network-level error (requests.exceptions.RequestException).
    Non-200 responses are skipped silently (original behavior); other
    failures are logged to gLogFile. Returns None in all cases.
    """
    for attempt in range(2):
        try:
            r = requests.get(img_url, stream=True, timeout=120)
            if r.status_code == 200:
                # Use a context manager so the file handle is always closed
                # (the original `open(...).write(...)` leaked it).
                with open(out_fname, 'wb') as f:
                    f.write(r.content)  # write the image body
            return
        except requests.exceptions.RequestException as e:
            if attempt == 1:
                # Second network failure: give up and log it.
                print('downError', '{"url":"%s","path":"%s"}' % (img_url, out_fname),
                      "Exception: {}".format(e), file=gLogFile, flush=True)
                return
            # First network failure: wait, then retry once.
            time.sleep(10)
        except Exception as e:
            # BUG FIX: the original logged r.status_code here, but when
            # requests.get itself raised, `r` was unbound (NameError).
            # Log the actual exception instead.
            print('downError', '{"url":"%s","path":"%s"}' % (img_url, out_fname),
                  "Exception: {}".format(e), file=gLogFile, flush=True)
            return
def checkDir(news_title):
    """Return *news_title* with characters that are illegal in Windows
    file names (plus '/' and '\\') replaced by underscores, so the value
    is safe to use as a path component.
    """
    # str.translate performs every substitution in a single C-level pass
    # instead of the original loop of up to nine chained .replace() calls.
    bad_chars = '*|:?/<>"\\'
    return news_title.translate(str.maketrans(bad_chars, '_' * len(bad_chars)))

def extract_content(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns "" on any failure (after logging to gLogFile) so callers can
    test for empty content instead of handling exceptions.
    """
    try:
        # BUG FIX: add a timeout — the original call had none and could
        # hang a worker thread indefinitely on a stalled connection.
        r = requests.get(url, timeout=120)
        r.encoding = "utf8"  # force UTF-8; the site's headers may be wrong
        return r.text
    except Exception as e:
        print("openError", url, "Exception: {}".format(e), file=gLogFile, flush=True)
        return ""

def download_img(para):
    """Worker: scrape one gallery page and download every image on it.

    Args:
        para: 3-tuple ``(page_url, l, v)``. Only the first element is
              used; the other two are legacy placeholders (see mapPara).

    Images land under gSaveRootDir/<group>/<title>/; files that already
    exist on disk are skipped.
    """
    page_url, _l, _v = para

    content = extract_content(page_url)
    if not content:  # fetch failed — extract_content already logged it
        print(page_url)
        return

    # Parse the page and locate the title/group nodes.
    doc = soup(content, 'html5lib')
    titles = doc.select('#thread-title h1')
    groups = doc.select('#thread-title a')
    if not titles or not groups:
        # BUG FIX: the original indexed select(...)[0] unguarded, so an
        # unexpected page layout raised IndexError and killed the worker.
        print('parseError', page_url, file=gLogFile, flush=True)
        return

    save_dir = os.path.join(gSaveRootDir, checkDir(groups[0].text), checkDir(titles[0].text))
    # exist_ok avoids the check-then-create race between worker threads.
    os.makedirs(save_dir, exist_ok=True)

    for img in doc.select('.adw li > img'):
        src = img['src']
        save_path = os.path.join(save_dir, src.split('/')[-1])
        if os.path.exists(save_path):
            continue  # already downloaded
        # save_to_mongo(src, _v['title'], _v['table'])
        save_to_dir(src, save_path)


# Re-crawl data starting from the second page of several threads, fetching
# in parallel.
# NOTE(review): this looks like a hand-maintained retry list of gallery
# pages that previously failed — confirm with the author before editing.
pageTag=[
'https://www.lesmao.co/thread-22883-3-1.html'
,'https://www.lesmao.co/thread-22883-5-1.html'
,'https://www.lesmao.co/thread-22883-4-1.html'
,'https://www.lesmao.co/thread-22905-3-1.html'
,'https://www.lesmao.co/thread-22905-4-1.html'
,'https://www.lesmao.co/thread-22905-5-1.html'
,'https://www.lesmao.co/thread-22905-2-1.html'
,'https://www.lesmao.co/thread-22919-2-1.html'
,'https://www.lesmao.co/thread-22919-3-1.html'
,'https://www.lesmao.co/thread-22919-4-1.html'
,'https://www.lesmao.co/thread-22919-5-1.html'

]
# download_img only reads the first tuple element; the two zeros are
# placeholders for its unused (l, v) parameters.
mapPara = [(i, 0, 0) for i in pageTag]

# Fan the page list out across gDownImgThreadNum worker threads.
# NOTE(review): exceptions raised inside workers are swallowed because the
# iterator returned by map() is never consumed — TODO confirm intended.
with ThreadPoolExecutor(gDownImgThreadNum) as actuator:
    actuator.map(download_img, mapPara)