import os

import requests
from bs4 import BeautifulSoup as soup
import re
# import sqlite3

import time

from concurrent.futures import ThreadPoolExecutor

# Crawler configuration (module-level globals).
gUrlRoot = "https://www.lesmao.co/"
# Root directory on disk where downloaded galleries are stored.
gSaveRootDir=u"F://个人 - meitu/www.lesmao.co"
# NOTE(review): both log files are opened at import time, never closed, and
# use the platform default encoding — confirm UTF-8 is intended, since the
# log messages written to them contain Chinese text.
gLogFile = open('runlog.txt','a')
gPageFile = open('downpagelog.txt','a')

# Worker counts: subject threads / listing-page threads / per-page image
# download threads (see the ThreadPoolExecutor uses below).
gSubThreadNum=1
gPageThreadNum=5
gDownImgThreadNum=4

# Category note kept from the original: 14 = scenery, 15 = food.
# Per-subject crawl config: scheme, display name, inclusive start page,
# end page bound, and whether the subject is enabled.
glevelDict = {
    'lesmao': {'http': 'https', 'cname': 'xxx',  'startPage':1, 'endPage':150, 'downSub':True}
    # ,'rihan': {'http': 'https', 'cname': '日韩美女', 'startPage': 1, 'endPage': 95, 'downSub': True}
    # ,'guochan': {'http': 'https', 'cname': '国产美女',  'startPage':1, 'endPage':211, 'downSub':True}
              }


def checkDir(news_title):
    """Sanitize *news_title* so it can be used as a directory name.

    Every character that is illegal in Windows file names is replaced
    with an underscore; the sanitized string is returned.
    """
    # Single C-level pass instead of chained str.replace calls.
    illegal_to_underscore = str.maketrans({ch: "_" for ch in '*|:?/<>"\\'})
    return news_title.translate(illegal_to_underscore)

def getNumForStr(s):
    """Return the first number found in *s* as an int, or 0 if none.

    The regex also matches decimals such as "3.5"; the original code
    called int() directly on the match, which raised ValueError for
    those — the match now goes through float() first and is truncated.
    """
    matches = re.findall(r"\d+\.?\d*", s)
    if not matches:
        return 0
    # int(float(...)) tolerates a decimal match like "3.5" (-> 3).
    return int(float(matches[0]))

def extract_content(url):
    """Fetch *url* and return its body decoded as UTF-8 text.

    Returns "" on any failure; the failure is appended to the global
    log file instead of being raised, so callers only need to test for
    an empty result.
    """
    try:
        # A missing timeout can hang a worker thread forever; use the
        # same 120 s budget the image downloads already use.
        r = requests.get(url, timeout=120)
        r.encoding = "utf8"
        return r.text
    except Exception as e:
        print("openError", url, "Exception: {}".format(e), file=gLogFile, flush=True)
        return ""
def save_to_mongo(img_url, title, talbe):
    """Record (*title*, *img_url*) in table *talbe* — currently disabled.

    The database call is commented out, so this only builds the SQL
    string.  NOTE(review): the statement is built with %-interpolation,
    which is open to SQL injection; switch to parameter binding
    (``VALUES (?, ?)`` with ``execute(sql, (title, img_url))``) before
    re-enabling ``gconn.execute``.
    """
    try:
        sql = "INSERT INTO %s (title,url) VALUES ('%s', '%s')"%(talbe,title,img_url)
        # gconn.execute(sql)
    except Exception:
        # Best-effort: persisting metadata must never abort a download.
        # (The original used a bare `None` expression here.)
        pass
def _write_image(img_url, out_fname):
    """One download attempt of *img_url* into *out_fname*.

    Returns False on a network-level error (caller may retry); returns
    True otherwise — including a non-200 status, which is treated as a
    permanent failure and silently skipped, matching the original.
    """
    try:
        r = requests.get(img_url, stream=True, timeout=120)
        if r.status_code == 200:
            # `with` closes the handle; the original leaked it via
            # open(...).write(...).
            with open(out_fname, 'wb') as f:
                f.write(r.content)
        return True
    except requests.exceptions.RequestException:
        return False
    except Exception as e:
        # BUG FIX: the original logged r.status_code here, which raised
        # NameError when requests.get itself failed and always dropped
        # the real exception; log the exception instead.
        print('downError', '{"url":"%s","path":"%s"}' % (img_url, out_fname),
              "Exception: {}".format(e), file=gLogFile, flush=True)
        return True

def save_to_dir(img_url, out_fname):
    """Download *img_url* to *out_fname*, retrying once after 10 s.

    Only network-level errors (timeout, connection reset, ...) trigger
    the retry; a second network failure is logged to the global log
    file and the image is skipped.
    """
    if _write_image(img_url, out_fname):
        return

    # Transient network error — wait, then try exactly once more.
    time.sleep(10)
    try:
        r = requests.get(img_url, stream=True, timeout=120)
        if r.status_code == 200:
            with open(out_fname, 'wb') as f:
                f.write(r.content)
    except requests.exceptions.RequestException as e:
        print('downError', '{"url":"%s","path":"%s"}' % (img_url, out_fname),
              "Exception: {}".format(e), file=gLogFile, flush=True)

def download_img(para):
    """Download every image of one gallery page (thread-pool worker).

    *para* is a (relative link, unused placeholder, config) tuple as
    produced by subject_link; config carries 'savedir', 'title' and
    'table'.  Images already present on disk are skipped.
    """
    Son_link, _unused, v = para

    page_url = gUrlRoot + Son_link
    content = extract_content(page_url)
    if not content:
        print(page_url)
        return

    # Parse the page and pull out the gallery image tags.
    doc = soup(content, 'html5lib')
    for imgi in doc.select('.adw li > img'):
        src = imgi['src']
        savePath = os.path.join(v['savedir'], src.split('/')[-1])
        if os.path.exists(savePath):
            # Already downloaded on a previous run — skip.
            continue
        save_to_mongo(src, v['title'], v['table'])
        save_to_dir(src, savePath)

# Download one listing page of a sub-topic, thread by thread.
def subject_link(para):
    """Crawl one listing page of a subject.

    *para* is (subTitle, subject-config, pageId).  For every thread
    linked from the listing page: create its save directory, download
    the first-page images inline, then fan the remaining pages out to a
    small thread pool running download_img.  Returns pageId so the
    caller's executor.map can account for finished pages.
    """
    subTitle, v, pageId = para

    url = 'https://www.lesmao.co/plugin.php?id=group&page=' + str(pageId)
    print('开始下载 %s 主题,第 %d 页' % (subTitle, pageId), file=gPageFile, flush=True)

    content = extract_content(url)
    if not content:
        return pageId

    doc = soup(content, 'html5lib')
    for item in doc.select('#index-pic .photo a'):
        # Open the first page of the thread.
        Son_link = item['href']
        thread_url = gUrlRoot + Son_link
        son_content = extract_content(thread_url)
        doc2 = soup(son_content, 'html5lib')
        try:
            saveDir1 = doc2.select('#thread-title h1')[0].text
            saveDir2 = doc2.select('#thread-title a')[0].text
            image_main = doc2.select('.adw li > img')
        except IndexError:
            # Page did not parse (blocked, removed, ...) — log and move on.
            print('openError', thread_url, file=gLogFile, flush=True)
            continue

        # <category>/<thread title> under the save root, sanitized.
        saveDir = os.path.join(gSaveRootDir, checkDir(saveDir2), checkDir(saveDir1))

        # Per-thread config handed to the download_img workers.
        nv = {'table': subTitle, 'savedir': saveDir, 'title': saveDir1}

        if not os.path.exists(saveDir):
            os.makedirs(saveDir)

        # First page of the thread: download inline.
        for imgi in image_main:
            src = imgi['src']
            savePath = os.path.join(saveDir, src.split('/')[-1])
            # BUG FIX: the original passed the image URL (imgi['src'])
            # as the title; pass the thread title, matching download_img.
            save_to_mongo(src, saveDir1, subTitle)
            save_to_dir(src, savePath)

        # Pagination anchors (the trailing one is dropped, as in the
        # original); remaining pages are fetched concurrently.
        pageTag = doc2.select('.pg a')[:-1]
        mapPara = [(a['href'], 0, nv) for a in pageTag]
        with ThreadPoolExecutor(gDownImgThreadNum) as actuator:
            actuator.map(download_img, mapPara)

    return pageId
# One thread pool per subject: fan the configured page range out to workers.
def subject_page(para):
    """Download all listing pages of a single subject.

    *para* is (subject-key, subject-config); a subject whose config has
    ``downSub`` set to False is skipped entirely.
    NOTE(review): range() excludes 'endPage', so the last configured
    page number is never fetched — confirm whether that is intended.
    """
    key, cfg = para
    if not cfg['downSub']:
        return

    page_jobs = [(key, cfg, page) for page in range(cfg['startPage'], cfg['endPage'])]
    with ThreadPoolExecutor(gPageThreadNum) as pool:
        # Drain the iterator so every page finishes before the pool closes.
        for _ in pool.map(subject_link, page_jobs):
            pass
# Ensure the per-subject save directory exists (DB table creation is
# currently disabled).
def checkSaveDir(key,subt):
    """Create the save directory for one subject.

    :param key: subject key (was also the SQLite table name below).
    :param subt: subject config dict; 'cname' names the directory.
    :return: None
    """
   #  gconn.execute('''CREATE TABLE IF NOT EXISTS %s
   # (id INTEGER PRIMARY KEY NOT NULL,
   # title      CHAR(256),
   # url        CHAR(256));'''%key)
    saveDir = os.path.join(gSaveRootDir, subt['cname'])
    # exist_ok avoids the check-then-create race (TOCTOU) when several
    # threads create directories under the same root concurrently.
    os.makedirs(saveDir, exist_ok=True)
def main():
    """Fan every configured subject out to the subject-level thread pool."""
    subject_jobs = list(glevelDict.items())
    with ThreadPoolExecutor(gSubThreadNum) as pool:
        # Consume the map iterator so all subjects finish before exit.
        for _ in pool.map(subject_page, subject_jobs):
            pass

# Script entry point: crawl only when run directly, not when imported.
if __name__ == '__main__':
    main()