import requests
from bs4 import BeautifulSoup
import pymysql
import os

# Base URL of the site being scraped.
web = 'http://www.runrundouhua.com'
# Local root directory under which downloaded images are saved.
base_path = 'E://py'
# Global running counter used to prefix saved gallery titles (see pic()).
pic_p = 0

def get(url, time=1):
    """Fetch *url* and return it parsed as a BeautifulSoup document.

    Retries up to 10 times on network errors, logging each attempt.

    Args:
        url: Absolute URL to fetch.
        time: Current attempt number (1-based); used internally for retries.

    Returns:
        BeautifulSoup object on success, or None after the 10th failure.
    """
    try:
        resp = requests.get(url, timeout=30)
        return BeautifulSoup(resp.content, 'lxml')
    except requests.RequestException:
        # Narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit and hide programming errors.
        if time >= 10:
            print("ERROR!!!")
            print(url)
            return None
        print("第", time, "次尝试：", url)
        return get(url, time + 1)

def download_img(img_url, path, name, time=1):
    """Download *img_url* and save it to *path*/*name*.

    Creates *path* on demand and retries up to 10 times on network or
    filesystem errors.

    Args:
        img_url: Absolute URL of the image.
        path: Destination directory (created if missing).
        name: File name to write inside *path*.
        time: Current attempt number (1-based); used internally for retries.

    Returns:
        None. After the 10th failure the error is logged and the image
        is skipped.
    """
    try:
        r = requests.get(img_url, stream=True, timeout=30)
        print(img_url)
        if r.status_code == 200:
            # exist_ok avoids the racy exists()/makedirs() check.
            os.makedirs(path, exist_ok=True)
            print(path + "/" + name)
            # `with` guarantees the handle is closed even on a write error
            # (the original leaked the file object); streaming in chunks
            # honors stream=True instead of buffering the whole body.
            with open(path + "/" + name, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
    except (requests.RequestException, OSError):
        # Narrowed from a bare `except:`; OSError keeps the original
        # retry-on-filesystem-trouble behavior.
        if time >= 10:
            print("ERROR!!!")
            print(img_url)
            return None
        print("第", time, "次尝试：", img_url)
        return download_img(img_url, path, name, time + 1)

def video(text,url):
    """Crawler for the video ("在线点播") section — not yet implemented (stub)."""
    pass

def story_save(cate, date, title, url):
    """Fetch a story page and insert its content into the `story` table.

    Args:
        cate: Category name.
        date: Publication date string (logged only; not stored).
        title: Story title.
        url: Absolute URL of the story page.
    """
    bsobj = get(url)
    if bsobj is None:
        return
    a_list = bsobj.select('.content > center')
    # Keep the original " -> ' normalization so newly stored rows stay
    # consistent with rows saved by earlier runs.
    content = str(a_list[0]).replace('"', "'")
    # Open the database connection.
    db = pymysql.connect(host="localhost", port=3306, user="root",
                         password="123456", database="test")
    try:
        cursor = db.cursor()
        # Parameterized query: the original concatenated scraped title and
        # content straight into SQL, which is an injection vector and broke
        # on embedded quotes.
        sql = 'INSERT INTO `story`(`cate`, `title`, `url`, `content`) VALUES (%s, %s, %s, %s)'
        print(cate, date, title, url)
        try:
            cursor.execute(sql, (cate, title, url, content))
            db.commit()
        except pymysql.MySQLError:
            # Roll back the failed insert but keep crawling.
            db.rollback()
    finally:
        # Close even if the insert raised (the original leaked the
        # connection on unexpected errors).
        db.close()

def pic_save(cate, date, title, url):
    """Download every image on a gallery page into base_path/cate/title.

    Args:
        cate: Category name, used as a directory component.
        date: Publication date string (currently unused in the path).
        title: Gallery title, used as a directory component.
        url: Absolute URL of the gallery page.

    Returns:
        None. Returns early when the page fails to load or has no
        `.content` container.
    """
    bsobj = get(url)
    if bsobj is None:
        return None
    print(url)
    a_list = bsobj.select('.content')
    if len(a_list) == 0:
        return None
    imgs = a_list[0].find_all("img")
    # enumerate replaces the manual i = i + 1 counter; numbering starts at 1
    # to keep the original file-name prefixes.
    for i, img in enumerate(imgs, start=1):
        src = img.get('src')
        if not src:
            # Some <img> tags carry no src (e.g. lazy-load placeholders);
            # skip them instead of crashing on None.split('/').
            continue
        path = base_path + "/" + cate + "/" + title
        name = "{:0>4d}".format(i) + "_" + src.split('/')[-1]
        download_img(src, path, name)

def story(cate, url):
    """Crawl one listing page of stories, saving entries not yet in the DB,
    then recurse into the next page until the last page is reached.

    Args:
        cate: Category name passed through to story_save().
        url: Absolute URL of the listing page to crawl.
    """
    bsobj = get(url)
    if bsobj is None:
        return
    a_list = bsobj.select('body > div:nth-child(8) > div > ul')
    for a in a_list[0].find_all('a'):
        date = a.find('span').text
        # The anchor text embeds the date; strip its first occurrence to
        # recover the bare title.
        text = a.text.replace(date, "", 1)
        href = web + a.get('href')

        # Open the database connection.
        db = pymysql.connect(host="localhost", port=3306, user="root",
                             password="123456", database="test")
        try:
            cursor = db.cursor()
            # Parameterized lookup: the original concatenated the scraped
            # title straight into SQL (injection risk, broke on quotes).
            cursor.execute('select * from `story` where `title` = %s', (text,))
            results = cursor.fetchall()
        finally:
            db.close()
        if len(results) > 0:
            # Already saved on a previous run — skip.
            print("pass:" + text)
        else:
            story_save(cate, date, text, href)

    page_list = bsobj.select('.page_info')
    next_page = ''
    end_page = ''
    page_href = ''
    for a in page_list[0].find_all('a'):
        if a.text == "下一页":
            next_page = web + a.get('href')
            page_href = next_page
        if a.text == "尾页":
            end_page = web + a.get('href')

    print()
    print("页码：", cate, next_page, end_page)

    # When "next page" equals "last page" we are on the final page.
    if next_page == end_page:
        return None
    story(cate, page_href)

def pic(cate, url):
    """Walk one gallery listing page: download every gallery found on it,
    then follow the "next page" link recursively until the final page.

    Args:
        cate: Category name passed through to pic_save().
        url: Absolute URL of the listing page to crawl.
    """
    doc = get(url)
    if doc is None:
        return

    listing = doc.select('body > div:nth-child(8) > div > ul')
    if len(listing) > 0:
        for anchor in listing[0].find_all('a'):
            # pic_p is a module-wide running counter so file-name prefixes
            # stay unique across pages.
            global pic_p
            pic_p = pic_p + 1
            stamp = anchor.find('span').text
            # Anchor text embeds the date; remove its first occurrence to
            # get the bare gallery title.
            label = anchor.text.replace(stamp, "", 1)
            link = web + anchor.get('href')
            pic_save(cate, stamp, "{:0>5d}".format(pic_p) + "_" + label, link)

    pagers = doc.select('.page_info')
    if len(pagers) == 0:
        return None

    next_page, end_page, page_href = '', '', ''
    for anchor in pagers[0].find_all('a'):
        if anchor.text == "下一页":
            next_page = web + anchor.get('href')
            page_href = next_page
        if anchor.text == "尾页":
            end_page = web + anchor.get('href')

    print()
    print("页码：", cate, next_page, end_page)

    # Reaching the page where "next" equals "last" means we are done.
    if next_page == end_page:
        return None
    pic(cate, page_href)

if __name__ == '__main__':
    # Crawl the site's header menu. Of the three known sections only the
    # picture section ("激情图区") is currently enabled.
    home = get(web)
    if home is not None:
        sections = ('在线点播','多情小说','激情图区')
        header = home.select('#header_box > div')
        for menu in header[0].find_all('ul'):
            current = ''
            for item in menu.find_all('li'):
                link = item.find('a')
                caption = link.text
                target = link.get('href')
                if target == '/':
                    # A bare "/" href marks a section heading; remember which
                    # section the entries that follow belong to.
                    for section in sections:
                        if section == caption:
                            current = section
                else:
                    if current == sections[2]:
                        pic(caption, web + target)



