# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：sis_title.py
#日期：2019-11-20
#备注：Python爬虫爬取SIS小说目录和链接
CREATE TABLE `SIS_story` (
  `st_name` varchar(100) NOT NULL COMMENT '小说名称',
  `st_author` varchar(100) NOT NULL COMMENT '小说作者',
  `st_link` varchar(200) NOT NULL COMMENT '小说链接',
  `st_title` varchar(100) NOT NULL COMMENT '小说标题',
  `create_time` datetime NOT NULL COMMENT '创建时间',
  PRIMARY KEY (`st_link`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='SIS001网站小说';

'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import requests
from pacong.pachong_config import db,cursor,headers
from pycacho.cachobase.logger import Logger
from pycacho.cachobase.random_data import get_from_list
from bs4 import BeautifulSoup
import re
from pycacho.cachobase.file_deal import an_save_txt,get_dict,get_split_info
from retrying import retry

# Module-level logger named after this script.
logger = Logger("sis_title").get_log()
# The target site is addressed by raw IP; the final octet is chosen at random
# from site_list (via get_from_list) to spread requests across the two mirrors.
base_site = 'http://172.247.48.'
site_list =['2','3']

# By article: crawl one forum listing page and record matching story threads.
def get_ch_page(site, link, p):
    """Fetch page *p* of a forum listing and store a row for each known story.

    site: base site URL (scheme + host) used to build absolute thread links.
    link: listing URL prefix, e.g. '<site>/forum/forum-390'.
    p:    page number, appended to *link* as '-<p>.html'.
    """
    # Story name -> author mapping loaded from a local dict file.
    # Raw string: the original literal relied on the invalid escape '\P'
    # (same runtime value, but no DeprecationWarning and no fragility).
    # NOTE(review): reloaded on every page; could be hoisted by the caller.
    story_dict = get_dict(r'F:\PythonProject\Python\file\story_dict.txt', '\'', ',', ': ')
    url = '{0}-{1}.html'.format(link, p)
    resp = requests.get(url, headers=headers)
    logger.info(url)
    soup = BeautifulSoup(resp.text, 'html5lib')  # "lxml" parser drops data here
    for chapter in soup.find_all('a'):
        href = chapter.get('href')
        # Anchors without an href previously raised TypeError and were
        # silenced by a bare `except: pass`; skip them explicitly instead.
        if not href or 'thread-' not in href or '.html' not in href:
            continue
        for name in story_dict:
            if name in str(chapter):
                st_link = site + '/forum/' + href
                title_to_mysql(name, story_dict[name], st_link, chapter.get_text())

# Persist one chapter/title row to the database.
def title_to_mysql(st_name, st_author, st_link, st_title):
    """Insert one story-title row into SIS_story; roll back on failure.

    Uses a parameterized query so quotes/backslashes in scraped text cannot
    break the statement — the original built SQL via %-formatting, which is
    vulnerable to SQL injection and fails on any title containing a quote.
    """
    sql = ("insert into SIS_story (st_name, st_author, st_link, st_title, create_time) "
           "values (%s, %s, %s, %s, now())")
    try:
        cursor.execute(sql, (st_name, st_author, st_link, st_title))
    except Exception as e:
        # Roll back on error.  Duplicate st_link primary keys land here,
        # so this branch also serves as the de-duplication path.
        db.rollback()
        logger.debug('insert failed for %s: %s', st_link, e)
    else:
        db.commit()
        logger.info(st_title)

# By author: scan one author-area listing page for a specific author's threads.
def get_au_page(p):
    """Print every link on page *p* of forum-322 whose href contains 'shen2008'."""
    site = base_site + get_from_list(site_list, 1)
    url = site + '/forum/forum-322-{0}.html'.format(p)
    resp = requests.get(url, headers=headers)
    soup = BeautifulSoup(resp.text, 'html5lib')  # "lxml" parser drops data here
    for chapter in soup.find_all('a'):
        href = chapter.get('href')
        # Anchors without an href previously raised TypeError and were
        # silenced by a bare `except: pass`; skip them explicitly instead.
        if href and 'shen2008' in href:
            print(url)
            print(chapter)


def get_content(out_txt, url, title, p_type):
    """Download *url* and append its post bodies (each preceded by *title*) to *out_txt*.

    p_type == '单页' ('single page'): save only the first post on the page.
    Any other value: save every post found on the page.
    """
    resp = requests.get(url, headers=headers, timeout=60, verify=False)
    resp.encoding = 'utf-8'
    html = resp.text
    soup = BeautifulSoup(html, 'html5lib')
    # Post bodies live in elements with id="postmessage_<digits>".  The two
    # original branches duplicated this extraction; they are unified here.
    # (Also renamed the loop variable: the original shadowed builtin `id`.)
    post_ids = re.findall(r'id="postmessage_(\d{0,10})"', html)
    if p_type == '单页':
        # The original indexed [0] and raised IndexError on a page with no
        # posts; slicing keeps only the first id but tolerates an empty page.
        post_ids = post_ids[:1]
    for post_id in post_ids:
        content = soup.find(id='postmessage_' + post_id).get_text().replace('"', '')
        an_save_txt(out_txt, title)
        an_save_txt(out_txt, content)

def get_title():
    """Crawl the title listings of three forum sections.

    NOTE(review): range(40, 40), range(490, 490) and range(307, 307) are all
    EMPTY ranges, so none of these loops ever executes as written — presumably
    the start values were edited to pause/resume crawling.  Confirm the
    intended ranges (the comments suggest 33 / 490 / 307 pages) before
    relying on this function.
    """
    for i in range(40, 40):
        site = base_site + get_from_list(site_list, 1)
        get_ch_page(site,site + '/forum/forum-390',i)    # contest/essay area: 33 pages
    for i in range(490, 490):
        site = base_site + get_from_list(site_list, 1)
        get_ch_page(site,site + '/forum/forum-322',i)    # literature authors area: 490 pages
    for i in range(307, 307):
        site = base_site + get_from_list(site_list, 1)
        get_ch_page(site,site + '/forum/forum-383', i)    # original-life area: 307 pages
        #get_au_page(i)


def get_link():
    """Read (url, title) pairs from a local list file and save each page's text.

    Each row of the list file is comma-separated; url[0] is the page URL and
    url[1] its title.  Every page is fetched in '单页' (single-post) mode and
    appended to one output file.
    """
    # Raw strings: the original literals relied on invalid escape sequences
    # ('\P', '\d'), which emit DeprecationWarnings; the runtime values are
    # unchanged.
    link_file = r'F:\PythonProject\Python\document\ls.txt'
    urls = get_split_info(link_file, ',')
    new_file = r'F:\PythonProject\Python\da30.txt'
    for url in urls:
        print(url[0], url[1])
        get_content(new_file, url[0], url[1], '单页')



if __name__ == '__main__':
    # Entry point: crawl the title listings.  (As written, get_title's ranges
    # at L104/L108/L112 are empty, so this performs no requests — see the
    # review note there.)
    get_title()
    #get_content('F:\PythonProject\Python\\a.txt', 'http://38.103.161.228/forum/thread-4663401-1-25.html','')
    #get_link()