import requests as rqs
from bs4 import BeautifulSoup as bs

# Shared helper that downloads a page and parses it into a soup tree.
class get_html(object):
    def getcontent(self, murl, charsetcode, timeout=10):
        """Fetch *murl* and return it parsed as a BeautifulSoup (lxml) tree.

        murl        -- absolute URL to download.
        charsetcode -- encoding forced onto the response body (the scraped
                       sites mis-declare their charset, so callers pass it).
        timeout     -- seconds before the request aborts; defaults to 10 so
                       a dead site cannot hang the caller forever.
        """
        # Browser-like headers so the scraped sites serve regular HTML.
        head = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate',
            'Upgrade-Insecure-Requests': '1',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cache-control': 'max-age=0',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
        # Context-manage the session so its connection pool is released even
        # when the request raises (the original leaked the session).
        with rqs.session() as session:
            r = session.get(url=murl, headers=head, timeout=timeout)
        r.encoding = charsetcode
        return bs(r.text, 'lxml')


# List-page scraping: fetches the category listing columns of a site.
class get_list(object):
    # Category keyword -> listing path on xxbiquge. Unknown keywords fall
    # back to the default fantasy listing (xclass/1/1.html).
    _BQG_PATHS = {
        'coatard': 'xclass/2/1.html',   # cultivation novels
        'urban': 'xclass/3/1.html',     # urban novels
        'history': 'xclass/4/1.html',   # historical novels
        'webgame': 'xclass/6/1.html',   # web-game novels
        'science': 'xclass/5/1.html',   # sci-fi novels
        'girl': 'xclass/7/1.html',      # female-oriented novels
    }

    def biquge(self, type):  # `type` shadows the builtin; kept for API compatibility
        """Scrape one biquge category listing page.

        Returns a dict with 'result' ('true'/'false') and, on success:
        'current'     -- latest updates column (title/href/chapter/time/author)
        'rankingList' -- ranking column (title/href/author)
        """
        biqugeSite = 'https://www.xxbiquge.com/' + self._BQG_PATHS.get(type, 'xclass/1/1.html')
        html_content = get_html().getcontent(biqugeSite, 'utf8').select('div#newscontent')
        res = {}
        if not html_content:
            res['result'] = 'false'
            return res
        res['current'] = []
        res['rankingList'] = []
        left_content = html_content[0].select('div.l')[0].find_all('li')
        right_content = html_content[0].select('div.r')[0].find_all('li')
        for item in left_content:
            content = item.find_all('span')
            res['current'].append({
                'title': content[0].select('a')[0].text,
                'href': 'catalogue/?site=bqg&catalogue=' + content[0].select("a")[0]['href'],
                'chapter': content[1].select('a')[0].text,
                # second bare text node of the span is the update timestamp
                'time': content[1].find_all(string=True)[1],
                'author': content[2].text,
            })
        for item in right_content:
            cnt = item.find_all('span')
            res['rankingList'].append({
                'title': cnt[0].select('a')[0].text,
                'href': 'catalogue/?site=bqg&catalogue=' + cnt[0].select("a")[0]['href'],
                'author': cnt[1].text,
            })
        res['result'] = 'true'
        return res


class get_catalogue(object):
    # Catalogue (table-of-contents) scraping for the supported sites.

    @staticmethod
    def _paired_chapters(items, site_tag):
        """Group a flat list of chapter-link tags into rows of two.

        items    -- tags each containing (at most) one <a> chapter link;
                    anchor-less tags (e.g. empty table cells) are skipped.
        site_tag -- 'bqg' or 'ddxs', embedded into the detail-page href.

        Bug fix: the original inline loops silently dropped the last
        chapter whenever the count was odd; the trailing partial row is
        now kept.
        """
        rows = []
        pair = []
        for item in items:
            links = item.select('a')
            if not links:
                continue  # would previously raise IndexError
            pair.append({
                'href': '/novel/list/detail/?site=' + site_tag + '&chapter=' + links[0]['href'],
                'chapter': links[0].text
            })
            if len(pair) == 2:
                rows.append(pair)
                pair = []
        if pair:  # keep the odd trailing chapter
            rows.append(pair)
        return rows

    def catalogue(self, website, site):
        """Scrape a novel's info header plus its full chapter list.

        website -- site-relative catalogue URL (bqg) or absolute URL (ddxs).
        site    -- 'bqg' or 'ddxs'.
        Returns {'result': 'true'/'false', 'data': {title/author/updateTime/
        updateChapter/abstract/chapters...}} with 'chapters' as rows of two.
        """
        biqugeSite = 'https://www.xxbiquge.com/'
        res = {'data': {'chapters': []}, 'result': 'false'}
        if site == 'bqg':
            html = get_html().getcontent(biqugeSite + website, 'utf8')
            # Header block: title / author / update info / abstract.
            html_head = html.select('div#maininfo')
            if len(html_head) > 0:
                res['result'] = 'true'
                res['data']['title'] = html_head[0].select('h1')[0].text
                cont = html_head[0].select('p')
                # Each <p> reads "label：value"; keep only the value part.
                res['data']['author'] = cont[0].text.split("：")[1]
                res['data']['updateTime'] = cont[2].text.split("：")[1]
                res['data']['updateChapter'] = cont[3].select('a')[0].text
                res['data']['abstract'] = cont[4].text
            # Chapter list lives in div#list as <dd> entries.
            html_content = html.select('div#list')
            if len(html_content) > 0:
                res['result'] = 'true'
                res['data']['chapters'] = self._paired_chapters(
                    html_content[0].find_all('dd'), 'bqg')
        elif site == 'ddxs':
            # The info page lives at /xiaoshuo/<id>.html; the <id> is the
            # second-to-last path segment of the chapter-list URL.
            html_head = get_html().getcontent(
                'http://www.23us.so/xiaoshuo/'
                + website.split('/')[-2] + '.html', 'utf8'
            ).select('dl#content')
            if len(html_head) > 0:
                res['result'] = 'true'
                res['data']['title'] = html_head[0].select('h1')[0].text.split(' ')[0]

                cont = html_head[0].select('table#at')[0].find_all('td')
                if len(cont) > 5:
                    res['data']['type'] = cont[0].text.strip()
                    # "\xa0" runs are layout padding on this site; drop them.
                    res['data']['author'] = cont[1].text.replace("\xa0\xa0\xa0\xa0", "").strip()
                    res['data']['updateTime'] = cont[5].text.replace("\xa0\xa0\xa0\xa0", "").strip()

                oth = html_head[0].select('dd')[-1].find_all('p')
                if len(oth) > 3:
                    res['data']['abstract'] = oth[1].text.replace('<br/>', '').strip()
                    res['data']['updateChapter'] = oth[-2].text.replace("\xa0\xa0\xa0\xa0", "").strip()
            # Chapter list is a table of <td> cells on a separate page.
            html_content = get_html().getcontent(website, 'utf8').select('table#at')
            if len(html_content) > 0:
                res['result'] = 'true'  # consistent with the bqg branch
                res['data']['chapters'] = self._paired_chapters(
                    html_content[0].find_all('td'), 'ddxs')
        return res


class get_detail(object):
    # Chapter-detail page scraping.

    def get_content(self, site, chapter):
        """Scrape one chapter page.

        site    -- 'bqg' or 'ddxs'.
        chapter -- site-relative (bqg) or absolute (ddxs) chapter URL.
        Returns {'result': 'true'/'false', 'data': {...}} where data holds
        chapter/title/content plus navigation links.
        """
        print('get_detail_data:'+site+'--'+chapter)
        # Start pessimistic: the original bqg branch returned no 'result'
        # key at all when parsing failed; now it is always present.
        res = {'data': {}, 'result': 'false'}
        if site == 'bqg':
            html = get_html().getcontent('https://www.xxbiquge.com' + chapter, 'utf8')
            title = html.select('div.bookname')
            if len(title) > 0:
                res['result'] = 'true'
                res['data']['chapter'] = title[0].select('h1')[0].text
                # Nav anchors: index 2 = catalogue link, index 3 = next chapter.
                res['data']['catalogue'] = title[0].select('a')[2]['href']
                res['data']['title'] = title[0].select('a')[2].text
                res['data']['next'] = title[0].select('a')[3]['href']
            content = html.select('div#content')
            if len(content) > 0:
                res['result'] = 'true'
                # Indentation runs become <br> for the front-end renderer.
                res['data']['content'] = content[0].text.replace("\xa0\xa0\xa0\xa0", "<br>")

        elif site == 'ddxs':
            html = get_html().getcontent(chapter, 'utf-8').select('div#amain')
            if len(html) > 0:
                res['result'] = 'true'
                tt = html[0].select('dd#footlink')[0].find_all('a')
                # Guard matches the highest index used (tt[2]); the original
                # checked len > 0 and could IndexError on 1-2 links.
                if len(tt) > 2:
                    res['data']['previous'] = tt[0]['href']
                    res['data']['catalogue'] = tt[1]['href']
                    res['data']['next'] = tt[2]['href']
                res['data']['chapter'] = html[0].select('h1')[0].text
                res['data']['title'] = html[0].select('dt')[0].select('a')[-1].text
                res['data']['content'] = html[0].select('dd#contents')[0].text.replace("\xa0\xa0\xa0\xa0", "<br>")
        return res


class search(object):
    # Site-search scraping (currently only ddxs via Baidu site search).

    def get_content(self, site, name):
        """Search for a novel called *name* on the given site.

        Returns {'result': 'true'/'false', 'data': [...], ['err': str]}
        for site='ddxs'; returns None for the not-yet-implemented sites
        ('sjg', 'xxsw') and any unknown site, matching the original contract.
        """
        if site != 'ddxs':
            # 'sjg' / 'xxsw' are placeholders: not implemented yet.
            return None
        # Pessimistic default: the original returned res without any
        # 'result' key when the search produced no hits.
        res = {'data': [], 'result': 'false'}
        search_html = get_html()\
            .getcontent('http://zhannei.baidu.com/cse/search?q=' + name
                        + '&s=8053757951023821596&srt=def&nsid=0', 'utf-8')\
            .select('div.result-game-item-detail')
        if search_html:
            try:
                for item in search_html:
                    first = item.select('a.result-game-item-title-link')
                    second = item.select('div.result-game-item-info')[0].find_all('p')
                    res['data'].append({
                        'title': first[0]['title'],
                        'href': '/novel/list/catalogue/?catalogue=' + first[0]['href'] + '&site=ddxs',
                        'author': second[0].select('span')[1].text.strip(),
                        'type': second[1].select('span')[1].text.strip(),
                        'updateTime': second[2].select('span')[1].text.strip(),
                        'updateChapter': second[3].select('a.result-game-item-info-tag-item')[0].text.strip()
                    })
                res['result'] = 'true'
            except Exception as err:
                # Store a string, not the exception object, so the result
                # stays JSON-serializable for the web layer.
                res['result'] = 'false'
                res['err'] = str(err)
        return res