#coding:utf-8

import os
import random
import urllib
import urllib.request

from bs4 import BeautifulSoup
from fake_useragent import UserAgent

# Create a directory (helper)
def mkdir(path):
    """Create the directory *path* (including parents) if it does not exist.

    Leading/trailing whitespace and trailing backslashes are stripped from
    *path* first.  Returns True when the directory was created, False when
    it already existed.  A status message is printed either way.
    """
    # Normalize: drop surrounding spaces, then any trailing backslashes.
    cleaned = path.strip().rstrip("\\")

    # Guard clause: nothing to do when the directory is already there.
    if os.path.exists(cleaned):
        print (cleaned+' 目录已存在')
        return False

    # makedirs builds every missing intermediate directory as well.
    os.makedirs(cleaned)
    print (cleaned+' 创建成功')
    return True


# Fetch a url and build a BeautifulSoup object from the response
def getSoup(url):
    """Fetch *url* and return a BeautifulSoup over the gbk-decoded page.

    Fixes vs. the original:
      * the request headers below were built but never attached to the
        request — they are now passed to ``urllib.request.Request``;
      * the unused ``my_headers`` list and ``UserAgent()`` lookup (which
        performed a network fetch on every call) have been removed.

    NOTE(review): Host and Cookie are hard-coded for www.biquge.com.tw,
    so this helper only works against that site — confirm before reuse.
    """
    # Headers captured from a real Chrome session for this site.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Cookie': '__cdnuid = 2607ff31ad58e60ec60edd859b1b0028',
        'Host': 'www.biquge.com.tw',
        'Pragma': 'no-cache',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
    }

    # Attach the headers (this was the point of building them).
    req = urllib.request.Request(url=url, headers=headers)
    # The site serves gbk-encoded HTML; decode before parsing.
    page = urllib.request.urlopen(req).read().decode('gbk')
    return BeautifulSoup(page, features='html.parser')


# Map a novel category name to its numeric type id
def dataToDictionaries(novelType):
    """Translate a novel category name into its numeric type id.

    Unknown category names fall through to 23.
    """
    # Table form of the original if/elif chain; aliases share one id.
    type_ids = {
        '玄幻小说': 6,  '玄幻武侠': 6,
        '修真小说': 7,
        '都市小说': 18, '都市言情': 18,
        '历史小说': 19, '军事历史': 19,
        '网游小说': 20, '网游竞技': 20,
        '科幻小说': 21,
        '恐怖小说': 22, '鬼故事': 22,
    }
    return type_ids.get(novelType, 23)