import pymysql
import requests
import json
import re
import random
import time
import gc

# Base HTTP headers sent with every crawl request. The 'User-Agent' key is
# filled in per-request by requests_url() with a random pick from the
# user_agent pool below; Host/Referer pin the requests to book.douban.com.
header = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,la;q=0.7',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Host': 'book.douban.com',
    'Referer': 'https://book.douban.com/tag/?view=cloud',
    'Upgrade-Insecure-Requests': '1',
}
# Pool of User-Agent strings rotated by requests_url() to reduce the chance
# of the crawler being throttled/blocked. Some entries are intentionally
# duplicated (e.g. the MSIE 7 / Windows NT 5.1 string), which weights them
# slightly higher under random.choice; the list order is otherwise arbitrary.
user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",

    # Bug fix: this entry previously read "App leWebKit" (stray space inside
    # the product token), which no real browser ever sends.
    "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2 ',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER) ',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)'
]

# Regexes for the tag listing pages. Only regx[3] is used in this file
# (parse_page extracts book detail-page URLs from a listing page with it);
# the others appear to target the tag-cloud markup (entry <div>s, tag
# section titles, tag link hrefs, intro <div> contents) — presumably kept
# for other scripts; verify before removing.
regx = [r'<div class="">[\s\S].*?<a[\s\S]*?</div>',
        r'<a.name="(.*?)".class="tag-title-wrapper">',
        r'<td><a href="(.*?)">',
        r'<a href="(.*?)" title=".*?"[\s\S]*?onclick=".*?">',
        r'<div class="intro">([\s\S][^void]*?)</div>']


def requests_url(url, **other):
    """Fetch *url* with a random User-Agent and return the decoded page text.

    Any keyword arguments (crawl state such as ``page_index``) are dumped to
    ``page.txt`` if the request fails, so the crawl can be resumed later.
    Returns None on failure.
    """
    try:
        # Rotate the User-Agent on every request to look less like a bot.
        header['User-Agent'] = random.choice(user_agent)
        # Timeout added so a stalled connection cannot hang the crawl forever.
        page = requests.get(url, headers=header, timeout=30)
        page.encoding = 'utf-8'
        return page.text
    except Exception:
        # Bug fix: the original did open("page.txt") — read mode — so
        # json.dump raised inside this handler and the resume state was
        # never written; the handle was also leaked.
        with open('page.txt', 'w', encoding='utf-8') as state_file:
            json.dump(other, state_file, ensure_ascii=False)
        return None


def get_info_by_list(info, flag):
    """Map alternating [key, value, key, value, ...] text fragments from a
    book's "info" section onto named fields of *flag*.

    Known keys (作者/出版社/出版年/页数/定价/ISBN) go to dedicated fields;
    everything else is collected into flag['other'] as {key: value} dicts.
    """
    # Drop the empty fragments the regex extraction leaves behind.
    tmp = [fragment for fragment in info if len(fragment) != 0]

    # Keys promoted to dedicated flag fields (field names match the columns
    # inserted by parse_page).
    known = {
        '作者': 'author',
        '出版社': 'publish',
        '出版年': 'Year',
        '页数': 'page_num',
        '定价': 'price',
        'ISBN': 'ISBN',
    }

    tmp_arr = []
    # Stop at len(tmp) - 1 so a trailing key with no value is ignored instead
    # of raising IndexError (the original crashed on odd-length input).
    for i in range(0, len(tmp) - 1, 2):
        key, value = tmp[i], tmp[i + 1]
        # Bug fix: the original tested ``tmp[i] in '作者'`` etc., i.e. whether
        # the key is a *substring* of the label — so e.g. '者' or '社' alone
        # would match. Exact dict lookup restores the intended comparison.
        if key in known:
            flag[known[key]] = value
        else:
            tmp_arr.append({key: value})
    flag['other'] = tmp_arr


# Regexes applied to a book's detail page by parse_page:
# [0] book title (v:itemreviewed span)      [1] the raw "info" <div> block
# [2] cover image link (class="nbg" href)   [3] average rating
# [4] intro <div>s (content and/or author)  [5] full table-of-contents <div>
# [6] tag label out of a /tag/<label>? URL  [7] text between tags, ">text<"
# [8] the "内容简介" (content intro) heading, used to disambiguate a lone
#     intro div between content-intro and author-intro.
regx_get_book_info = [r'<span property="v:itemreviewed">(.*?)</span>',
                      r'<div.id="info".class="">([\s\S]*?)</div>',
                      r'<a.class="nbg"[\s\S]*?href="(.*?)".title=".*?">',
                      r'<strong class=".*?" property="v:average">(.*?)</strong>',
                      r'(<div class="intro">[\s\S][^void]*?</div>)',
                      r'(<div class="indent" id="dir_.*?_full" style="display:none">[\s\S]*?</div>)',
                      r'tag/(.*?)\?',
                      r'>(.*?)<',
                      r'内容简介']

# Characters/entities stripped from the "info" block before splitting it into
# key/value fragments: newlines, &nbsp entities, semicolons, spaces, colons.
sub_regx = [r'\n|&nbsp|;| |:']


def init_flag(flag):
    """Ensure every field used by the douban_book_info_new insert exists on
    *flag*, defaulting missing ones to the empty string.

    Fields the caller already set (parse_page seeds 'type', 'label' and
    'url' before calling this) are left untouched.
    """
    # Bug fix: the original lines each ended with a stray comma, so every
    # value was stored as the tuple ('',) instead of the string ''; plain
    # assignment also clobbered the type/url values the caller had just set.
    # NOTE(review): 'lable' looks like a typo for 'label' (which parse_page
    # sets); kept as-is so any consumer reading that key keeps working.
    for key in ('type', 'lable', 'url', 'name', 'author', 'publish', 'Year',
                'page_num', 'price', 'ISBN', 'other', 'img_url', 'average',
                'content_validity', 'author_validity', 'indent'):
        flag.setdefault(key, '')


def init():
    """Resume support: prune already-crawled tag URLs and return the page
    index to restart from.

    Reads the saved crawl state from conf/page.txt, the full tag-URL list
    from conf/url.json (one single-entry JSON object per line) and the list
    of finished URLs from conf/yidu.csv, then rewrites conf/url.txt with
    only the unfinished URL lines. Returns the saved 'page_index' value.
    """
    # Use a context manager — the original leaked this file handle.
    with open('conf/page.txt', 'r', encoding='utf-8') as f:
        weidu = json.load(f)

    with open('conf/url.json', 'r', encoding='utf-8') as f:
        urls = f.readlines()

    with open('conf/yidu.csv', 'r', encoding='utf-8') as f:
        line = f.readline()
        print(line)
        while line:
            done = line.strip()
            for i, raw in enumerate(urls):
                # Bug fix: the ``encoding`` keyword of json.loads was removed
                # in Python 3.9 (it was ignored before that) and made this
                # raise TypeError on modern interpreters.
                tmp = json.loads(raw)
                # A line is "finished" when any of its values matches the
                # crawled-URL entry; remove only the first match per line.
                if any(str(v).strip() == done for v in tmp.values()):
                    del urls[i]
                    break
            line = f.readline()

    with open('conf/url.txt', 'w', encoding='utf-8') as f:
        f.writelines(urls)
    return weidu['page_index']


def parse_page(index):
    """Crawl every tag URL listed in conf/url.txt and insert one MySQL row
    per scraped book into douban_book_info_new.

    For each tag URL template, pages through the listing 20 items at a time
    starting at *index*, fetches each book's detail page, extracts the
    fields via regx_get_book_info, and commits once per listing page.
    Finished tag URLs are appended to conf/yidu.csv so a restarted run can
    skip them (see init()).
    """
    # Module-level so the except handler below can still log the last-built
    # SQL statement / flag dict — which may carry values from a *previous*
    # book if the failure happened before this iteration assigned them.
    global sql, flag
    with open('conf/url.txt', 'r', encoding='utf-8') as url_file:
        line = url_file.readline()
        while line:
            # Each line is a one-entry JSON object: {tag_name: url_template}.
            for k, v in json.loads(line).items():
                print('------------------------', k, v, '------------------------')
                # Douban listings paginate via an offset query param, 20/page.
                for page_index in range(index, 1000, 20):
                    url = v.format(page_index)
                    page_html = requests_url(url, page_index=page_index)
                    # NOTE(review): requests_url returns None on failure,
                    # which would make re.findall raise here — confirm.
                    urls = re.findall(regx[3], page_html)
                    # Fresh connection per listing page; committed below.
                    conn = pymysql.connect('localhost', 'root', '123', 'pc')
                    cursor = conn.cursor()

                    for book_info_url in urls:
                        try:
                            start = time.time()
                            print(book_info_url)
                            # Seed the record with tag type, the label parsed
                            # out of the tag URL, and the detail-page URL.
                            flag = {'type': k, 'label': re.findall(regx_get_book_info[6], v)[0],
                                    'url': book_info_url}
                            init_flag(flag)
                            page_html = requests_url(book_info_url, page_index=page_index)
                            # Book title.
                            flag['name'] = re.findall(regx_get_book_info[0], page_html)[0]
                            # Strip whitespace/entities from the "info" block,
                            # then split it into key/value text fragments.
                            information = re.sub(sub_regx[0], '', re.findall(regx_get_book_info[1], page_html)[0])
                            get_info_by_list(re.findall(regx_get_book_info[7], information), flag)
                            flag['img_url'] = re.findall(regx_get_book_info[2], page_html)[0]
                            flag['average'] = re.findall(regx_get_book_info[3], page_html)[0]
                            # A detail page may carry two intro <div>s
                            # (content + author), one, or none; a lone div is
                            # disambiguated by the "内容简介" heading below.
                            information = re.findall(regx_get_book_info[4], page_html)
                            if len(information) == 2:
                                flag['content_validity'] = information[0]
                                flag['author_validity'] = information[1]
                            elif len(information) == 1:
                                if len(re.findall(regx_get_book_info[8], page_html)) == 1:
                                    flag['content_validity'] = information[0]
                                    flag['author_validity'] = ''
                                else:
                                    flag['content_validity'] = ''
                                    flag['author_validity'] = information[0]
                            elif len(information) == 0:
                                flag['content_validity'] = ''
                                flag['author_validity'] = ''
                            # Full table of contents (hidden "dir_*_full" div);
                            # optional, so missing is not an error.
                            try:
                                flag['indent'] = re.findall(regx_get_book_info[5], page_html)[0]
                            except Exception:
                                flag['indent'] = ''
                            # NOTE(review): SQL built by string formatting —
                            # scraped text with quotes/backslashes can break
                            # or inject into this statement; parameterized
                            # queries (cursor.execute(sql, args)) would be
                            # the safe form.
                            sql = 'insert into douban_book_info_new values ("{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}",\'{}\',\'{}\',\'{}\')'.format(
                                flag['type'],
                                flag['label'],
                                flag['url'],
                                flag['name'],
                                flag['author'],
                                flag['publish'],
                                flag['Year'],
                                flag['page_num'],
                                flag['price'],
                                flag['ISBN'],
                                flag['other'],
                                flag['img_url'],
                                flag['average'],
                                flag['content_validity'].replace("'", '"'),
                                flag['author_validity'].replace("'", '"'),
                                flag['indent'].replace("'", '"'))
                            cursor.execute(sql)
                            flag.clear()
                            gc.collect()
                            print(book_info_url, time.time() - start)
                            # Randomized politeness delay between detail pages.
                            time.sleep(3 + random.randint(0, 5))
                        except Exception as e:
                            # Best-effort error log: URL, last SQL, last flag.
                            with open('error.txt', 'a', encoding='utf-8') as f:
                                f.write(book_info_url + "\n")
                                f.write(sql + "\n")
                                f.write(json.dumps(flag, ensure_ascii=False))
                    conn.commit()
                    conn.close()
                    gc.collect()
                # Only the first tag resumes at *index*; later tags start at 0.
                index = 0
                # Record this tag URL as finished for init() on the next run.
                with open('conf/yidu.csv', 'a', encoding='utf-8') as f:
                    f.write(v + "\n")
                line = url_file.readline()


if __name__ == '__main__':
    # Prune finished URLs and recover the page index to resume from.
    # The actual crawl is currently disabled; uncomment to run it.
    start_index = init()
    # parse_page(start_index)
