# coding:UTF-8
# 豆瓣试读,短评,书评,目录,相关书籍爬虫
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import MySQLdb
from timeit import timeit

import requests
import time
from bs4 import BeautifulSoup as bs

# Browser-like request headers so Douban serves the normal desktop pages
# instead of blocking the crawler outright.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch, br',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
}
def author_intro_spider(main_soup, isbn13, db_name):
    """Scrape the author introduction from a book page and store it.

    Prefers the full intro (second ``.intro`` node) over the short one
    when both are present, mirroring the page layout.

    :param main_soup: BeautifulSoup of the book's subject page
    :param isbn13: book ISBN-13, used as the update key
    :param db_name: table name to update (interpolated into the SQL,
                    so it must come from trusted code, never user input)
    """
    author_intro = ""
    try:
        # The author intro lives in the first `.indent` sibling after #link-report.
        a_intro = main_soup.find_all(attrs={'id': 'link-report'})[0].find_next_siblings(attrs={'class': 'indent'})[0]
        intros = a_intro.find_all(attrs={'class': 'intro'})
        if len(intros) > 0:
            author_intro = intros[0].text.strip()  # short intro
        if len(intros) > 1:
            author_intro = intros[1].text.strip()  # full intro overrides the short one
        # Store the result.
        conn = MySQLdb.Connect(
            host='127.0.0.1',
            port=3306,
            user='root',
            passwd='1234',
            db='more110178',
            charset='utf8'
        )
        cursor = conn.cursor()
        sql = 'update ' + db_name + ' set author_intro=%s where isbn13=%s'
        try:
            cursor.execute(sql, (str(author_intro), str(isbn13)))
            conn.commit()
        except Exception as e:
            print(e)
        finally:
            conn.close()  # was leaked when execute() raised
    except Exception as e:
        print(e)


def douban_spider(id, isbn13, db_name, author):
    """Spider entry point: crawl one book's page and all its sub-pages.

    Fetches the subject page, then delegates to the sub-spiders
    (author intro, preview, short comments, reviews, related books).

    :param id: Douban book id (string, used in URLs)
    :param isbn13: book ISBN-13, the storage key for all sub-spiders
    :param db_name: table to update with the author intro
    :param author: when truthy, also scrape the author introduction
    """
    api = "https://book.douban.com/subject/" + id
    response = requests.get(api, headers=headers)
    if response.status_code == 200:
        main_soup = bs(response.text, "lxml")
        # Author intro is only needed for some tables.
        if author:
            author_intro_spider(main_soup, isbn13, db_name)
        # Free-preview chapters.
        pre_spider(id, isbn13)
        # Short comments (top 20).
        comments_spider(isbn13, id)
        # Full reviews — slowest step.
        review_spider(isbn13, id)
        # Related-book ids.
        refer_book(main_soup, isbn13, id)
        time.sleep(0.5)  # be polite between books
    else:
        # Parenthesized prints work identically in py2 for a single argument.
        print(id + "page catch fail")
        print(response.status_code)


def download_page(url):
    """Fetch *url* with the shared crawler headers.

    :param url: absolute URL to download
    :return: the response body text, or "" on any non-200 status
    """
    resp = requests.get(url, headers=headers)
    if resp.status_code != 200:
        return ""
    return resp.text


def _store_review(isbn13, title, author, full_content):
    """Insert one full review row into douban_review."""
    conn = MySQLdb.Connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='1234',
        db='more110178',
        charset='utf8'
    )
    cursor = conn.cursor()
    sql = 'insert into douban_review values(%s,%s,%s,%s)'
    try:
        cursor.execute(sql, (str(isbn13), str(title), str(author),
                             str(full_content)))
        conn.commit()
    except Exception as e:
        print(e)
    finally:
        conn.close()  # was leaked when execute() raised


def _store_review_link(isbn13, link, title):
    """Insert one link-only review row (beyond the first 5) into douban_review_link."""
    conn = MySQLdb.Connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='1234',
        db='more110178',
        charset='utf8'
    )
    cursor = conn.cursor()
    sql = 'insert into douban_review_link values(%s,%s,%s)'
    try:
        cursor.execute(sql, (str(isbn13), str(link), str(title)))
        conn.commit()
    except Exception as e:
        print(e)
    finally:
        conn.close()


def review_spider(isbn13, id):
    """Scrape a book's reviews.

    The first 5 reviews are downloaded in full (title, reviewer, body)
    and stored in douban_review; any further reviews are stored as
    link + title only in douban_review_link.

    :param isbn13: book ISBN-13, the storage key
    :param id: Douban book id (string, used in the URL)
    """
    try:
        reviews_api = 'https://book.douban.com/subject/' + id + "/reviews"
        reviews_req = requests.get(reviews_api, headers=headers)
        if reviews_req.status_code != 200:
            print("request view fail")
            return
        review_soup = bs(reviews_req.text, "lxml")
        review_div = review_soup.find_all(attrs={'class': 'review-list'})[0].find_all('div', typeof='v:Review')
        number = len(review_div)  # total review count on the page
        # Full content for at most the first 5 reviews.
        for i in range(min(number, 5)):
            link = review_div[i].find_all(attrs={'class': 'title-link'})[0].get('href')
            content = download_page(link)
            time.sleep(1)  # pause between full-page downloads
            content_soup = bs(content, "lxml")
            title = content_soup.find_all(attrs={'property': 'v:summary'})[0].text
            author = content_soup.find_all(attrs={'property': 'v:reviewer'})[0].text
            full_content = content_soup.find_all(attrs={'class': 'review-content clearfix'})[0].text
            _store_review(isbn13, title, author, full_content)
        # Link + title only for the rest.
        for i in range(5, number):
            anchor = review_div[i].find_all(attrs={'class': 'title-link'})[0]
            _store_review_link(isbn13, anchor.get('href'), anchor.text)
    except Exception as e:
        print(e)


def pre_spider(id, isbn13):
    """Scrape a book's free-preview chapters and store them.

    Each preview chapter (title + note text) is inserted into
    douban_reading keyed by isbn13. Best-effort: pages without a
    preview section are silently skipped.

    :param id: Douban book id (string, used in the URL)
    :param isbn13: book ISBN-13, the storage key
    """
    api = "https://book.douban.com/subject/" + id + "/reading/"
    response = requests.get(api, headers=headers)
    time.sleep(0.5)
    if response.status_code == 200:
        soup = bs(response.text, "lxml")
        try:
            lis = soup.find_all(attrs={'class': 'pro-list'})[0].find_all('li')
            for li in lis:
                title = li.find('a').text
                href = li.find('a').get('href')
                pre = requests.get(href, headers=headers)
                pre_soup = bs(pre.text, "lxml")
                book_content = pre_soup.find_all(attrs={'class': 'book-content'})[0]
                note = book_content.find_all(attrs={'class': 'note'})[0].text.encode('utf-8').strip()
                time.sleep(1)  # pause between chapter downloads
                conn = MySQLdb.Connect(
                    host='127.0.0.1',
                    port=3306,
                    user='root',
                    passwd='1234',
                    db='more110178',
                    charset='utf8'
                )
                cursor = conn.cursor()
                sql = 'insert into douban_reading values(%s,%s,%s)'
                try:
                    cursor.execute(sql, (str(isbn13), str(title), str(note)))
                    conn.commit()
                except Exception as e:
                    print(e)
                finally:
                    conn.close()  # was leaked when execute() raised
        except Exception:
            # Deliberate best-effort: no preview section, or parsing failed.
            pass

def comments_spider(isbn13, id):
    """Scrape a book's hot short comments (top 20) into douban_comment.

    For each comment it stores author, time, star rating (1-5 via
    get_star, or None when the comment has no rating), vote count and
    the comment text.

    :param isbn13: book ISBN-13, the storage key
    :param id: Douban book id (string, used in the URL)
    """
    try:
        api = "https://book.douban.com/subject/" + id + "/comments/"
        response = requests.get(api, headers=headers)
        time.sleep(0.5)
        soup = bs(response.text, "lxml")
        comment = soup.find_all(attrs={'class': 'comment-item'})
        for li in comment:
            comment_content = li.find('p', 'comment-content').text.strip()  # comment body
            comment_vote = li.find('span', 'vote-count').text.strip()  # upvote count
            comment_info = li.find_all('span', 'comment-info')  # author / rating / time
            author = comment_info[0].find_all('a')[0].text.strip()
            comment_star = comment_info[0].find_all('span')[0].get('title')
            if comment_star is not None:
                # Rated comment: <span title="力荐"> first, time second.
                star = get_star(comment_star)
                comment_time = comment_info[0].find_all('span')[1].text
            else:
                # Unrated comment: the first span is the time.
                star = None
                comment_time = comment_info[0].find_all('span')[0].text
            conn = MySQLdb.Connect(
                host='127.0.0.1',
                port=3306,
                user='root',
                passwd='1234',
                db='more110178',
                charset='utf8'
            )
            cursor = conn.cursor()
            sql = 'insert into douban_comment values(%s,%s,%s,%s,%s,%s)'
            try:
                cursor.execute(sql, (str(isbn13), str(author), str(comment_time),
                                     star, str(comment_vote), str(comment_content)))
                conn.commit()
            except Exception as e:
                print(e)
            finally:
                conn.close()  # was leaked when execute() raised
    except Exception as e:
        print(e)


# Douban rating label -> star value.
_STAR_LABELS = {
    u"很差": 1,
    u"较差": 2,
    u"还行": 3,
    u"推荐": 4,
    u"力荐": 5,
}


def get_star(comment_star):
    """Map a Douban rating label to an integer star value.

    BeautifulSoup yields the label as unicode, so we compare unicode
    directly instead of encoding to UTF-8 bytes five times (the old
    chain also breaks under Python 3, where bytes never equal str).

    :param comment_star: rating label text, e.g. u"力荐"
    :return: 1-5 for a known label, 0 otherwise
    """
    return _STAR_LABELS.get(comment_star, 0)


def refer_book(main_soup, isbn13, id):
    """Scrape the ids of related books and store them as one row.

    The related-book ids are concatenated into a single
    "&id1&id2&..." string and inserted into douban_refer keyed by
    isbn13.

    :param main_soup: BeautifulSoup of the book's subject page
    :param isbn13: book ISBN-13, the storage key
    :param id: Douban book id (unused here, kept for interface parity)
    """
    r_id = ""
    try:
        rec_section = main_soup.find_all(attrs={'id': 'db-rec-section'})[0]
        dl = rec_section.find_all('dl')
        # dl[5] and everything past dl[10] are layout "clear" entries
        # with no book link — skip them. This single loop replaces the
        # old duplicated <=5 / >5 branches, which behaved identically.
        for i in range(len(dl)):
            if i == 5 or i > 10:
                continue
            refer_id = dl[i].find_all('dd')[0].find('a')['href'].split('/')[-2]
            r_id += "&" + refer_id
        conn = MySQLdb.Connect(
            host='127.0.0.1',
            port=3306,
            user='root',
            passwd='1234',
            db='more110178',
            charset='utf8'
        )
        cursor = conn.cursor()
        sql = 'insert into douban_refer values(%s,%s)'
        try:
            cursor.execute(sql, (str(isbn13), str(r_id)))
            conn.commit()
        except Exception as e:
            print(e)
        finally:
            conn.close()  # was leaked when execute() raised
    except Exception:
        print(isbn13 + "no refer book")
# douban_spider('1084336')
#
# douban_spider("1723199","9787111002871")
# Spider entry point below.
# Progress is appended to log.txt.
def log(string):
    """Append one line to log.txt.

    :param string: text to log (a newline is added)
    """
    # The with-block closes the file; the old explicit close() was redundant.
    with open('log.txt', 'a') as f:
        f.write(string + "\n")

# Fetch the base rows from the database first.
# Pass 1: table douban_p02 — these rows also need the author intro
# (author=True below).
conn = MySQLdb.Connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='1234',
        db='more110178',
        charset='utf8'
    )
cursor = conn.cursor()
sql = "select d_id,isbn13,title from " \
      + "douban_p02" + " where average != ''"
cursor.execute(sql)
log(str(cursor.rowcount))
rs = cursor.fetchall()
conn.close()  # rows are in memory; release the connection before the long crawl
count = 0
for row in rs:
    douban_spider(row[0], row[1], 'douban_p02', True)
    time.sleep(1.5)  # throttle between books
    count += 1
    log(str(count) + "=====================")
    log(row[1] + "success")

# Pass 2: the remaining tables — no author intro needed (author=False).
# BUG FIX: the original list was missing a comma between 'douban_p309'
# and 'douban_pt05', so Python concatenated them into the bogus table
# name 'douban_p309douban_pt05' and neither table was ever crawled.
for name in ['douban_p105', 'douban_p114', 'douban_p200', 'douban_p208',
             'douban_p214', 'douban_p216', 'douban_p220', 'douban_p225',
             'douban_p302', 'douban_p309',
             'douban_pt05', 'douban_pt10']:

    conn = MySQLdb.Connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='1234',
        db='more110178',
        charset='utf8'
    )
    cursor = conn.cursor()
    sql = "select d_id,isbn13,title from " \
          + name + " where average != ''"
    cursor.execute(sql)
    log(str(cursor.rowcount))
    rs = cursor.fetchall()
    conn.close()  # release the connection before crawling this batch
    count = 0
    for row in rs:
        douban_spider(row[0], row[1], name, False)
        time.sleep(3.5)  # throttle between books
        count += 1
        log(str(count) + "=====================")
        log(row[1] + "success")
