#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 's125_nanoorchis'
__mtime__ = '2017/3/9'
"""

# NOTE(review): a live, personal session cookie (uin / skey / RK etc.) is
# hard-coded in source — it expires and is a credential leak; it should be
# loaded from an untracked config file or environment variable instead.
cookieStr="sd_userid=46611477323355677; sd_cookie_crttime=1477323355677; tvfe_boss_uuid=4088ef056bb05328; pac_uid=1_569128050; eas_sid=N1e4j894T6l8q7a6q4x0H2j4N5; RK=fF26s1O6dd; pgv_pvi=751090688; pgv_si=s3762871296; ptui_loginuin=569128050; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; pgv_info=ssid=s3206305046&pgvReferrer=; pgv_pvid=6689437934; o_cookie=569128050; pt2gguin=o0569128050; uin=o0569128050; skey=Me9efoRrBb; ptisp=ctc; ptcz=e21326411328d62054792d62165ca96ae63c5188c3b5131f3d8a8451a6ae0d70; MANYOU_SESSIONID_bf895=5173a011aaba08f219450ef1fb2e9e5f; qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; security_cookiereport=1488973870"
import requests
import re
import pymysql

from bs4 import BeautifulSoup
# Shared module-level connection used by every function below.
# NOTE(review): host/user/password/schema are hard-coded; charset="utf8" is
# what lets Chinese text survive the round trip (see strTest/insertTest).
db = pymysql.connect("localhost", "test20170307", "test20170307", "scalers_forum_db",charset="utf8")
# Site root; hrefs scraped from the pages are relative and get this prefix.
preUrl="http://qgc.qq.com"

#将字符串形式的cookie转换成字典形式的cookies
#Convert a browser "Cookie:" header string into a dict usable by requests.
def convertCookieStrToDict(cookieStr):
    """Parse a cookie header string into a {name: value} dict.

    cookieStr -- cookie pairs joined by "; ", e.g. "a=1; b=2".
    Returns a dict mapping cookie names to their (string) values.
    """
    cookies = {}
    for pair in cookieStr.split("; "):
        # Split on the FIRST "=" only: cookie values may themselves contain
        # "=" (e.g. "pgv_info=ssid=...&pgvReferrer=" in the cookie above) —
        # the original split("=") silently truncated such values.
        name, _, value = pair.partition("=")
        cookies[name] = value
    return cookies

# Session cookies sent with every request (logged-in identity).
cookies=convertCookieStrToDict(cookieStr)
# Crawl entry point: the forum group's first index page.
url="http://qgc.qq.com/314962432"

#Save the response body of a group page as a local HTML file.
def saveUrl(url):
    """Download a qgc.qq.com group page and save it under ../Download/.

    The file is named "f<group-number>.html" where the group number is the
    trailing digits of the URL (e.g. http://qgc.qq.com/314962432 -> f314962432).
    """
    fileName = "f" + re.match(r'http://qgc\.qq\.com/(\d*)', url).group(1)
    response = requests.get(url, cookies=cookies)
    # "with" guarantees the handle is closed even if write() raises (the
    # original leaked it on error); an explicit utf-8 encoding keeps the
    # output independent of the platform default (would crash on e.g. a
    # gbk default locale).
    with open("../Download/" + fileName + ".html", 'w', encoding="utf-8") as f:
        f.write(response.content.decode(encoding="utf-8"))

#Decide from the raw page HTML whether a further index page exists.
def hasPageUrlNextPage(content):
    """Return True iff the page HTML contains the "下一页" (next page) span."""
    # Membership test replaces the original count(...) > 0 plus an
    # if/else that returned literal True/False — same result, one scan.
    return '<span>下一页</span>' in content
#Refresh page_list_table: each index page lists many threads; store one row per thread.
def flushPageListDb():
    """Crawl the forum index pages and persist every thread's metadata.

    Resumes from the URL recorded in pre_table when a previous run was
    interrupted; otherwise starts from the module-level start `url`.
    Follows the "下一页" (next page) link until there is none, writing one
    row per thread via writeOneReToPageListTable.
    """
    hasUnFinishedWork = getPreValue("has_unfinished_work")
    p("hasUnFinishedWork:" + str(hasUnFinishedWork))
    # NOTE(review): getPreValue returns the stored value as a *string*, so
    # any non-empty value — even "0"/"False" — is truthy here; confirm the
    # exact representation pymysql writes for booleans.
    if hasUnFinishedWork:
        # Interrupted run: resume from the page we stopped at.  (The
        # original had these branches swapped, contradicting its own
        # comment and always restarting from the first page.)
        curUrl = getPreValue("cur_url")
    else:
        # Fresh run: start from the first index page.
        curUrl = url
    p("cur_url:" + getPreValue("cur_url"))
    keepGoing = True
    while keepGoing:
        response = requests.get(curUrl, cookies=cookies)
        # Flag the crawl as in progress before we start writing rows.
        addPreToPreTable("has_unfinished_work", True)
        content = response.content.decode(encoding="utf-8")
        soup = BeautifulSoup(content, 'lxml')
        # One <div class="feed clearfix"> per thread on the index page.
        for each in soup.find_all('div', class_="feed clearfix"):
            onePage = {}
            # Last reply time ("最后回复时间").
            lastRTime = each.find("li", title="最后回复时间")
            p("lastRTime：" + lastRTime.get_text())
            onePage["last_r_time"] = lastRTime.get_text()
            # Reply count ("回复数").
            lastRNo = each.find("li", title="回复数")
            p("lastRNo：" + lastRNo.get_text())
            onePage["last_r_no"] = int(lastRNo.get_text())
            # URL and title both come from the first <a> tag's attributes.
            # (Local renamed from `dict`, which shadowed the builtin.)
            attrs = each.a.attrs
            hrefUrl = preUrl + attrs['href']
            p("hrefUrl:" + hrefUrl)
            onePage["href_url"] = hrefUrl
            hrefUrlHashCode = hash(hrefUrl)
            p("hrefUrlHashCode:" + str(hrefUrlHashCode))
            onePage["href_url_hash_code"] = hrefUrlHashCode
            titleStr = attrs['title']
            p("titleStr:" + titleStr)
            onePage["title_str"] = titleStr
            # Thread summary text.
            descStr = each.dd.get_text()
            p("descStr:" + descStr)
            onePage["desc_str"] = descStr
            # Poster name: titles look like "[tag]<name>的...".
            nameStr = ""
            if titleStr.count("]") > 0:
                nameList = titleStr.split("]")
                if nameList[1].count("的") > 0:
                    nameStr = nameList[1].split("的")[0]
            p("nameStr:" + nameStr)
            onePage["name_str"] = nameStr
            # Thread id: trailing digits of the thread URL.
            forumId = re.match(r"http://qgc\.qq\.com/314962432/t/(\d*)", hrefUrl).group(1)
            p("forumId:" + str(forumId))
            onePage["forum_id"] = int(forumId)
            # Persist this thread's row.
            writeOneReToPageListTable(onePage)
        # Find the "next page" link: the <span>下一页</span> sits inside the
        # <a> that carries the href.
        nextPageUrl = ""
        for each in soup.find_all('span'):
            if each.get_text().count("下一页") > 0:
                nextPageUrl = preUrl + each.parent.attrs["href"]
        if nextPageUrl:
            # Record progress AND advance the loop.  (The original called
            # nextPageUrl.count() with no argument — a TypeError — and
            # never reassigned curUrl, so it re-fetched the same page
            # forever.)
            addPreToPreTable("cur_url", nextPageUrl)
            curUrl = nextPageUrl
        else:
            # No next page: the crawl is complete.
            keepGoing = False
            addPreToPreTable("has_unfinished_work", False)

def writeOneReToPageListTable(onePage):
    """Insert one thread record into page_list_table unless already present.

    onePage -- dict with keys name_str, forum_id, last_r_time, last_r_no,
    href_url, href_url_hash_code, title_str, desc_str (built by
    flushPageListDb).  Dedupes on href_url_hash_code.
    """
    cursor = db.cursor()
    try:
        cursor.execute(
            "select * from page_list_table where href_url_hash_code = %s",
            (onePage["href_url_hash_code"],))
        if cursor.rowcount > 0:
            p(onePage["href_url"] + "is already in table!")
        else:
            # pymysql understands only %s placeholders regardless of the
            # column type — the original's %d for forum_id raised at
            # execute time.
            sqlStr = ("INSERT INTO page_list_table (name_str, forum_id, "
                      "last_r_time, last_r_no, href_url, href_url_hash_code, "
                      "title_str, desc_str) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
            cursor.execute(sqlStr, (onePage["name_str"], onePage["forum_id"],
                                    onePage["last_r_time"], onePage["last_r_no"],
                                    onePage["href_url"], onePage["href_url_hash_code"],
                                    onePage["title_str"], onePage["desc_str"]))
            p("add " + onePage["href_url"] + " success!")
            db.commit()
    finally:
        # Close on every path — the original leaked the cursor on the
        # duplicate branch.
        cursor.close()

def getPreValue(preName):
    """Return the pre_value stored for preName in pre_table, or None.

    Prints a notice when the key is missing (matching the original's
    behavior of returning None in that case).
    """
    cursor = db.cursor()
    try:
        cursor.execute("select pre_value from pre_table where pre_name = %s",
                       (preName,))
        result = cursor.fetchone()
    finally:
        # The original never closed this cursor.
        cursor.close()
    if result is None:
        p(preName + "is not in table!")
        return None
    # fetchone() returns a 1-tuple; index it directly.  The original
    # recovered the value by splitting repr(tuple) on "'", which breaks
    # for any stored value containing a quote character.
    value = result[0]
    p("result:" + repr(value))
    return value

def createTable(tableName, sqlStr):
    """Execute sqlStr to create tableName, unless the table already exists.

    Existence is decided by scanning the rows of "show tables" for an
    exact match on tableName.
    """
    print("start create table: " + tableName)
    cur = db.cursor()
    cur.execute("show tables")
    already_there = False
    for row in cur:
        p(row)
        p("row.count(tableName):" + str(row.count(tableName)))
        # Each row is a 1-tuple of table name; exact-match membership.
        if tableName in row:
            already_there = True
            p("Table " + tableName + " had already been created!")
            break
        p("if end")
    p("hasCreated:" + str(already_there))
    if not already_there:
        p("execute create")
        cur.execute(sqlStr)
        p(cur)
    cur.close()
    p("finish create table: " + tableName)

def initTables():
    """Create the bookkeeping tables (if absent) and seed the crawl state."""
    # pre_table: simple key/value store (id, pre_name, pre_value) used to
    # persist crawl state between runs.
    sqlStr = ("create table pre_table (id MEDIUMINT NOT NULL AUTO_INCREMENT "
              "PRIMARY KEY,pre_name VARCHAR(255), pre_value VARCHAR(255))")
    createTable("pre_table", sqlStr)
    # page_list_table: one row per forum thread.
    # Column renamed from the original last_no to last_r_no so it matches
    # the dict key built in flushPageListDb and the INSERT issued by
    # writeOneReToPageListTable (which otherwise fails with
    # "Unknown column 'last_r_no'").
    sqlStr = ("create table page_list_table (id MEDIUMINT NOT NULL "
              "AUTO_INCREMENT PRIMARY KEY,name_str VARCHAR(255),forum_id INT,"
              "last_r_time VARCHAR(255),last_r_no INT(8),href_url VARCHAR(255),"
              "href_url_hash_code BIGINT,title_str VARCHAR(255),desc_str VARCHAR(255))")
    createTable("page_list_table", sqlStr)
    # Seed the crawl state: nothing in progress, start from the first page.
    addPreToPreTable("has_unfinished_work", False)
    addPreToPreTable("cur_url", url)

def addPreToPreTable(preName, preValue):
    """Upsert preName=preValue into pre_table.

    Inserts a fresh row when preName is absent, otherwise updates the
    existing row's pre_value.  Commits on either path.
    """
    cursor = db.cursor()
    try:
        cursor.execute(
            "select pre_name,pre_value from pre_table where pre_name = %s",
            (preName,))
        if cursor.rowcount == 0:
            # Key absent: insert it (initial value, e.g. False for
            # has_unfinished_work).
            cursor.execute(
                "INSERT INTO `pre_table` (`pre_name`, `pre_value`) VALUES (%s, %s)",
                (preName, preValue))
            db.commit()
            p("Add " + preName + " successfully!")
        else:
            p(preName + " was in table.")
            cursor.execute(
                "update pre_table set pre_value = %s where pre_name = %s",
                (preValue, preName))
            db.commit()
            p("Update " + preName + " successfully!")
        p("cursor.rowcount:" + str(cursor.rowcount))
    finally:
        # One cursor, closed exactly once.  The original opened a second
        # cursor on the insert path (leaking the first), read rowcount
        # after closing, and closed the survivor twice.
        cursor.close()



def p(msg):
    """Debug helper: print msg.  (Parameter renamed — the original named
    it `str`, shadowing the builtin.)"""
    print(msg)

def soupTest():
    """Exploration helper: parse a locally saved index page with BeautifulSoup.

    Reads f314962432.html from the working directory, prints every field
    extracted per thread (the same fields flushPageListDb stores), then
    prints the "next page" URL (or an empty line when there is none).
    """
    # Build the soup from the local file; "with" closes the handle — the
    # original passed open(...) straight to BeautifulSoup and leaked it.
    with open("f314962432.html") as f:
        soup = BeautifulSoup(f, "lxml")
    # Each thread on the index page lives in a <div class="feed clearfix">.
    resultSet = soup.find_all('div', class_="feed clearfix")
    for each in resultSet:
        # Last reply time ("最后回复时间").
        lastRTime = each.find("li", title="最后回复时间")
        p("lastRTime：" + lastRTime.get_text())
        # Reply count ("回复数").
        lastRNo = each.find("li", title="回复数")
        p("lastRNo：" + lastRNo.get_text())
        # URL and title come from the first <a> tag's attributes.
        # (Local renamed from `dict`, which shadowed the builtin.)
        attrs = each.a.attrs
        hrefUrl = preUrl + attrs['href']
        p("hrefUrl:" + hrefUrl)
        hrefUrlHashCode = hash(hrefUrl)
        p("hrefUrlHashCode:" + str(hrefUrlHashCode))
        titleStr = attrs['title']
        p("titleStr:" + titleStr)
        # Thread summary text.
        descStr = each.dd.get_text()
        p("descStr:" + descStr)
        # Poster name: titles look like "[tag]<name>的...".
        nameStr = ""
        if titleStr.count("]") > 0:
            nameList = titleStr.split("]")
            if nameList[1].count("的") > 0:
                nameStr = nameList[1].split("的")[0]
        p("nameStr:" + nameStr)
        # Thread id: trailing digits of the thread URL.
        forumId = re.match(r"http://qgc\.qq\.com/314962432/t/(\d*)", hrefUrl).group(1)
        p("forumId:" + str(forumId))
    # Find the "next page" link: locate the <span>下一页</span> and take the
    # href of its parent <a>.  Initialized to "" so the final p() cannot
    # raise NameError when no next-page link exists (the original would).
    nextPageUrl = ""
    for each in soup.find_all('span'):
        if each.get_text().count("下一页") > 0:
            nextPageUrl = preUrl + each.parent.attrs["href"]
    p(nextPageUrl)

def insertTest():
    """Scratch test: insert one hand-built row into page_list_table.

    NOTE(review): the column list here uses last_no (matching the CREATE
    TABLE in initTables), while writeOneReToPageListTable inserts into
    last_r_no — one of the two must be wrong; confirm the real schema.
    """
    cursor=db.cursor()
    #,
    sqlStr = "INSERT INTO page_list_table (name_str,forum_id,last_r_time,last_no,href_url,href_url_hash_code,title_str) VALUES (%s,%s,%s,%s,%s,%s,%s)"
    #cursor.execute(sqlStr, ("", 1, "12-25", 5, "http://qgc.qq.com/314962432/t/1",-463295053531315348, "[1号帖]ScalersTalk成长会2017重要说明，提问前必看", "1.成长会2017必知必会我假设你来到成长会是真心想要成长进步的，而不是来寻找安慰的，"))
    # Author's note (translated): for Chinese strings such as a = "浦发银行",
    # convert the encoding before inserting (py2-style
    # a.decode("gbk").encode("utf-8")); then the insert works.
    titleStr="[1号帖]ScalersTalk成长会2017重要说明，提问前必看"
    # str() of a bytes object stores the literal "b'...'" text — presumably
    # a workaround for the utf8 column errors logged in strTest; TODO confirm.
    titleUtf8=str(titleStr.encode("utf8"))
    p(titleUtf8)
    #titleStr = titleStr.decode("gbk").encode("utf-8")
    cursor.execute(sqlStr, ("",1,"12-25", 5,"http://qgc.qq.com/314962432/t/1",-463295053531315348, titleUtf8))
    db.commit()
    cursor.close()
#Convert a string (or bytes) to a MySQL hexadecimal literal.
def quote_buffer(buf):
    """Return buf as a MySQL hex literal, e.g. "ab" -> "x'6162'".

    buf -- str (encoded to UTF-8 first) or bytes.
    The original applied '%02x' % ord(c) per character: on a Python 3 str
    that emits Unicode code points, which for non-ASCII text is not a
    valid sequence of byte pairs — the resulting x'...' literal would be
    rejected or corrupt.  Hexing real UTF-8 bytes fixes that, and is
    unchanged for ASCII input.
    """
    if isinstance(buf, str):
        buf = buf.encode("utf-8")
    return "x'" + buf.hex() + "'"

def strTest():
    """Experiment log: how to round-trip Chinese text through MySQL.

    Keeps the failed attempts as comments and exercises the workaround
    that finally worked: store str(title.encode('utf8')) in pre_table.
    """
    title= "[1号帖]ScalersTalk成长会2017重要说明，提问前必看"
    p("type(title):")
    p(type(title))
    p(repr(title))
    # str() applied to a str object is a no-op:
    #titleStr=str(title)
    #p(repr(titleStr))
    # A double-quoted literal is already str; Python 3's str has no decode()
    # method.  Goal: write Chinese to MySQL in some form and read it back.
    #addPreToPreTable("str_test", title)
    # Writing title directly raised: pymysql.err.InternalError:
    # (1366, "Incorrect string value: '\\xE5\\x8F\\xB7\\xE5\\xB8\\x96...'
    # for column 'pre_value' at row 1")
    # Decoding with "utf8" also errors:
    #title=titleUtf8.decode('utf8')
    #p(repr(title))
    #addPreToPreTable("str_test", titleUtf8)
    # Writing that raised: pymysql.err.InternalError:
    # (1366, "Incorrect string value: '\\xE5\\x8F\\xB7\\xE5\\xB8\\x96...'
    # for column 'pre_value' at row 1")
    # Encoding a second time, then writing:
    #titleUtf8Utf8=titleUtf8.encode('utf8')
    #addPreToPreTable("str_test",titleUtf8Utf8)
    # Raised: AttributeError: 'bytes' object has no attribute 'encode'
    # Workaround that sticks: apply str() to the bytes object.
    titleUtf8 = title.encode('utf8')
    p(repr(titleUtf8))
    titleUtf8Str=str(titleUtf8)
    p(repr(titleUtf8Str))
    addPreToPreTable("str_test", titleUtf8Str)
    titleUtf8Str=getPreValue("str_test")
    p(repr(titleUtf8Str))
    # TODO: try changing the column's type instead of using VARCHAR.

# Scratch driver: exactly one experiment is enabled at a time.  The
# __main__ guard keeps the module importable without firing DB/network
# work on import; running the file directly behaves exactly as before.
if __name__ == "__main__":
    #p(quote_buffer("[1号帖]ScalersTalk成长会2017重要说明，提问前必看"))
    #testDb()
    #initTables()
    #flushPageListDb()
    #saveUrl(url)
    #soupTest()
    #insertTest()
    strTest()
    db.close()