#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '更新page_list_table'
__author__ = 's125_nanoorchis'
__mtime__ = '2017/3/9'
"""
import requests
import re
import pymysql
from bs4 import BeautifulSoup
# Persist resume state in the database so a crawl interrupted mid-run can
# continue where it left off (see pre_table helpers below).
db = pymysql.connect("localhost", "test20170307", "test20170307", "scalers_forum_db",charset="utf8")
# URL prefix prepended to the relative hrefs scraped from the pages.
preUrl="http://qgc.qq.com"
# NOTE(review): older session cookie kept below for reference only.
#cookieStr="pgv_info=ssid=s7697820000; pgv_pvid=9846196600; pt2gguin=o0569128050; uin=o0569128050; skey=Ms20afuY7t; ptisp=ctc; RK=NF2y81Obdf; ptcz=edc743d195123fdffc86a793a5107b3de11370b9383a51a08cefb5b7faff6b2c; pac_uid=1_569128050; MANYOU_SESSIONID_bf895=9fb9094348e51eccb2410816f41f3d7e; qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; security_cookiereport=1489149578"
cookieStr="sd_userid=46611477323355677; sd_cookie_crttime=1477323355677; tvfe_boss_uuid=4088ef056bb05328; pac_uid=1_569128050; eas_sid=N1e4j894T6l8q7a6q4x0H2j4N5; RK=fF26s1O6dd; pgv_pvi=751090688; pgv_si=s3762871296; ptui_loginuin=569128050; pgv_info=ssid=s5681652212; pgv_pvid=4670341722; o_cookie=569128050; pt2gguin=o0569128050; uin=o0569128050; skey=MccPDuxvnU; ptisp=ctc; ptcz=e21326411328d62054792d62165ca96ae63c5188c3b5131f3d8a8451a6ae0d70; MANYOU_SESSIONID_bf895=48525e22843a9682b72507af97ddff4e; qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; security_cookiereport=1489926546"
# Convert a browser cookie header string ("k1=v1; k2=v2; ...") into a dict.
def convertCookieStrToDict(cookieStr):
    """Parse a cookie header string into a {name: value} dict.

    Pairs are separated by "; ".  Each pair is split on the FIRST "=" only,
    so values that themselves contain "=" (URL-encoded or base64 data, as in
    the qqUser cookie above) are preserved intact.  Fragments without any
    "=" are skipped instead of crashing.
    """
    cookies = {}
    for pair in cookieStr.split("; "):
        key, sep, value = pair.partition("=")
        if sep:  # skip malformed fragments that have no "="
            cookies[key] = value
    return cookies

# Cookie dict attached to every authenticated HTTP request below.
cookies=convertCookieStrToDict(cookieStr)

# Starting URL of the crawl (forum index page).
url="http://qgc.qq.com/314962432"

# Print helper used throughout this script as a logging shorthand.
def p(msg):
    """Print *msg* to stdout.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    print(msg)

def getPreValue(preName):
    """Return the pre_value stored for *preName* in pre_table.

    Returns the stored string, or None when the name is not present
    (a warning is printed in that case).
    """
    cursor = db.cursor()
    # Query parameters are passed as a sequence so pymysql escapes them.
    cursor.execute("select pre_value from pre_table where pre_name = %s", (preName,))
    # commit refreshes the transaction snapshot so updates made by other
    # sessions since the last commit become visible.
    db.commit()
    result = cursor.fetchone()
    if result is None:
        p(preName + " is not in table!")
    else:
        # fetchone returns a 1-tuple; take the value directly.  The old
        # str(result).split("'")[1] hack broke on values containing quotes.
        result = result[0]
    cursor.close()
    return result

def addPreToPreTable(preName, preValue):
    """Upsert a (pre_name, pre_value) row in pre_table.

    Inserts the row when *preName* does not exist yet; otherwise updates
    its pre_value.  Commits in both cases.
    """
    cursor = db.cursor()
    # Parameters passed as a tuple for proper escaping.
    cursor.execute("select pre_name,pre_value from pre_table where pre_name = %s", (preName,))
    if cursor.rowcount == 0:
        # Row does not exist yet: create it.  (The original opened a second
        # cursor here, leaking the first one.)
        insertSql = "INSERT INTO `pre_table` (`pre_name`, `pre_value`) VALUES (%s, %s)"
        cursor.execute(insertSql, (preName, preValue))
    else:
        # Row exists: overwrite the stored value.
        updateSql = "update pre_table set pre_value = %s where pre_name = %s"
        cursor.execute(updateSql, (preValue, preName))
    db.commit()
    cursor.close()

def writeOneReToPageListTable(onePage):
    """Insert or update one scraped thread record in page_list_table.

    *onePage* is a dict with keys forum_id, name_str, last_r_time, last_no,
    href_url, href_url_hash_code, title_str, desc_str (built by storeMemInfo).
    """
    cursor = db.cursor()
    cursor.execute("select * from page_list_table where forum_id = %s",
                   (onePage["forum_id"],))
    if cursor.rowcount > 0:
        # Record already exists for this member: refresh the mutable fields.
        sqlStr = ("update page_list_table set name_str = %s ,last_r_time = %s ,"
                  "last_no = %s,title_str = %s,desc_str = %s where forum_id = %s")
        cursor.execute(sqlStr, (onePage["name_str"], onePage["last_r_time"],
                                onePage["last_no"], onePage["title_str"],
                                onePage["desc_str"], onePage["forum_id"]))
    else:
        # New record.  BUGFIX: the original INSERT listed `title_str` twice
        # and never stored desc_str; the last column/value pair is now
        # desc_str as intended.
        sqlStr = ("INSERT INTO page_list_table (forum_id, last_r_time, last_no, "
                  "href_url, href_url_hash_code, title_str, desc_str) "
                  "VALUES (%s, %s, %s, %s, %s, %s, %s)")
        cursor.execute(sqlStr, (onePage["forum_id"], onePage["last_r_time"],
                                onePage["last_no"], onePage["href_url"],
                                onePage["href_url_hash_code"],
                                onePage["title_str"], onePage["desc_str"]))
        p("add " + onePage["href_url"] + " success!")
    # BUGFIX: the original only committed/closed on the insert branch, so
    # updates were never explicitly committed by this function.
    db.commit()
    cursor.close()

def storeMemInfo(soup):
    """Extract every member-thread entry from a parsed listing page and
    persist each one through writeOneReToPageListTable.

    *soup* is a BeautifulSoup document of one listing page; each entry is
    a <div class="feed clearfix"> element.
    """
    resultSet = soup.find_all('div', class_="feed clearfix")
    for each in resultSet:
        onePage = {}
        # Last-reply time (the <li> whose title attribute is "最后回复时间").
        lastRTime = each.find("li", title="最后回复时间")
        onePage["last_r_time"] = lastRTime.get_text()
        # Reply count (<li title="回复数">).
        lastRNo = each.find("li", title="回复数")
        onePage["last_no"] = int(lastRNo.get_text())
        # URL and title from the first <a> tag.  Renamed from `dict`,
        # which shadowed the builtin.
        linkAttrs = each.a.attrs
        hrefUrl = preUrl + linkAttrs['href']
        p("hrefUrl:" + hrefUrl)
        onePage["href_url"] = hrefUrl
        # NOTE(review): built-in hash() is randomized per process in
        # Python 3 (PYTHONHASHSEED), so this persisted value is not stable
        # across runs — consider zlib.crc32/hashlib if it must be; kept
        # unchanged to stay compatible with existing rows.
        onePage["href_url_hash_code"] = hash(hrefUrl)
        titleStr = linkAttrs['title']
        onePage["title_str"] = titleStr
        # Summary text.
        onePage["desc_str"] = each.dd.get_text()
        # Member name, extracted heuristically from the title:
        # "[tag]name的..." / "[tag]name2017..." / "name的...".
        nameStr = ""
        if titleStr.count("]") > 0:
            nameList = titleStr.split("]")
            if nameList[1].count("的") > 0:
                nameStr = nameList[1].split("的")[0]
            elif nameList[1].count("2017") > 0:
                nameStr = nameList[1].split("2017")[0]
        elif titleStr.count("的") > 0:
            nameStr = titleStr.split("的")[0]
        onePage["name_str"] = nameStr
        # Forum (thread) id: trailing digits of the thread URL.
        # Raw string avoids the invalid-escape warning for \d.
        forumId = re.match(r"http://qgc.qq.com/314962432/t/(\d*)", hrefUrl).group(1)
        p("forumId:" + str(forumId))
        onePage["forum_id"] = int(forumId)
        # Persist this record.
        writeOneReToPageListTable(onePage)

# Update page_list_table; one listing page contains many members' entries.
def mainF():
    """Crawl listing pages starting from the saved cur_url, storing every
    entry, and following "下一页" (next page) links until none remain.

    Resume state lives in pre_table: cur_url is the page to (re)process,
    has_unfinished_work flags a crawl in progress.  When the last page is
    reached, cur_url is reset to the start URL and the flag cleared.
    """
    # Loop while there is a next page; stop (and reset state) when there
    # is none, or when an error page is returned.
    isContinue = True
    while isContinue:
        # URL currently being processed (possibly left over from an
        # interrupted previous run).
        curUrl = getPreValue("cur_url")
        p("starting get page: " + curUrl)
        response = requests.get(curUrl, cookies=cookies)
        # Decode once and reuse; the original decoded the payload twice.
        content = response.content.decode(encoding="utf-8")
        # Keep a copy of the raw page for debugging.  Context manager and
        # explicit encoding replace the bare open/close pair.
        with open("../Download/current.html", 'w', encoding="utf-8") as f:
            f.write(content)

        # Mark that a crawl is in progress so an interrupt can resume.
        addPreToPreTable("has_unfinished_work", True)
        # Parse the page and store every member entry found on it.
        soup = BeautifulSoup(content, 'lxml')
        storeMemInfo(soup)
        # If the request produced an error page, report it and stop looping.
        result = soup.find("div", class_="error")
        if result is not None:
            p(result.p)
            p("!!!can not get the page!!!")
            isContinue = False
        # Look for a "next page" (下一页) link and save it as the new cur_url.
        hasNextPage = False
        for each in soup.find_all('span'):
            if each.get_text().count("下一页") > 0:
                curNode = each.parent
                nextPageUrl = preUrl + curNode.attrs["href"]
                addPreToPreTable("cur_url", nextPageUrl)
                hasNextPage = True
                # (The duplicate has_unfinished_work write the original did
                # here was redundant — the flag is already True.)
                p("has next page!")
        if not hasNextPage:
            p("it is end!")
            isContinue = False
            # Reset resume state so the next run starts from the beginning.
            addPreToPreTable("cur_url", "http://qgc.qq.com/314962432")
            addPreToPreTable("has_unfinished_work", False)


# Run the crawl, then release the database connection.
mainF()
db.close()
# The triple-quoted string below holds disabled one-time setup/utility code
# (table creation, debugging dumps).  It is a bare string-literal statement,
# so it has no runtime effect; kept byte-for-byte for reference.
'''
#保存respnse的内容为html文件
def saveUrl(url,cookies):
    fileName="f"+(re.match(r'http://qgc.qq.com/(\d*)',url)).group(1)
    #print (memberNo)
    response = requests.get(url, cookies=cookies)
    # print(response.content.decode(encoding="utf-8"))
    f=open("../Download/"+fileName+".html",'w')
    f.write(response.content.decode(encoding="utf-8"))
    f.close()

def saveSoup(soup):
    f = open("../Download/error.html", 'w')
    f.write(str(soup.prettify))
    f.close()
#通过页面的内容来判断是否有下一页
def hasPageUrlNextPage(content):
    #是否有下一页字符串
    if content.count('<span>下一页</span>') > 0:
        return True
    else:
        return False

def createTable(tableName,sqlStr):
    print("start create table: "+tableName)
    cursor=db.cursor()
    cursor.execute("show tables")
    hasCreated=False
    for row in cursor:
        p(row)
        p("row.count(tableName):"+str(row.count(tableName)))
        if (row.count(tableName))>0:
            hasCreated = True
            p("Table "+tableName+" had already been created!")
            break
        p("if end")
    p("hasCreated:" + str(hasCreated))
    if not hasCreated:
        p("execute create")
        cursor.execute(sqlStr)
        p(cursor)
    cursor.close()
    p("finish create table: "+tableName)

def initTables():
    #如果preTable不存在，则创建这个表。表三列，分别是id，pre_name,pre_value
    sqlStr="create table pre_table (id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY,pre_name VARCHAR(255), pre_value VARCHAR(255))"
    createTable("pre_table",sqlStr)
    #如果表page_list_table不存在，则创建这个表。表9列。
    #分别是id, name_str,forum_id,last_r_time,last_no,href_url,href_url_hash_code,title_str,desc_str
    sqlStr="create table page_list_table (id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY,name_str VARCHAR(255),forum_id INT,last_r_time VARCHAR(255),last_no INT(8),href_url VARCHAR(255),href_url_hash_code BIGINT,title_str VARCHAR(255),desc_str VARCHAR(255))"
    createTable("page_list_table",sqlStr)
    #如果has_unfinished_work条目不存在，则创建，初始pre_value为false。
    addPreToPreTable("has_unfinished_work", False)
    addPreToPreTable("cur_url",url)




 
#initTables()
#flushPageListDb()
'''

