#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 's125_nanoorchis'
__mtime__ = '2017/3/8'
"""

# NOTE(review): these are live session cookies (skey, uin, ...) committed in
# source — they are credentials. Move them to an untracked config file and
# rotate the session. The commented line below is an older, expired session.
#cookieStr1="sd_userid=46611477323355677; sd_cookie_crttime=1477323355677; tvfe_boss_uuid=4088ef056bb05328; pac_uid=1_569128050; eas_sid=N1e4j894T6l8q7a6q4x0H2j4N5; RK=fF26s1O6dd; pgv_pvi=751090688; pgv_si=s3762871296; ptui_loginuin=569128050; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; pgv_info=ssid=s3206305046&pgvReferrer=; pgv_pvid=6689437934; o_cookie=569128050; pt2gguin=o0569128050; uin=o0569128050; skey=MAOlmO4FpC; ptisp=ctc; ptcz=e21326411328d62054792d62165ca96ae63c5188c3b5131f3d8a8451a6ae0d70; MANYOU_SESSIONID_bf895=5173a011aaba08f219450ef1fb2e9e5f;qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; security_cookiereport=1488638674"
cookieStr="sd_userid=46611477323355677; sd_cookie_crttime=1477323355677; tvfe_boss_uuid=4088ef056bb05328; pac_uid=1_569128050; eas_sid=N1e4j894T6l8q7a6q4x0H2j4N5; RK=fF26s1O6dd; pgv_pvi=751090688; pgv_si=s3762871296; ptui_loginuin=569128050; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; pgv_info=ssid=s3206305046&pgvReferrer=; pgv_pvid=6689437934; o_cookie=569128050; pt2gguin=o0569128050; uin=o0569128050; skey=Me9efoRrBb; ptisp=ctc; ptcz=e21326411328d62054792d62165ca96ae63c5188c3b5131f3d8a8451a6ae0d70; MANYOU_SESSIONID_bf895=5173a011aaba08f219450ef1fb2e9e5f; qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; security_cookiereport=1488973870"
import requests
import re
import pymysql
from bs4 import BeautifulSoup
# Module-level MySQL connection shared by every DB helper below.
# NOTE(review): credentials are hard-coded; consider reading them from config.
db = pymysql.connect("localhost", "test20170307", "test20170307", "scalers_forum_db")
def convertCookieStrToDict(cookieStr):
    """Convert a raw ``Cookie`` header string into a ``{name: value}`` dict.

    The input is the browser-copied form ``"k1=v1; k2=v2; ..."``.

    Bug fix: split each pair on the FIRST ``=`` only. The original split on
    every ``=``, which truncated values that themselves contain ``=`` (e.g.
    ``pgv_info=ssid=...&pgvReferrer=`` in this module's own cookieStr).
    """
    cookies = {}
    for pair in cookieStr.split("; "):
        # partition never raises; a pair without '=' maps to an empty value.
        name, _, value = pair.partition("=")
        cookies[name] = value
    return cookies

# Module-level cookie jar reused by every request below.
cookies=convertCookieStrToDict(cookieStr)
# Seed URL: the forum group this script crawls.
url="http://qgc.qq.com/314962432"

def saveUrl(url):
    """Download *url* and save the decoded HTML as ../Download/f<groupId>.html.

    The file name is derived from the numeric group id in the URL
    (``http://qgc.qq.com/<digits>``). Raises AttributeError if the URL does
    not match that pattern.
    """
    fileName = "f" + re.match(r'http://qgc.qq.com/(\d*)', url).group(1)
    response = requests.get(url, cookies=cookies)
    # Bug fix: use a context manager (the original leaked the handle on a
    # write error) and pin the output encoding — plain open(..., 'w') uses a
    # platform-dependent default, which corrupts the UTF-8 page on Windows.
    with open("../Download/" + fileName + ".html", 'w', encoding="utf-8") as f:
        f.write(response.content.decode(encoding="utf-8"))

def hasPageUrlNextPage(content):
    """Return True if the page HTML *content* contains a "next page" marker.

    The forum renders a literal ``<span>下一页</span>`` ("next page") element
    only when more pages exist.
    """
    # Idiom: a membership test replaces count(...) > 0 plus the if/else.
    return '<span>下一页</span>' in content
def flushPageListDb():
    """Refresh page_list_table; one "page" lists several members' profile URLs.

    Resumes from the stored ``cur_url`` when a previous run left unfinished
    work, otherwise starts from the seed ``url``. The parse-and-store loop
    body is still unimplemented (TODOs below).
    """
    # NOTE(review): getPreValue returns a *string* from the DB, so a stored
    # "False"/"0" is still truthy here — confirm the stored representation
    # and compare explicitly if needed.
    hasUnFinishedWork = getPreValue("has_unfinished_work")
    p("hasUnFinishedWork:" + str(hasUnFinishedWork))
    if hasUnFinishedWork:
        # Bug fix: the original branches were inverted — it restarted from
        # the seed url exactly when there WAS unfinished work to resume.
        curUrl = getPreValue("cur_url")
    else:
        curUrl = url
    p("cur_url:" + getPreValue("cur_url"))
    keepGoing = True
    while keepGoing:
        # Bug fix: fetch the page we are resuming (curUrl), not the seed url.
        response = requests.get(curUrl, cookies=cookies)
        content = response.content.decode(encoding="utf-8")
        # TODO: parse the member rows out of `content` (regex/BeautifulSoup)
        #       and insert each into page_list_table.
        # TODO: if hasPageUrlNextPage(content), write the next page's URL to
        #       cur_url and advance curUrl; otherwise stop.
        # Placeholder stop: the original stub looped forever re-fetching the
        # same page because the flag was never cleared.
        keepGoing = False

def getPreValue(preName):
    """Return the ``pre_value`` string stored for *preName* in pre_table.

    Returns None when no row exists for *preName*.
    """
    cursor = db.cursor()
    try:
        # Parameterized query; pass params as a tuple per DB-API convention.
        cursor.execute("select pre_value from pre_table where pre_name = %s", (preName,))
        row = cursor.fetchone()
    finally:
        cursor.close()
    p("result:" + str(row))
    # Bug fix: index the result tuple directly. The original parsed
    # str(result) on quote characters, which broke on any value containing a
    # quote and raised IndexError (instead of a clear result) on a missing row.
    result = row[0] if row else None
    p("result:" + str(result))
    return result

def createTable(tableName, sqlStr):
    """Create *tableName* by executing *sqlStr*, unless it already exists.

    Existence is checked by scanning ``show tables`` output.
    """
    print("start create table: " + tableName)
    cursor = db.cursor()
    # try/finally guarantees the cursor is released even if a query fails
    # (the original leaked it on any exception).
    try:
        cursor.execute("show tables")
        # Each row is a 1-tuple like ('pre_table',); tuple membership is the
        # idiomatic exact-match test (replaces row.count(tableName) > 0).
        hasCreated = any(tableName in row for row in cursor)
        p("hasCreated:" + str(hasCreated))
        if hasCreated:
            p("Table " + tableName + " had already been created!")
        else:
            p("execute create")
            cursor.execute(sqlStr)
            p(cursor)
    finally:
        cursor.close()
    p("finish create table: " + tableName)

def initTables():
    """Create pre_table if needed and seed its bookkeeping rows."""
    # pre_table is a simple key/value store: (id, pre_name, pre_value).
    create_sql = "create table pre_table (id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY,pre_name VARCHAR(255), pre_value VARCHAR(255))"
    createTable("pre_table", create_sql)
    # Seed defaults: no unfinished work yet, and crawling starts at the seed url.
    addPreToPreTable("has_unfinished_work", False)
    addPreToPreTable("cur_url", url)

def addPreToPreTable(preName, preValue):
    """Insert (*preName*, *preValue*) into pre_table unless preName exists.

    Commits on insert; otherwise only logs that the row was already present.
    """
    cursor = db.cursor()
    try:
        cursor.execute(
            "select pre_name,pre_value from pre_table where pre_name = %s",
            (preName,),
        )
        if cursor.rowcount == 0:
            insertSql = "INSERT INTO `pre_table` (`pre_name`, `pre_value`) VALUES (%s, %s)"
            # Bug fix: reuse the same cursor. The original opened a second
            # cursor here (leaking the first, still-open one) and then
            # closed the surviving cursor twice.
            cursor.execute(insertSql, (preName, preValue))
            db.commit()
            p("Add " + preName + " successfully!")
        else:
            p(preName + " was in table.")
        p("cursor.rowcount:" + str(cursor.rowcount))
    finally:
        cursor.close()


def p(msg):
    """Debug-print helper: thin wrapper around print().

    Bug fix: the parameter was named ``str``, shadowing the builtin inside
    the function body. All call sites are positional, so the rename is safe.
    """
    print(msg)

def soupTest():
    """Parse the locally saved forum page and print each thread's metadata.

    Reads ``f314962432.html`` (produced by saveUrl) and, for every thread
    block (``div.feed.clearfix``), prints: last-reply time, reply count,
    thread URL, title, description, extracted author name, and thread id.
    """
    # Bug fix: close the file handle via a context manager; the original
    # passed a bare open() whose handle was never closed.
    with open("f314962432.html") as fp:
        soup = BeautifulSoup(fp, "lxml")
    # Each thread lives in one <div class="feed clearfix"> block.
    resultSet = soup.find_all('div', class_="feed clearfix")
    for each in resultSet:
        lastRTime = each.find("li", title="最后回复时间")
        p("最后回复时间：" + lastRTime.get_text())
        lastRNo = each.find("li", title="回复数")
        p("回复数：" + lastRNo.get_text())
        # Bug fix: do not shadow the builtin `dict` with a local name.
        attrs = each.a.attrs
        hrefUrl = "http://qgc.qq.com" + attrs['href']
        p("hrefUrl:" + hrefUrl)
        titleStr = attrs['title']
        p("titleStr:" + titleStr)
        descStr = each.dd.get_text()
        p("descStr:" + descStr)
        # Titles look like "[A1702]<name>的...": the author name sits between
        # the closing bracket and the first "的" (possessive particle).
        nameStr = ""
        if titleStr.count("]") > 0:
            nameList = titleStr.split("]")
            if nameList[1].count("的") > 0:
                nameStr = nameList[1].split("的")[0]
        p("nameStr:" + nameStr)
        # Bug fix: raw string for the pattern — "\d" in a plain string is an
        # invalid escape (DeprecationWarning/SyntaxWarning on modern Python).
        forumId = re.match(r"http://qgc.qq.com/314962432/t/(\d*)", hrefUrl).group(1)
        p("forumId:" + str(forumId))
    # TODO: extract the next-page URL from the soup and continue crawling.

# Script entry point; earlier experiment steps are kept commented out so a
# single stage can be re-enabled at a time.
#testDb()
#initTables()
#flushPageListDb()
#saveUrl(url)
soupTest()

def soupTest1():
    """Experiment: pull the last-reply-time <li> out of a canned HTML snippet.

    Prints the tag's type, the type of its text, and the text itself.
    """
    snippet='''<div class="feed clearfix">
            <ol>
                <li title="最后回复时间"><img src="http://dzqun.gtimg.cn/plaza/images/icon_time.png" align="absmiddle"/> 17:55</li>
                <li title="回复数"><img src="http://dzqun.gtimg.cn/plaza/images/icon_reply.png" align="absmiddle"/> 48</li>
            </ol>
                            <p><img src="http://shp.qlogo.cn/qgc_avatar/0005Pr5v0/0005Pr5v0/50" alt="[S436]芝士就是力量-待" title="[S436]芝士就是力量-待"/></p>
            <dl>
                <dt><a href="/314962432/t/436" title="[A1702]芝士就是力量的2017持续能力训练记录-20170228更新"> [A1702]芝士就是力量的2017持续能力训练记录-20170228更新</a></dt>
                <dd>0.个人基本情况介绍     事项      请填写内容        称呼       芝士就是力量      </dd>
                                </dl>
        </div>'''
    sample = BeautifulSoup(snippet, "lxml")
    reply_time_tag = sample.find("li", title="最后回复时间")
    p(type(reply_time_tag))
    p(type(reply_time_tag.get_text()))
    p(reply_time_tag.get_text())

#soupTest1()