#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '根据成长帖地址抓取成长帖所有内容并结构化'
__author__ = 's125_nanoorchis'
__mtime__ = '2017/3/19'
"""
import requests
import re
import pymysql
import os
import chardet
import time
import urllib.request
from bs4 import BeautifulSoup
# Shared MySQL connection used by every function below.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file.
db = pymysql.connect("localhost", "test20170307", "test20170307", "scalers_forum_db",charset="utf8")

def p(message):
    """Shorthand for print(); used for progress logging throughout the script.

    The original parameter was named ``str``, shadowing the builtin — renamed.
    """
    print(message)
# URL prefix — thread links in the forum HTML are relative to this host.
preUrl="http://qgc.qq.com"
# Session cookies copied from a logged-in browser session.
# NOTE(review): these are credentials hard-coded in source and they expire;
# they should live outside the repository.
# (The original file assigned cookieStr twice; the first, stale value was
# dead code and has been removed — only the final value below was ever used.)
cookieStr="sd_userid=46611477323355677; sd_cookie_crttime=1477323355677; tvfe_boss_uuid=4088ef056bb05328; pac_uid=1_569128050; eas_sid=N1e4j894T6l8q7a6q4x0H2j4N5; RK=fF26s1O6dd; pgv_pvi=751090688; pgv_si=s3762871296; ptui_loginuin=569128050; uniqueuid=c0ff22fd9a923c3b48f899b8d5f7daf4; pgv_info=ssid=s5681652212; pgv_pvid=4670341722; o_cookie=569128050; pt2gguin=o0569128050; uin=o0569128050; skey=MAxCg7T4r6; ptisp=ctc; ptcz=e21326411328d62054792d62165ca96ae63c5188c3b5131f3d8a8451a6ae0d70; MANYOU_SESSIONID_bf895=48525e22843a9682b72507af97ddff4e; qqUser=%7B%22uin%22%3A569128050%2C%22nickName%22%3A%22%5Cu65ad%5Cu6865%5Cu6b8b%5Cu96ea%22%7D; security_cookiereport=1490072564"
# Convert a browser-style cookie string into the dict form requests expects.
def convertCookieStrToDict(cookieStr):
    """Parse "k1=v1; k2=v2; ..." into {"k1": "v1", "k2": "v2", ...}.

    BUG FIX: the original split each pair on every "=", so a value that
    itself contains "=" (e.g. "pgv_info=ssid=s568..." in the cookie string
    above) was silently truncated.  Split only on the first "=".
    """
    cookies = {}
    for pair in cookieStr.split("; "):
        key, _, value = pair.partition("=")
        cookies[key] = value
    return cookies

# Parsed cookie dict passed to every requests.get() call below.
cookies=convertCookieStrToDict(cookieStr)

'''
#创建表
CREATE TABLE `scalers_table` (
  `id` mediumint(9) NOT NULL AUTO_INCREMENT,
  `title` varchar(255) DEFAULT NULL,
  `url` varchar(255) DEFAULT NULL,
  `parent_url` varchar(255) DEFAULT NULL,
  `level` varchar(255) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=677 DEFAULT CHARSET=utf8
'''

# Check whether a URL is already present in the database.
def isUrlInTable(url):
    """Return True if *url* already exists in scalers_table.

    Uses a parameterized query; the cursor is now closed even when
    execute() raises (the original leaked it on that path).
    """
    cursor = db.cursor()
    try:
        cursor.execute("select * from scalers_table where url=%s", (url,))
        return cursor.rowcount > 0
    finally:
        cursor.close()
# Fetch the title of the level-0 (root) page.
def get_title_for_url_level_0(url):
    """Fetch *url* and return its page title, cleaned for later reuse.

    "/" is replaced with "-" so the title can safely be used as part of a
    file or directory name.  The original contained a dead ``if False:``
    branch (leftover debugging scaffold) — removed.
    """
    response = requests.get(url, cookies=cookies)
    p("status_code:" + str(response.status_code))
    # Decode using the detected charset; drop any undecodable bytes.
    content_str = response.content.decode(chardet.detect(response.content)["encoding"], "ignore")
    soup = BeautifulSoup(content_str, "lxml")
    title = soup.title.text.strip().replace("/", "-")
    soup.clear()
    return title

# Store the level-0 (root) URL in the database.
def get_url_level_0():
    """Fetch the root forum page's title and upsert it with level=0.

    Delegates the insert-or-update to save_url_in_table instead of
    duplicating the same SQL here (the original repeated that logic).
    """
    url_level_0 = "http://qgc.qq.com/314962432"
    title = get_title_for_url_level_0(url_level_0)
    save_url_in_table(url_level_0, title, 0)

# Read the level-0 URL back from the database.
def get_url_level_0_from_table():
    """Return the level-0 URL stored in scalers_table, or None if absent.

    The original crashed with TypeError (``None[0]``) when the table had no
    level-0 row, misspelled the local as ``url_levle_0``, and leaked the
    cursor if execute() raised.
    """
    cursor = db.cursor()
    try:
        cursor.execute("select url from scalers_table where level=0")
        row = cursor.fetchone()
        return row[0] if row is not None else None
    finally:
        cursor.close()

def get_title_for_url_level_1(soup):
    """Build the title for a level-1 page: the page title plus its page number.

    "/" is swapped for "-" because the title may later be used in paths.
    """
    base_title = soup.title.text.strip().replace("/", "-")
    # <strong class="current"> holds the current page number in the paginator.
    current_page = soup.find("strong", class_="current").text.strip()
    return base_title + "第" + str(current_page) + "页"

def save_url_in_table(cur_url, title, level):
    """Upsert (title, url, level) into scalers_table.

    Inserts when the URL is new, otherwise updates title and level.
    BUG FIX: the original called db.cursor() twice in a row, leaking the
    first cursor; the cursor is also now closed on the exception path.
    """
    cursor = db.cursor()
    try:
        if not isUrlInTable(cur_url):
            cursor.execute("insert into scalers_table (title,url,level) values(%s,%s,%s)", (title, cur_url, level))
        else:
            cursor.execute("update scalers_table set title=%s,level=%s where url=%s", (title, level, cur_url))
        db.commit()
    finally:
        cursor.close()

# Get the URL of the next page.
def get_next_url_from_url_level_1(soup):
    """Return the absolute URL of the page after the current one.

    In the paginator markup, the element immediately following the
    <strong class="current"> marker links to the next page.
    """
    current_marker = soup.find("strong", class_="current")
    next_link = current_marker.next_sibling
    return preUrl + next_link.attrs["href"]

def get_soup_for_url(url):
    """GET *url* with the session cookies and return a BeautifulSoup tree.

    Detects the charset with chardet and ignores undecodable bytes; logs
    the HTTP status and the page title along the way.
    """
    response = requests.get(url, cookies=cookies)
    p("status_code:" + str(response.status_code))
    raw = response.content
    detected_encoding = chardet.detect(raw)["encoding"]
    soup = BeautifulSoup(raw.decode(detected_encoding, "ignore"), "lxml")
    p("title: " + soup.title.text)
    return soup

def get_url_level_1():
    """Walk the level-0 pagination, saving each page into the DB as level 1.

    BUG FIX: the original ``while True`` had no break at all — its own
    comments admit it "exits" by crashing when the last page has no
    next-page link.  That missing-link condition is now caught and ends
    the loop cleanly.
    """
    # The URL currently being processed (starts at the root forum page).
    cur_url = "http://qgc.qq.com/314962432"
    while True:
        soup = get_soup_for_url(cur_url)
        # Title is the page title plus "第N页".
        title = get_title_for_url_level_1(soup)
        save_url_in_table(cur_url, title, 1)
        try:
            next_url = get_next_url_from_url_level_1(soup)
        except (AttributeError, KeyError):
            # No next-page link: we are on the last page — done.
            soup.clear()
            break
        soup.clear()
        p(title)
        p(next_url)
        cur_url = next_url

def get_url_level_2():
    """For every level-1 page, extract each thread's URL and save it as level 2.

    Iterates the stored level-1 URLs, scrapes every
    <div class="feed clearfix"> entry, and upserts its absolute URL and
    title.  Fixes from the original: the local named ``dict`` shadowed the
    builtin, and the cursor was never closed.
    """
    cursor = db.cursor()
    try:
        cursor.execute("select url from scalers_table where level=1")
        for row in cursor.fetchall():
            url_level_1 = row[0]
            soup = get_soup_for_url(url_level_1)
            for feed in soup.find_all('div', class_="feed clearfix"):
                # The entry's first <a> carries both the relative link and the title.
                attrs = feed.a.attrs
                url_level_2 = preUrl + attrs['href']
                p(url_level_2)
                title = attrs['title'].strip()
                save_url_in_table(url_level_2, title, 2)
            soup.clear()
    finally:
        cursor.close()

def save_url_level_2():
    """Download every level-2 thread, page by page, into ../Download/member<id>/.

    For each stored level-2 URL, follows the "下一页" (next page) links and
    writes each page's prettified HTML to page_<n>.html.

    BUG FIXES vs. the original:
    - ``f.close`` was missing parentheses, so no file handle was ever
      closed; replaced with a ``with`` block.
    - ``open(..., 'w')`` used the platform default encoding, which raises
      UnicodeEncodeError for the Chinese content on non-UTF-8 locales;
      the encoding is now pinned to utf-8 to match the decode above.
    - ``soup.clear()`` was skipped on the last page's break path.
    """
    cursor = db.cursor()
    cursor.execute("select url from scalers_table where level=2 order by url")

    for each_url in cursor.fetchall():
        member_id = re.search(r"/t/(\d+)", each_url[0]).group(1)
        # NOTE(review): relies on the cwd being the project's test directory;
        # chdir gymnastics below restore it at the end of each member.
        os.chdir("../Download")
        dir_name = "member" + str(member_id)
        if not os.path.exists(dir_name):
            os.mkdir(dir_name)
        os.chdir("./" + dir_name)
        p("current dir is: " + dir_name)
        cur_url = each_url[0]
        while True:
            response = requests.get(cur_url, cookies=cookies)
            content_str = response.content.decode(encoding="utf-8")
            soup = BeautifulSoup(content_str, "lxml")
            p("current page title is: " + soup.title.text.strip())
            # Single-page threads have no paginator at all — treat as page 1.
            current = soup.find("strong", class_="current")
            page_no = 1 if current is None else current.text.strip()
            file_name = "page_" + str(page_no) + ".html"
            p("start save file: " + file_name)
            with open(file_name, 'w', encoding="utf-8") as f:
                f.write(soup.prettify())
            next_span = soup.find("span", string=re.compile(".*下一页.*"))
            if next_span is None:
                soup.clear()
                break
            cur_url = preUrl + next_span.parent.attrs["href"]
            soup.clear()
            p("finish save file: " + file_name)
        os.chdir("../../test")
    cursor.close()

# Download all the growth threads. Use with care on an unreliable network —
# progress is not recorded yet, so an interrupted run starts over.
def downForumPage():
    """Run the final stage of the scraping pipeline.

    The earlier stages are kept here, commented out, as a record of the
    one-time setup steps that populate the database.
    """
    #get_url_level_0()
    #get_url_level_1()
    #get_url_level_2()
    save_url_level_2()

#downForumPage()
#接下来开始尝试多线程部分
