#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import requests
from bs4 import BeautifulSoup

import logging
logging.basicConfig(level=logging.INFO)


def cookieStr_To_Dict(cookieStr):
    """
    Convert a cookie string (as obtained from document.cookie) into a
    dict of the form {cookie_name: cookie_value}.

    :param cookieStr: raw cookie string, entries separated by '; '
    :return: dict mapping cookie name to cookie value
    """
    cookieDict = {}
    # Entries look like "name=value"; split the string on '; ' first.
    for item in cookieStr.split('; '):
        # Partition on the FIRST '=' only — cookie values may themselves
        # contain '=' characters (matches the old non-greedy regex).
        name, sep, value = item.partition('=')
        # Skip malformed fragments with no '=' instead of crashing
        # (the previous regex-based version raised AttributeError here).
        if sep:
            cookieDict[name] = value

    return cookieDict

def get_pageUrls_of_aMember(idMember, myCookies):
    """
    Collect the URLs of every page of one member's posts.

    :param idMember: member id used in the page address (e.g. 555 for mine)
    :param myCookies: my cookies, as a dict
    :return: list of page URLs, starting with the member's first page
    """
    base_url = r'http://qgc.qq.com'
    member_url = base_url + r'/314962432/t/' + str(idMember)
    response = requests.get(member_url, cookies=myCookies)

    # Parse the fetched body with the stdlib "html.parser" backend.
    soup = BeautifulSoup(response.content, "html.parser")

    # Pagination links carry exactly class="c_tx"; match the class list
    # precisely (an <a> with extra classes would NOT qualify).
    # Alternative: locate the <div class="page"> container and iterate
    # its <a> children — the class match below is the direct route.
    is_pager_link = lambda tag: tag.name == 'a' and tag.get('class') == ['c_tx']
    pager_links = soup.find_all(is_pager_link)
    # logging.info(pager_links)

    # The member's landing page is page one; each pager link holds a
    # relative href for one additional page.
    return [member_url] + [base_url + link.get('href') for link in pager_links]

def get_pageContent_of_aMember(idMember, pageUrls, myCookies):
    """
    Download every page of a member's posts and save each one as an HTML
    file inside a per-member directory (created on demand).

    :param idMember: member id
    :param pageUrls: all page URLs of the member's posts, as returned by
                     get_pageUrls_of_aMember()
    :param myCookies: my cookies, as a dict
    """
    dirname = 'member' + '[' + 'S' + str(idMember) + ']'
    # makedirs(exist_ok=True) makes re-runs safe: os.mkdir raised
    # FileExistsError when the member directory already existed.
    os.makedirs(dirname, exist_ok=True)
    name = dirname + '/' + '[' + 'S' + str(idMember) + ']' + '-page-'

    # enumerate() replaces the old range(len(...)) index loop; the
    # unused `html` accumulator variable was dropped.
    for index, url in enumerate(pageUrls):
        response = requests.get(url, cookies=myCookies)
        filename = name + str(index) + '.html'
        # Stream the response body to disk in small chunks.
        with open(filename, 'wb') as fd:
            for chunk in response.iter_content(chunk_size=128):
                fd.write(chunk)
    
def get_members(nMember, myCookies):
    """
    Probe member ids and return the post URLs of members that exist
    (i.e. have posted), e.g. http://qgc.qq.com/314962432/t/555

    :param nMember: exclusive upper bound on the member ids to probe
    :param myCookies: my cookies, as a dict
    :return: list of member post URLs that answered with HTTP 200
    """
    base_url = r'http://qgc.qq.com' + r'/314962432/t/'
    member_urls = []
    # Scan starts at id 9, matching the original code — presumably the
    # first valid member id; verify against the forum if extending.
    for n in range(9, nMember):
        temp_url = base_url + str(n)
        response = requests.get(temp_url, cookies=myCookies)
        if response.status_code == requests.codes.ok:
            # BUG FIX: the old `member_url += temp_url` extended the
            # list one CHARACTER at a time; append() stores the whole
            # URL as a single entry.
            member_urls.append(temp_url)
            # logging.info(temp_url)

    return member_urls
        
if __name__ == '__main__':
    # Cookie string copied verbatim from the browser (document.cookie).
    # NOTE(review): this embeds a live session cookie in the source —
    # consider loading it from an environment variable instead.
    myCookieStr = r"o_cookie=2822650346; pgv_info=pgvReferrer=&ssid=s1621300561; tvfe_boss_uuid=303f54fad5b0ed1a; pgv_pvid=8430805614; uniqueuid=5b0ff44903734ca1ebfe7cd0fecb4c8a; ptcz=d4d12a7cbcfb184a6659f5cb7a650a931914d0d013ece87ff6ee62679bc85dc6; pt2gguin=o2822650346; uin=o2822650346; skey=@AXvk7gusS; qqUser=%7B%22uin%22%3A2822650346%2C%22nickName%22%3A%22%5Cu51ac%5Cu5929%5Cu7684%5Cu70b9%5Cu70b9%22%7D; MANYOU_SESSIONID_bf895=27c1dd26b435653b66b9a1636cd90fc2; security_cookiereport=1488670874"
    myCookieDict = cookieStr_To_Dict(myCookieStr)

    # Log each parsed cookie so the conversion can be eyeballed.
    for cookie_name, cookie_value in myCookieDict.items():
        logging.info('%s = %s', cookie_name, cookie_value)

    # Example invocations (disabled):
    # get_members(1447, myCookieDict)
    # page_urls = get_pageUrls_of_aMember(74, myCookieDict)
    # get_pageContent_of_aMember(74, page_urls, myCookieDict)
