#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'S811_shawn'
__mtime__ = '2017/2/7'
# code is far away from bugs with the god animal protecting
    I love animals. They taste delicious.
              ┏┓      ┏┓
            ┏┛┻━━━┛┻┓
            ┃      ☃      ┃
            ┃  ┳┛  ┗┳  ┃
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━┓
                ┃  神兽保佑    ┣┓
                ┃　永无BUG！   ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛
"""

import requests   ###requests 比urllib2简单,可以通过状态码和head对返回值进行处理
## import Cookie
import os
import sys
sys.path.append('..')
from bs4 import BeautifulSoup

from .support.cookieUtil import convertCookieStrToDict


import logging

# Configure logging BEFORE emitting any record: the first logging call on an
# unconfigured root logger implicitly installs a default handler, which would
# make this basicConfig (and its filename/format) a silent no-op.
logging.basicConfig(level=logging.DEBUG,
                    format='%(pathname)s %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s %(process)d ',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='scalersLog.log',
                    filemode='w')

# Smoke-test records confirming each level reaches the log file.
logging.debug('This is debug message')
logging.info('This is info message')
logging.warning('This is warning message')


# Cookie captured from a logged-in forum session.
# How to obtain cookieStr: 1) log in to the Scalers group forum in Chrome;
# 2) open DevTools and evaluate `document.cookie` in the console — the
#    resulting string is the QQ-forum cookie pasted below.
# NOTE(review): this hard-coded cookie expires; refresh it before each run.
cookieStr = "msuid=ixd1pfm0igetvlpzr96czn5luixzyxhmndvgayqb; tvfe_boss_uuid=566b50d9fdc5bbce; o_cookie=4865368; pac_uid=1_4865368; 314962432_4865368_credit=%E5%8F%91%E8%A1%A8%E4%B8%BB%E9%A2%98_2; pgv_info=ssid=s3500092560; pgv_pvid=6951177248; pt2gguin=o0004865368; uin=o0004865368; skey=MeoUFQ14kC; ptisp=ctc; RK=GIduR/uLXd; ptcz=fb14c0478423d149664d299e294e3022c54d863e52c9383b486df3ce86ff88a1; rv2=80923E3506A552AB1C6B713B6FD53C4B5CEA656D4A33E0A3C0; property20=D265D79B5CA27332B4267CA67062293D42F7B52E5C6D6712D2059A1616B76EF6EA4AD19612469ACC; qq_photo_key=2bfb00689f26b0ff699b4d183e1ef50c; qqUser=%7B%22uin%22%3A4865368%2C%22nickName%22%3A%22shawn%22%7D; uniqueuid=a7badc8eca57a3ce3a1964bb58fb9ea4; security_cookiereport=1487407314; MANYOU_SESSIONID_bf895=071dfe93920f544f77a27cc67512226f"
# Parse the raw cookie string into the dict form `requests` expects.
cookies = convertCookieStrToDict(cookieStr)

##打开论坛存储网页到文件
def fetchAndStoreForumPost(start, sliceLength, mutex, rootdir):

    baseurl = "http://qgc.qq.com/309916014/t"

    ## FIXME 目录存在的还就需要删除 －－－－－－完善
    ##
    for mIndex in range(start, start + sliceLength):  # 成长会成员数

        if (mIndex > 502):
            print('larger than 502...')

        memberdir = "member" + str(mIndex)
        curDir = "/Users/shawn/eclipse_workspace_pdev_group/pdev_v1/scalerforum/src/scalers/scalersForumPost/"

        logging.info("thread execute record " + str(mIndex))
        print("thread execute record " + str(mIndex))

        if mutex.acquire(1): ## 如果不加锁会出现,member1/member02/member03/member05的情况
            if not os.path.exists(memberdir):
                os.mkdir(memberdir)
            os.chdir(memberdir)
            curDir = curDir + "/" + memberdir +"/"
            os.chdir("..")
        mutex.release()

        url = baseurl + str(mIndex)

        content = getOnePage(url, cookies)

        ## finished.md 做完成员爬取完的标识,不需要重新爬一次
        finishedFileName = curDir + 'finished.md'

        memberPostNoExistFileName = curDir + "notExist.md"
        # if os.path.isfile(finishedFileName):
        #     logging.info('member' + str(mIndex) + ' is finished,dont need to get again')
        #     ## print('member' + str(i) + ' is finished,dont need to get again')
        #     continue

        if os.path.isfile(memberPostNoExistFileName):
            logging.info('member' + str(mIndex) + ' not exist,dont need to get again')
            continue

        fetchFirstPost(baseurl, curDir, finishedFileName, mIndex, memberPostNoExistFileName)
        # fetchOneMemberPost(baseurl, curDir, finishedFileName, mIndex, memberPostNoExistFileName)
        # print('write page Ok,curDir' + curDir)
        logging.info('write member {0} ok,curDir={1}'.format(mIndex,curDir))


## Fetch the first page of one member's posts
def fetchFirstPost(baseurl, curDir, finishedFileName, mIndex, memberPostNotExist):
    """Download page 1 of member *mIndex*'s thread and write it to disk.

    Side effects: writes curDir/page_1.html; writes *memberPostNotExist*
    when the member's thread does not exist; writes *finishedFileName*
    when there is no next page.

    Args:
        baseurl: forum thread base URL (no trailing slash).
        curDir: directory (with trailing slash) to store the pages in.
        finishedFileName: marker file path meaning "fully fetched".
        mIndex: member index, appended to baseurl as the thread id.
        memberPostNotExist: marker file path meaning "thread missing".
    """
    pageIndex = 1

    fileName = curDir + 'page_' + str(pageIndex) + ".html"
    ## Already downloaded — skip instead of re-fetching.
    if os.path.isfile(fileName):
        return

    # (The original guarded this with `if pageIndex == 1:`, which was always
    # true, and bumped pageIndex at the end to no effect — both removed.)
    url = baseurl + "/" + str(mIndex)

    print('write url=' + url)
    content = getOnePage(url, cookies)
    if not isMemberPostExist(content):
        writePage("this member not exist.", pageIndex, memberPostNotExist)

    writePage(content, pageIndex, fileName)

    if not isHasNextPage(content):
        writePage("finished...", pageIndex, finishedFileName)
        logging.info('not hasNext,url' + url)
    else:
        logging.info('hasNext,url=' + url)



def writePage(content, i, fullName):
    """Write *content* (str) to the file *fullName*, overwriting it.

    Args:
        content: text to write (pages are UTF-8 decoded by getOnePage).
        i: page index — unused, kept for caller compatibility.
        fullName: destination file path.
    """
    # Context manager closes the handle even if write() raises (the original
    # leaked it on error); explicit utf-8 avoids failures on platforms whose
    # default encoding can't represent the Chinese page content.
    with open(fullName, 'w', encoding='utf-8') as f:
        f.write(content)


def getOnePage(url, cookies):
    """GET *url* with the given cookie dict and return the body as str.

    The response body arrives as bytes; it is decoded as UTF-8 before
    being returned, since downstream code does str processing only.
    """
    response = requests.get(url, cookies=cookies)
    raw_body = response.content  # bytes
    return raw_body.decode(encoding="utf-8")


def isHasNextPage(content):
    """Return True if *content* (an HTML page, str) contains a next-page link.

    Membership test short-circuits on the first hit, unlike the original
    count()-based check which scanned the whole page.
    """
    return '<span>下一页</span>' in content

def isMemberPostExist(content):
    soup = BeautifulSoup(content,'html.parser',from_encoding='utf-8')
    links = soup.find_all('div',attrs={"class":"error"})
    # if links is None:
    #     print("error class not exist=")
    #     return
    if not links:
        print("error class is empty.")
        return True
    for link in links:
        b = (link.text).find('主题不存在或已被删除')  ## (link.text).find('主题不存在') 返回int型 Can't convert 'int' object to str implicitly
        print("link.text=" + str(b))
        if (b == 1):
            return False
            ## url = 'http://qgc.qq.com/309916014/t/200' 没有error
            ## 2016年12月31日 测试场景:testIsMemberExist