# -*- coding: utf-8 -*-
import re
import os
import requests
import time
import sys
from bs4 import BeautifulSoup


class SisSpider(object):

    url_root = 'http://68.168.16.147/forum/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
    areas = {'xinv': 'Western picture galleries | 西洋靓女骚妹',
             'katong': 'Animation Art Gallery  | 动漫卡通游戏贴图区',
             'dongfang': 'Asian Beauty Collection  | 东方靓女集中营'}
    novels = {'wenxue': '文学作者区',
              'yuanchuang': '原创人生区'}

    def __init__(self):
        self.session = requests.Session()
        if sys.platform == "win32":
            self.download_root = r'D:\temp\sis'
        else:
            self.download_root = r'/home/pi/sis'
        self.timeout = 5
        self.login()

    def login(self):
        url = os.path.join(self.url_root, 'logging.php?action=login')
        data = {'62838ebfea47071969cead9d87a2f1f7': 'style7en',
                'c95b1308bda0a3589f68f75d23b15938': 'zhangjia',
                'cookietime': '2592000',
                'formhash': 'e6f8a3af',
                'loginfield': 'username',
                'loginsubmit': 'true',
                'questionid': '0'}
        self.session.post(url, data=data, headers=self.headers, timeout=self.timeout)

    def save_all_picture(self, page_num=1):
        # 保存area板块下所有的图片，第page_num页之前
        for area in self.areas:
            # 通过板块定位到板块的链接
            url1 = os.path.join(self.url_root, 'index.php')
            try:
                r1 = self.session.get(url1, headers=self.headers, timeout=self.timeout)
            except:
                print '[connect %s fail...]' % url1
                continue

            soup = BeautifulSoup(r1.content, 'html.parser', from_encoding="gbk")
            tag = soup.find('a', text=self.areas[area])
            link1 = tag['href']
            # print link1 # forum-64-1.html

            # 在此板块找前N页的帖子地址
            for i in xrange(1, page_num + 1):
                link2 = os.path.join(self.url_root, re.sub(r'(\d+).html', str(i) + '.html', link1, 1))
                # print link2  # http://68.168.16.147/forum/forum-64-3.html

                # 根据帖子链接得到图片
                try:
                    r2 = self.session.get(link2, headers=self.headers, timeout=self.timeout)
                except:
                    print '[connect %s fail...]' % link2
                    continue
                soup = BeautifulSoup(r2.content, 'html.parser', from_encoding="gbk")
                tz_tag = soup.find_all('a', text=re.compile(r'\[\d+P\]'))
                for each in tz_tag:
                    url2 = os.path.join(self.url_root, each['href'])
                    self.save_picture(each.text, url2, area)
                    # print url2, each.text  # http://68.168.16.147/forum/thread-9466752-1-1.html 美若天仙[26P]

    def save_picture(self, title, url, area):
        # 通过帖子地址保存帖子中所有的图片
        directory = os.path.join(self.download_root, area, title)
        # print directory
        if os.path.isdir(directory):
            return
        os.makedirs(directory)

        try:
            r1 = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except:
            print '[connect %s fail...]' % url
            return

        soup = BeautifulSoup(r1.content, 'html.parser', from_encoding="gbk")
        img_tag = soup.find_all('img', src=re.compile(r'http.*'))
        for index, each in enumerate(img_tag):
            img_url = each['src']
            try:
                r2 = self.session.get(img_url, headers=self.headers, timeout=self.timeout)
                filename = os.path.join(directory, str(index) + os.path.splitext(img_url)[-1])
                print time.strftime('%Y-%m-%d %H:%M:%S'), filename
                with open(filename, 'wb') as img_file:
                    img_file.write(r2.content)
            except:
                print '[connect %s fail...]' % img_url
                continue

    def save_all_novel(self, page_num=1):
        # 保存area板块下所有的内容，第page_num页之前
        for e in self.novels:
            # 通过板块定位到板块的链接
            url1 = os.path.join(self.url_root, 'index.php')
            try:
                r1 = self.session.get(url1, headers=self.headers, timeout=self.timeout)
            except:
                print '[connect %s fail...]' % url1
                continue

            soup = BeautifulSoup(r1.content, 'html.parser', from_encoding="gbk")
            tag = soup.find('a', text=self.novels[e])
            link1 = tag['href']
            # print link1  # forum-383-1.html

            # 在此板块找前N页的帖子地址
            for i in xrange(1, page_num + 1):
                link2 = os.path.join(self.url_root, re.sub(r'(\d+).html', str(i) + '.html', link1, 1))
                # print link2  # http://68.168.16.147/forum/forum-322-2.html

                # 根据帖子链接得到文字
                try:
                    r2 = self.session.get(link2, headers=self.headers, timeout=self.timeout)
                except:
                    print '[connect %s fail...]' % link2

                soup = BeautifulSoup(r2.content, 'html.parser', from_encoding="gbk")
                tz_tag = soup.find_all('a', text=re.compile(ur'^【'))
                for each in tz_tag:
                    url2 = os.path.join(self.url_root, each['href'])
                    self.save_novel(each.text, url2, e)
                    # print url2, each.text  # http://68.168.16.147/forum/thread-9474782-1-3.html 【他是谁】5-6章

    def save_novel(self, title, url, area):
        # 通过帖子地址保存帖子中所有的内容
        directory = os.path.join(self.download_root, self.novels[area].decode('utf-8'))
        if not os.path.exists(directory):
            os.makedirs(directory)

        try:
            r1 = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except:
            print '[connect %s fail...]' % url
            return

        soup = BeautifulSoup(r1.content, 'html.parser', from_encoding="gbk")
        # tag = soup.find('div', class_="t_msgfont", style="font-size:14pt")
        filename = os.path.join(directory, title + '.txt')
        print time.strftime('%Y-%m-%d %H:%M:%S'), filename
        with open(filename, 'wb') as fobj:
            fobj.write(soup.text.encode('utf-8'))

def main():
    """Entry point: log in, then mirror one page of pictures and novels."""
    spider = SisSpider()
    spider.save_all_picture(1)
    spider.save_all_novel(1)


if __name__ == '__main__':
    main()
