#! /usr/bin/env python 
# -*- coding: utf-8 -*-

import urllib2
import urllib
import cookielib
import socket
import socks
import re
import sys
import os
import time
import ConfigParser

# --- Module configuration (populated from config.ini by load_config) ---

#for setting
g_location = "./"  # output directory; extended to a per-member subdir in __main__
g_name = None      # pixiv login id (config [login] name)
g_password = None  # pixiv login password (config [login] password)

#for filter
g_included_tag = []  # tags a novel must carry to be saved (config [filter] included)
g_excluded_tag = []  # tags that cause a novel to be skipped (config [filter] excluded)

#for socks5
g_proxy_ip = None    # SOCKS5 proxy host; None means connect directly
g_proxy_port = None  # SOCKS5 proxy port (int once set)

g_prefix = "http://www.pixiv.net"  # base URL used to expand relative links
id_list = []  # novel ids already saved; seeded from files on disk in __main__


def login_pixiv():
    cookieJar = cookielib.MozillaCookieJar('cookie.txt')
    #cookieJar = cookielib.CookieJar('cookie.txt')
    global opener
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))

    data={
        "mode":"login",
        "pixiv_id":g_name,
        "pass":g_password
    }
    post_data = urllib.urlencode(data)

    headers ={
        "Host": "www.pixiv.net",
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Referer":"http://www.pixiv.net/novel/show.php?id=3669707"
    }

    try:
        if g_proxy_ip != None and g_proxy_port != None:
            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, g_proxy_ip, g_proxy_port)
            socket.socket = socks.socksocket

        req = urllib2.Request('http://www.pixiv.net/login.php', post_data, headers)
        result = opener.open(req)
        cookieJar.save(ignore_discard=True)
    except Exception,e:
        print e


def get_page(web_url):
    tried = 0
    connected = False

    while not connected:
        try:
            print "get page %s" % web_url
            u = opener.open(web_url, timeout=60.00)
            buffer = u.read()
            connected = True

        except urllib2.URLError, e:
            print "bad URL"
            return None

        except socket.timeout, e:
            print "socket timeout"
            tried += 1
            if tried > 4:
                sys.exit(1)
            time.sleep(30)

        except:
            print "unknown error"
            if tried > 2:
                sys.exit(1)
            else:
                tried += 1
            time.sleep(30)
            #try:
                #u = urllib2.urlopen(web_url)
            #except Exception,e:
                #print e
            #sys.exit(1)

    return buffer


def save_to_txt(start_id, end_id, main_title, series_title, text):
    title = main_title.replace('/', ' ')
    title = title.replace('\\', ' ')
    title = title.replace(':', ' ')

    if start_id != end_id:
        file_name = g_location + start_id + "_" + end_id + " - " + title + ".txt"
    else:
        file_name = g_location + start_id + " - " + title + ".txt"

    try:
        if start_id != end_id:
            handle = open(file_name.decode('UTF-8'), 'a')
        else:
            handle = open(file_name.decode('UTF-8'), 'w')
    except IOError, inst:
        print "        Create file %s failed." % (file_name)
        print '        Cause: ', inst.errno, inst.strerror
        return

    try:
        head = "[" + series_title + "]\n" + "====================\n"
        handle.write(head.decode('UTF-8').encode('GB18030', 'ignore'))
        handle.write(text.decode('UTF-8').encode('GB18030', 'ignore'))
        handle.write("\n\n\n\n\n")
    except IOError:
        print "        Write %s failed." % (series_title)

    handle.close()


def process_text_page(buf):
    """Parse one novel page and return ``(title, body)``.

    Applies the include/exclude tag filter first.  Returns (None, None)
    when the page is filtered out, cannot be parsed, or *buf* is None
    (get_page() returns None for a bad URL).
    """
    if buf is None:
        return (None, None)

    start = buf.find("<span class=\"tags-container\"><ul class=\"tags\">")
    if start < 0:
        print "Cannot find tag list start in page."
        return (None, None)

    tag_buf = buf[start:]
    end = tag_buf.find("</ul></span>")
    if end < 0:
        print "Cannot find tag list end in page."
        return (None, None)
    tag_buf = tag_buf[:end]

    # Keep the page when it carries at least one included tag — or when no
    # include filter is configured at all — and none of the excluded tags.
    to_continue = len(g_included_tag) == 0
    for item in g_included_tag:
        if tag_buf.find(item) >= 0:
            to_continue = True
            break
    for item in g_excluded_tag:
        if tag_buf.find(item) >= 0:
            to_continue = False
            break

    if not to_continue:
        print "Tag does not match."
        return (None, None)

    # Title is the <title> text up to the last '/' (author name follows it).
    start = buf.find("<title>")
    if start < 0:
        return (None, None)
    title_buf = buf[start : buf.find("</title>")]
    name = title_buf[len("<title>") : title_buf.rfind("/")]

    # Novel body lives in a <textarea id="novel_text">...</textarea>.
    marker = "id=\"novel_text\">"
    start = buf.find(marker)
    if start < 0:
        return (None, None)
    buf = buf[start:-1]
    text = buf[len(marker) : buf.find("</textarea>")]

    return (name, text)


def process_novel_page(buf_id, buf):
    start = buf.find("<ul class=\"type-series\">")
    if start < 0:
        series = 0
    else:
        series = 1
        end = buf.find("<li style=\"list-style-type:none;", start)
        buf_tmp = buf[start+7 : end]
        series_list = buf_tmp.split("<a href=\"")

    series_novel_list = []

    (main_name, text) = process_text_page(buf)
    if main_name is None:
        return

    if series == 0:
        print "    Start to save %s - %s" % (buf_id, main_name)
        save_to_txt(buf_id, buf_id, main_name, main_name, text)
        return

    else:
        series_num = len(series_list)
        if series_num < 2:
            print "    Start to save %s - %s" % (buf_id, main_name)
            save_to_txt(buf_id, buf_id, main_name, main_name, text)
            return

        print "    Start to process %s - %s" % (buf_id, main_name)
        print "        Series detected. Continue to parse..."

        for i in range(1, series_num):
            item = series_list[i]
            end = item.find("&amp;uarea=series")
            series_name = item[:end]
            series_url = g_prefix + series_name

            id_start = series_url.find("id=")
            id_num =  series_url[id_start+3 : len(series_url)]

            buf = get_page(series_url)
            (name, text) = process_text_page(buf)
            if name is None:
                continue

            try:
                id_list.index(id_num)
            except:
                id_list.append(id_num)

            v = {'novel_id': id_num, 'novel_name': name, 'novel_text': text}
            series_novel_list.append(v)

        series_novel_list = sorted(series_novel_list, key=lambda d:int(d['novel_id']))

        earlist = series_novel_list[0]
        latest = series_novel_list[-1]

        #delete earlist id file in diretory
        for file_name in os.listdir(g_location):
            find = file_name.find(earlist['novel_id'])
            if find >= 0:
                print "        Delete old file: %s" % (g_location+file_name)
                os.remove(g_location+file_name)
                break

        for cur in series_novel_list:
            print "            Start to save series %s - %s" % (cur['novel_id'], cur['novel_name'])
            save_to_txt(earlist['novel_id'], latest['novel_id'], earlist['novel_name'], cur['novel_name'], cur['novel_text'])


def process_member_main_page(buf):
    """Pull the member's display name out of a profile page's <title>.

    The title looks like: <title>「NAME...」 ... </title>; the name is the
    text between the corner brackets, truncated at the first '/',
    full-width space or '@' (when present past position 0).
    Returns None when no <title> or no closing bracket is found.
    """
    head = buf.find("<title>")
    if head < 0:
        return None

    title_buf = buf[head : buf.find("</title>")]

    close = title_buf.rfind('」')
    if close <= 0:
        return None

    name = title_buf[len("<title>「") : close]

    # Trim trailing decorations in order: "/...", "　...", "@...".
    for sep in ('/', '　', '@'):
        cut = name.find(sep)
        if cut > 0:
            name = name[:cut]

    return name


def process_member_novel_page(buf):
    """Collect novel ids from one member/bookmark listing page.

    Returns ``(count, ids)``; ``(0, None)`` when the page has no novel
    list (the underscore-less "novel-item" class marks the empty-list
    placeholder in pixiv's markup — TODO confirm against current pages).
    The include/exclude tag filter is applied per entry.
    """
    if buf.find("class=\"novel-item\"") > 0:
        return (0, None)

    start = buf.find("class=\"_novel-item\"")
    if start < 0:
        return (0, None)

    items = buf[start:].split("<div class=\"novel-contents\">")

    link_prefix = "<a href=\"/novel/show.php?id="
    list_of_id = []

    # items[0] is the markup before the first entry — skip it.
    for one_item in items[1:]:
        # Keep the entry when it has an included tag — or when no include
        # filter is configured — and no excluded tag.
        keep = len(g_included_tag) == 0
        for item in g_included_tag:
            if one_item.find(item) >= 0:
                keep = True
                break
        for item in g_excluded_tag:
            if one_item.find(item) >= 0:
                keep = False
                break
        if not keep:
            continue

        start = one_item.find(link_prefix)
        if start >= 0:
            rest = one_item[start + len(link_prefix):]
            end = rest.find("\">")
            if end > 0:
                list_of_id.append(rest[0:end])

    return (len(list_of_id), list_of_id)


def load_config():
    try:
        config = ConfigParser.SafeConfigParser()
        config.read("config.ini")

        global g_name, g_password, g_location
        g_name = config.get("login", "name")
        g_password = config.get("login", "password")
        g_location = config.get("path", "directory")
        print "name=%s pwd=%s path=%s" % (g_name, g_password, g_location)

        global g_included_tag, g_excluded_tag
        included_list = config.get("filter", "included")
        if included_list != "":
            g_included_tag = included_list.split('|')
            print_str = "included_tag="
            for item in g_included_tag:
                print_str += "\"" + item + "\""
                print_str += " "
            print print_str

        excluded_list = config.get("filter", "excluded")
        if excluded_list != "":
            g_excluded_tag = excluded_list.split('|')
            print_str = "excluded_tag="
            for item in g_excluded_tag:
                print_str += "\"" + item + "\""
                print_str += " "
            print print_str

        global g_proxy_ip, g_proxy_port
        ip = config.get("socks5", "ip")
        port = config.get("socks5", "port")
        if ip != "" and port != "":
            g_proxy_ip = ip
            g_proxy_port = int(port)
            print "socks5 ip=%s port=%d" % (g_proxy_ip, g_proxy_port)

    except:
        print "Lack config.ini file or config file corrupt."
        sys.exit(1)


if __name__ == '__main__':

    if len(sys.argv) != 3:
        print "Input pixiv [m/n/b] id. n is novel id, m is member id, c is member id + bookmark."

    else:
        load_config()

        login_pixiv()

        single_novel = 0
        to_parse_id_list = []
        to_parse_num = 0

        if sys.argv[1] == 'm' or sys.argv[1] == 'b':
            member_id = sys.argv[2]
            buf = get_page("http://www.pixiv.net/member.php?id=" + member_id)
            member_name = process_member_main_page(buf)

            if member_name == None:
                print "Member %d is not valid\n" % (member_id)
                sys.exit(1)
            if sys.argv[1] == 'b':
                member_name += "-bookmark"

            directory_name = member_id + "-" + member_name;
            g_location = g_location + directory_name + "/"
            if os.path.exists(g_location) == False:
                print "Can not find directory %s. Create it." % (directory_name)
                os.makedirs(g_location)
            else:
                #check directory
                print "Find directory %s" % (g_location)

                files = os.listdir(g_location)
                files.sort()
                for file_name in files:
                    end = file_name.find(" - ")
                    if end > 0:
                        print "Find file %s" % (file_name)
                        file_id = file_name[0 : end]
                        split = file_id.find('_')
                        if split > 0:
                            file_start_id = file_id[0 : split]
                            file_end_id = file_id[split+1 : len(file_id)]
                            id_list.append(file_start_id)
                            id_list.append(file_end_id)
                        else:
                            id_list.append(file_id)

                id_list = sorted(id_list, key=lambda d:int(d), reverse=True)

            page = 1
            while True:
                if sys.argv[1] == 'm':
                    buf = get_page("http://www.pixiv.net/novel/member.php?id=" + member_id + "&p=" + str(page))
                else:
                    buf = get_page("http://www.pixiv.net/novel/bookmark.php?order=&rest=show&id=" + member_id + "&p=" + str(page))
                if buf == None:
                    break

                (novel_num, novel_list) = process_member_novel_page(buf)
                if novel_num == 0:
                    break
                else:
                    to_parse_num = to_parse_num + novel_num
                    to_parse_id_list = to_parse_id_list + novel_list

                page = page+1

            if len(to_parse_id_list) > 0:
                to_parse_id_list = sorted(to_parse_id_list, key=lambda d:int(d))

        elif sys.argv[1] == 'n':
            single_novel = 1
            to_parse_num = 1
            to_parse_id_list.append(sys.argv[2])

        else:
            print "Input pixiv [m/n/b] id. n is novel id, m is member id, c is member id + bookmark."
            sys.exit(1)

        id_list_len = len(id_list)
        if id_list_len > 0:
            latest_id = int(id_list[0])
        else:
            latest_id = 0

        for i in range(0, to_parse_num):
            id_num = to_parse_id_list[i]

            if single_novel == 1 or int(id_num) > latest_id:
                try:
                    id_list.index(id_num)
                except:
                    id_list.append(id_num)
                    buf = get_page("http://www.pixiv.net/novel/show.php?id=" + id_num)
                    process_novel_page(id_num, buf)

