#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys      # 为了参数
import urllib2  # 发送URL的包
import httplib
import demjson  # json的解码、编码
import time, datetime;  # 时间
import MySQLdb;
from HTMLParser import HTMLParser # 解析HTML

# HACK: force HTTP/1.0 on all httplib connections to work around
# "httplib.IncompleteRead: IncompleteRead(5876 bytes read)" raised while
# reading some responses; HTTP/1.0 disables chunked transfer encoding.
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'

class Page:
    """Value object for one crawled gallery page.

    Attributes: code (numeric page code), name (page title), url.
    """

    def __init__(self, code=None, name=None, url=None):
        self.code = code
        self.name = name
        self.url = url

    def __str__(self):
        return "Page[%s, %s, %s]" % (self.code, self.name, self.url)

class Image:
    """Value object for one image found on a page.

    Attributes: name (file name), pageId (DB id of the owning page), url.
    """

    def __init__(self, name=None, pageId=None, url=None):
        self.name = name
        self.pageId = pageId
        self.url = url

    def __str__(self):
        return "Image[%s, %s, %s]" % (self.name, self.pageId, self.url)

class CrawlerDB:
    """Thin persistence layer over the MySQL `crawler` schema.

    Fixes over the original: all statements now use PEP 249 parameterized
    queries (the old string-interpolated SQL was open to SQL injection and
    broke on names/URLs containing quotes), and every cursor is closed in
    a finally block so it is not leaked when execute()/commit() raises.
    """

    def __init__(self):
        # Open the database connection: host, user, password, schema.
        self.db = MySQLdb.connect("localhost", "app", "firstapp", "crawler")

    def savePage(self, page):
        """Insert one row into `pages`; commits and returns True."""
        cursor = self.db.cursor()
        try:
            # The driver escapes the bound values itself.
            cursor.execute(
                "insert into pages(code, name, url) values(%s, %s, %s)",
                (page.code, page.name, page.url))

            if cursor.rowcount != 1:
                # @temp by peter:
                print("这是错误的，需要异常处理")

            self.db.commit()
        finally:
            cursor.close()
        return True

    def getPageId(self, code):
        """Return the `pages.id` of the row whose code equals *code*.

        Expects exactly one matching row; anything else is reported but
        (as before) not yet raised as an exception.
        """
        cursor = self.db.cursor()
        try:
            cursor.execute("select id from pages where code = %s", (code,))
            data = cursor.fetchall()

            if len(data) != 1:
                # @temp by peter:
                print("这是错误的，需要异常处理")

            result = data[0][0]
        finally:
            cursor.close()
        return result

    def saveImage(self, image):
        """Insert one row into `images`; commits and returns True."""
        cursor = self.db.cursor()
        try:
            cursor.execute(
                "insert into images(name, page_id, url) values(%s, %s, %s)",
                (image.name, image.pageId, image.url))

            if cursor.rowcount != 1:
                # @temp by peter:
                print("这是错误的，需要异常处理")

            self.db.commit()
        finally:
            cursor.close()
        return True

    def close(self):
        """Close the underlying database connection."""
        self.db.close()

class MyHTMLParser(HTMLParser):
    """Extracts gallery image URLs (and optionally a page title) from HTML.

    Collects an Image for every <img src=...> found inside
    <ul id="hgallery">; when *needTitle* is true, the first text inside
    <h1 id="htilte"> is stored as self.page.name. Callers are expected to
    assign self.page before feeding data (main() does this).
    """

    def __init__(self, needTitle):
        HTMLParser.__init__(self)
        self.isTarget = False       # inside <ul id="hgallery">?
        self.isTitle = False        # inside <h1 id="htilte">?
        self.needTitle = needTitle  # still want to capture the title?
        self.images = []            # Image objects found so far
        # Fix: self.page was never initialized, so handle_data raised
        # AttributeError if a title was parsed before callers set .page.
        self.page = None

    def handle_starttag(self, tag, attrs):
        if tag == "ul" and len(attrs) > 0:
            for attr in attrs:
                if attr[0] == 'id' and attr[1] == 'hgallery':
                    self.isTarget = True
                    break
        elif tag == "img" and len(attrs) > 0 and self.isTarget:
            for attr in attrs:
                if attr[0] == 'src':
                    # Image file name = the URL part after the last '/'.
                    self.images.append(Image(attr[1].rpartition("/")[2], None, attr[1]))
        elif tag == "h1" and len(attrs) > 0:
            for attr in attrs:
                # NOTE: 'htilte' is the site's actual (misspelled) element id.
                if attr[0] == "id" and attr[1] == "htilte":
                    self.isTitle = True

    def handle_endtag(self, tag):
        if tag == "ul" and self.isTarget:
            self.isTarget = False
        elif tag == "h1" and self.isTitle:
            self.isTitle = False

    def handle_data(self, data):
        # Record the first title text seen, then stop looking.
        if self.isTitle and self.needTitle and self.page is not None:
            self.page.name = data
            self.needTitle = False

def save(db, page, images):
    """Persist *page* and its *images* through *db* (a CrawlerDB).

    A page with no images is treated as missing/blocked and skipped.
    """
    if not images:
        print("Error: 没有此页面或者是被屏蔽了！")
        return

    db.savePage(page)
    page_id = db.getPageId(page.code)

    # Link every image to the freshly inserted page row, then store it.
    for img in images:
        img.pageId = page_id
        db.saveImage(img)

def out(page, images):
    """Debug helper: dump a page and its images to stdout."""
    print(page)

    if not images:
        print("Error: 被屏蔽了！")
    for img in images:
        print("\t" + str(img))

def main(startCode, limit):
    """Crawl codes [startCode, startCode + limit) and persist the results.

    For each code, fetches http://www.zngirls.com/g/<code>/<idx>.html with
    increasing idx until a fetch adds no new images to the parser, then
    saves the accumulated page + images through CrawlerDB. The connection
    is closed in finally even if a fetch or save raises.
    """
    db = CrawlerDB();

    try:
        for i in range(limit):
            idx = 1;
            code = startCode + i;
            page = Page(startCode + i, None, "http://www.zngirls.com/g/%d" % code);
            # idx is always 1 at this point, so the parser always starts
            # with needTitle=True and captures the title from page 1.
            parser = MyHTMLParser(True if idx == 1 else False);
            parser.page = page;
            preSize = 0;

            while True:
                url = "http://www.zngirls.com/g/%d/%d.html" % (code, idx);
                request = urllib2.Request(url);
                # Browser-like User-Agent; presumably the site blocks the
                # default urllib2 agent — TODO confirm.
                request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36');
                respose = urllib2.urlopen(request);
                parser.feed(respose.read());

                # Stop once a sub-page contributes no new images
                # (past the last page, or the gallery is blocked).
                if len(parser.images) == preSize:
                   break;

                preSize = len(parser.images);
                idx += 1;

            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\tOver the code: " + str(parser.page.code));
            save(db, parser.page, parser.images)
            # out(parser.page, parser.images)
            print;
            sys.stdout.flush();

    finally:
        db.close();

# Crawl window defaults; overridable from the command line:
#   argv[1] = start code (only honored when beyond the built-in default),
#   argv[2] = number of consecutive codes to crawl.
# startCode = 10000;
startCode = 21994  # 21858; #21687;
limit = 250  # 22104
# max = 22104 21857 21687 21288 21369 21394 21616 21395 21622 21857

if len(sys.argv) >= 2:
    requested = int(sys.argv[1])
    if requested > startCode:
        startCode = requested

if len(sys.argv) >= 3:
    limit = int(sys.argv[2])

main(startCode, limit)
