# -*- coding: UTF-8 -*- 
from urllib2 import urlopen, Request, HTTPError
import threading
import time
from BeautifulSoup import BeautifulSoup
import re
import sys
import sqlite3
from worker import WorkerManager

# Default HTTP request headers sent with every fetch.  They impersonate a
# desktop Firefox 3 browser (User-Agent, Accept-*) so the target site serves
# its ordinary HTML pages instead of blocking an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4 (.NET CLR 3.5.30729)',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-cn,zh;q=0.5',
    'Accept-Charset': 'gb2312,utf-8;q=0.7,*;q=0.7',
    'Keep-Alive': '300',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
}

class Spider(object):
    def __init__(self, city, max_work_thread=2):
        self.city = city
        self.max_work_thread = max_work_thread
        self.url = 'http://www.edeng.cn/data/china/%s/' % self.city
        self.cate_names = (
            'job',
            'fashion',
            'house',
            'business',
            'useditem',
            'community',
            'personal',
            'bizservice',
            'service',
        )
    
    def start(self):
        #获得分类列表
        self.get_cate_list()
        
        #分配任务给不同线程(线程池)
        wm = WorkerManager(self.max_work_thread)
        for url in self.cate_list:
            wm.add_job(self.get_cate_content, url)
        wm.start()
        wm.wait_for_complete()

    def get_cate_list(self):
        self.cate_list = [self.url + item + '/' for item in self.cate_names]
        print self.cate_list
        
    def get_cate_content(self, cate_url):
        try:
            #创建数据库连接
            conn = sqlite3.connect('./email.s3db', isolation_level=None)
            
            #如果没有email表，则创建它
            cursor = conn.cursor()
            cursor.execute("CREATE TABLE IF NOT EXISTS [email] (\
                            [id] INTEGER  NOT NULL PRIMARY KEY,\
                            [email] VARCHAR(128)  NULL\
                            )")
            conn.commit()
            cursor.close()
            
            #获得最新页的索引值
            req = Request(url=cate_url, headers=headers)
            doc = urlopen(req).read()
            p = re.compile(cate_url + 'index\d+.html', re.MULTILINE)
            max_page_index_url = p.search(doc).group()
            p = re.compile('\d+')
            max_page_index = int(p.search(max_page_index_url).group()) + 1
            
            for i in reversed(range(1, max_page_index + 1)):
                req = Request(url=cate_url + 'index%d.html' % i, headers=headers)
                doc = urlopen(req).read()
                p = re.compile('(' + self.url + '(\w+/)+\d+.html)', re.MULTILINE)
                content_list = p.findall(doc)

                #如果有匹配的内容URL列表，则继续处理每个内容页
                if content_list:
                    for detail_url, url2 in content_list:
                        #print detail_url
                        try:
                            req = Request(url=detail_url, headers=headers)
                            detail_page = urlopen(req).read()
                            p = re.compile(r'(([A-Za-z0-9]+_+)|([A-Za-z0-9]+\-+)|([A-Za-z0-9]+\.+)|([A-Za-z0-9]+\++))*[A-Za-z0-9]+@((\w+\-+)|(\w+\.))*\w{1,63}\.[a-zA-Z]{2,6}', re.MULTILINE)
                            m = p.search(detail_page)
                            if m:
                                email = m.group()
                                print email.ljust(30) + 'thread_name:' + threading.currentThread().getName()
                                
                                #存入到数据库中
                                c = conn.cursor()
                                c.execute("INSERT INTO email(email) values('%s')" % email)
                                conn.commit()
                                c.close()
                        except:
                            print sys.exc_info()
                            continue
                else:
                    return
        except :
            print sys.exc_info()
            return 