#coding=utf-8

import sys,os
import re
import socket
import time
from urllib import urlopen,urlretrieve
import urllib2
import hashlib

class Crawler:
    """Simple BBS crawler: fetches pages, extracts text and links, and
    tracks already-crawled URLs between runs via a done-set file."""

    def open_read(self, url, proxy='0'):
        """Fetch *url* and return its body transcoded from GBK to UTF-8.

        proxy: '0' (default) means a direct connection; any other value is
        installed as an HTTP proxy address.  Returns None when the fetch
        fails (after printing a diagnostic).
        """
        if proxy == "0":
            socket.setdefaulttimeout(200)  # guard against a hung read()
            try:
                page = urlopen(url)
                try:
                    # Board pages are served as GBK; normalize to UTF-8.
                    return page.read().decode("gbk", "ignore").encode("utf-8")
                finally:
                    # Close even when read() raises (the original leaked here).
                    page.close()
            except IOError:
                print("timeout, link=" + url)
        else:
            try:
                proxy_support = urllib2.ProxyHandler({'http': proxy})
                opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                return urllib2.urlopen(url).read()
            except Exception:  # was a bare except; keep best-effort behavior
                print("proxy error")

    def initSets(self, done_set_file):
        """Initialize working sets and load previously-crawled URL hashes
        from *done_set_file* under data_dir."""
        self.wm_domain = 'http://www.bdwm.net/bbs/'
        self.data_dir = 'F:\\IS10\\grad\\data\\'
        self.url_set = set()
        self.url_swap_set = set()
        self.done_set = set()
        # saveDoneSet() writes space-separated tokens, so read them back with
        # split(); the original iterated f.read() character by character,
        # filling done_set with single characters instead of hashes.
        with open(self.data_dir + done_set_file, 'r') as f:
            for token in f.read().split():
                self.done_set.add(token)

    def saveDoneSet(self, done_set_file):
        """Persist done_set as space-separated tokens (mirror of initSets).

        The original called ``f.close`` without parentheses, so the file was
        never explicitly closed; ``with`` guarantees it now.
        """
        with open(self.data_dir + done_set_file, 'w') as f:
            f.write(' '.join(self.done_set))

    def doPage(self, url):
        """Fetch *url* and save its stripped in-page text under an
        MD5-derived filename ('A' + md5 hex digest) in data_dir."""
        hash_url = 'A' + hashlib.md5(url).hexdigest()
        with open(self.data_dir + hash_url, 'w') as f:
            f.write(self.getHtmlText(self.open_read(url)))
        #self.done_set.add(hash_url)

    def getHtmlText(self, html):
        """Return all inter-tag text fragments of *html* joined by spaces."""
        fragments = re.compile(r'>(.*?)<').findall(html)
        return ' '.join(fragments)

    def getHtmlLinks(self, html):
        """Collect every href target found in *html* into url_swap_set."""
        for link in re.compile(r'href=.(.*?)\'').findall(html):
            self.url_swap_set.add(str(link))

    def run(self, url):
        """Crawl outward from *url*: harvest links into the swap set, then
        repeatedly save pages from the frontier, swapping the two sets when
        the active one is nearly exhausted.  Stops after ~10 pages or when
        both frontiers are (nearly) empty."""
        html = self.open_read(url)
        self.getHtmlLinks(html)
        self.url_set = self.url_set - self.done_set
        cnt = 0
        while cnt < 10:
            if len(self.url_set) < 2 and len(self.url_swap_set) < 2:
                return
            if len(self.url_set) < 2:
                self.url_set, self.url_swap_set = self.url_swap_set, self.url_set
            for i in self.url_set:
                self.doPage(self.wm_domain + i)
                cnt = cnt + 1
            self.saveDoneSet('done.some')

class WmCrawler:
    """Crawler specialized for the bdwm.net BBS: strips the site frame from
    a post and walks a board's threads via the in-page 'next' links."""

    def __init__(self):
        # Per-instance Crawler.  The original used an eagerly-built class
        # attribute (crawler = Crawler()) shared by every instance.
        self.crawler = Crawler()

    def openread(self, url):
        """Fetch *url* through the underlying Crawler (direct connection)."""
        return self.crawler.open_read(url)

    def removeWMFrame(self, pagetext):
        """Return the post body of *pagetext* with the site frame stripped.

        Tries header/footer marker pairs in the same order as the original
        if/else ladder; returns '' when none of them match.
        """
        for pattern in (r'站内信件(.*?)【',
                        r'站内信件(.*?)※ 来源',
                        r'转信(.*?)【',
                        r'转信(.*?)※ 来源'):
            rp = re.compile(pattern).search(pagetext)
            if rp:
                return rp.group(1)
        return ''

    def crawl_board(self, name_board, url='0'):
        """Crawl one board, appending each stripped post (one per line) to
        F:\\IS10\\grad\\data\\A<name_board>.

        name_board: board identifier used in the URL and output filename.
        url: '0' (default) builds the board index URL from *name_board*;
        anything else is used as the starting page directly.
        """
        if url == '0':
            html_board = self.openread(
                'http://www.bdwm.net/bbs/bbsdoc.php?board=' + str(name_board))
        else:
            html_board = self.openread(url)
        f = open('F:\\IS10\\grad\\data\\' + 'A' + str(name_board), 'w')
        try:
            # First thread link; if the page has none this raises and the
            # file is still closed via the finally below (the original leaked).
            url_wire = re.compile('bbscon.*?dig=\"').search(html_board).group(0)
            link_flag = 3
            t = time.time()
            cnt = 0
            # link_flag counts 'bbscon' occurrences on the page; 3 appears to
            # mean a further 'next' link exists -- TODO confirm against site.
            while link_flag == 3:
                try:
                    html_wire = self.openread('http://www.bdwm.net/bbs/' + url_wire)
                    text_wire = re.compile(r'>([^<]*)<').findall(html_wire)
                    link_flag = len(re.compile(r'bbscon').findall(html_wire))
                    f.write(self.removeWMFrame(' '.join(text_wire).replace('\n', ' ')) + '\n')
                    url_wire = re.compile(r'bbscon.php[^\"]*').search(html_wire).group(0)
                except Exception:
                    # Any failure (network error, regex miss) ends the walk;
                    # keep the original best-effort diagnostics.  Narrowed
                    # from a bare except so Ctrl-C still interrupts.
                    print("%s %s" % (sys.exc_info()[0], sys.exc_info()[1]))
                    print(url_wire)
                    link_flag = 2
                    f.flush()
                cnt = cnt + 1
                if cnt == 20000:  # hard cap against infinite loops
                    break
        finally:
            f.close()
        print(time.time() - t)
                
if __name__ == "__main__":
    # Entry point: crawl every configured board with one WmCrawler instance.
    wm = WmCrawler()
    for board in 'Windows ypjh'.split():
        wm.crawl_board(board)


# Reference list of all bdwm.net board names.  This is a bare (unassigned)
# string literal, so it has no runtime effect; kept as documentation for
# choosing which boards to pass to crawl_board().
'''
AcademicInfo ACM_Algo ADAgent AdvancedEdu Advertising AnHui Anniversary \
Anthropology AoA Application Arbitration Archaeology Architecture ASCIIArt \
Astrology Astronomy Auditory Automobile Aviation Badminton Banquet Baseball \
Basketball BBShelp BBSInfo bbslists BDQN BDSJC BDXK Beauty BeiJing BFA \
Billiards Bioinformatics Biology Blessing Board Boy Bridge Brush_Art \
Campus CampusInfo CAPU CCC ccdra CCME CESE Chemistry Chess \
Chimusic Chinese ChongQing Chorus Christianity cinemapku CIO_Forum CIS \
ClassicalMusic cnAdmin cnBM cntest COE COES Collection Collectors \
ColorShow Comic Communications Complain Computer Countryroad Courses \
CPlusPlus CR CS CSArch Dance dance_company demography Detective DHXY \
diary DigitalWorld DIY Drama Dream DVDZone ECCA ECNU Econometrics \
Economiclovers Economics Ecosociety EDA EduLDA EECS Electronics Emprise \
EnglishTest EnglishWorld Englishwriting Fairytales FamilyLife FantasyWorld \
Feeling Fitness Food Football FormulaOneZone FreeRunning Freshman Friend \
Friends FuJian Futurama Future Game GanSu Geology Geophysics Ghost Girl \
GIS GoToUniv Graduation Greenlife Green_hut GSE GSM GuangDong GuitarPKU \
GuoXue HaiNan HappyLife Hardware Harmonica Health HeNan Herbalism Heroes \
HHA HiFi HipHop History HK_MC_TW Homepage House HSC HuBei HuNan HuXiangCulture \
IBMThinkPad ICST ID IM IME Innovation Intern ITrade JapanResearch Java JiangSu \
JiangXi jingwu JNFQ Job Job_Post Job_Servant Joke Kaoyan KoreanSalon LAAPKU \
Law leagueforum Leisure_Game LetsChat LiaoNing LifeScience Linux Literature \
losefat LostFound lottery Love loveheart LZU MA Marxism Master Mathematics \
MathTools Mechanics Melancholy Memory Mentality Meteo Military MNWH \
MobileDigital ModernMusicClub Movie MUD Music_box Muthos NetResources \
Networking NewAgemusic NIE Notebook notepad NSD NUDT Olympic OPETech \
Orchestra OriginalFire Oversea ParttimeJob PCA PCIA PersonalCorpus \
PetsEden PHD Philo Philosophy PhotoView Physics Pictures PieBridge \
Piefriends PKUdevelopment PKUER PKUFA PKUHistory \
PKULeagueschool PKUPA PKUPI PKUSIFEA PKUTV pkuveg PKU_Announce \
PKU_Feedback PKU_OSFA PKU_PE PKU_Suggest PKU_TuanWei Poems Politics \
PopMusic post Programming Psychology PUMA Radio Reader RedCross Relatives \
RelicFans Renju Riddle RockMusic Romance RPKU rules RuralChina SANGUO SAPA \
ScentedWorld Science ScienceFiction SCM SecondHand SecretGarden SESS SFL \
SG ShaanXi ShanDong ShangHai ShanXi Shopping SiChuan Single SIS Skate SL \
SLP SMS Software Sports SPS SSM SST Stock StoneStory Story StudentUnion \
STWT sunshine Swimming sysop sysreport SZPKU TableTennis Tea TeacherStudent \
Tennis Test Thesis TianJin TRA Travel TrendyLife Triangle TryYourBest TV \
Urban Video_Game Virus_Security VisualBasic Volleyball Volunteers vote \
WanLiu Water WeClub WellBeing WenYan WesternMusic Windows wine WMClub \
WorldHeritage WUXIE XiangSheng XinWenZX XSKC YASP ypjh YTHT Yueju YunNan \
ZheJiang
'''
