# coding: utf-8 
import os
import sys
import urllib2
import time
import random
import re
import types
import uploader
from xml.etree import ElementTree as ET

class eee_crawler:
    """Crawler for Microsoft Support KB articles (the Chinese "e-baodian"
    series): fetches article XML, converts it to HTML + plain text,
    downloads referenced images, uploads them via ``uploader``, and keeps
    tab-separated map files on disk between runs.

    NOTE: Python 2 code throughout (print statements, ``except Exc, e``,
    urllib2, types.StringTypes).
    """

    def __init__(self, startdir):
        # Directory under which the local 'pics' image cache lives.
        self.startdir = startdir
        # One indentation level in the generated HTML.
        self.indent = ' ' * 4
        # XML namespace of the KB metadata section.
        self.metans = "http://schemas.microsoft.com/taxonomy/2003/1"
        # Default content namespace; parseXml() may override it per article.
        self.contentns = "http://support.microsoft.com/common/schemas/kbpub/3/"
        # Extracts the bare tag name from ElementTree's "{ns}tag" form.
        self.nspat = re.compile(r"\{.*?\}(\w*)")
        # Title-cleanup patterns: each entry is (compiled pattern, group to keep).
        # The literals are UTF-8 byte strings (decode/encode round-trip keeps
        # them UTF-8 regardless of the source-file coding declaration).
        self.titlepat = [(re.compile("易宝典：(.*)".decode("utf-8").encode("utf-8")), 1), (re.compile('(.*)\(|（\s*MVP|上|下.*\)|）'.decode('utf-8').encode('utf-8')), 1)] 
        # KB XML tag -> HTML tag translation table.
        self.tagmap = {
                "GRAPHIC" : "img",
                "NUM_LIST" : "ol",
                "WWW_LINK" : "a",
                "KB_LINK" : "a",
                "ALPHA_LIST" : "ol",
                }
        # Tags whose whole subtree is dropped from the output.
        self.blacklist = set(["OUTPUT", 'BOOKREF', 'BOOKMARK'])
        # Set True during conversion once a boilerplate footer section is seen.
        self.stop = False
        self.u = uploader.uploader()
        # Article id -> cleaned title, accumulated across parseXml() calls.
        self.titles = {}
        self.loadimgmap()
        self.loadtitlemap()

    def __del__(self):
        # No cleanup required; map files are written explicitly by the caller.
        pass

    def loadimgmap(self, mapfilename = "imgmap.txt"):
        """Load the image map ("key<TAB>value" per line) into ``self.imgmap``.

        The map holds two kinds of entries (see __transToHtml): remote image
        URL -> local path, and local path -> uploaded URL.  A missing file
        just yields an empty map; a duplicate key aborts the load partway
        (already-read entries are kept) and prints the ValueError.
        """
        self.imgmapfilename = mapfilename
        self.imgmap = {}

        try:
            file = open(self.imgmapfilename, "r")
        except:
            # Map file is optional on first run.
            return

        try:
            lines = file.readlines()
            for l in lines:
                k, v = l.strip().split('\t', 1)
                if k in self.imgmap:
                    # Duplicate key: treat the map file as corrupt.
                    raise ValueError
                self.imgmap[k]=v
        except Exception, e:
            print e
        finally:
            file.close()
        #print '\n'.join(['%s -> %s' %(k,v) for k,v in self.imgmap.items()])

    def loadtitlemap(self, mapfilename = "titlemap.txt"):
        """Load manual title overrides (article id<TAB>title, UTF-8) into
        ``self.titlemap``.  Same file format and error handling as
        loadimgmap(); a missing file yields an empty map.
        """
        self.titlemapfilename = mapfilename
        self.titlemap = {}

        try:
            file = open(self.titlemapfilename, "r")
        except Exception, e:
            print e
            return

        try:
            lines = file.readlines()
            for l in lines:
                k, v = l.strip().split('\t', 1)
                if k in self.titlemap:
                    raise ValueError
                self.titlemap[k]=v
                #print 'titlemap[%s] = "%s"' %(k, v.decode('utf-8').encode('cp936'))
        except Exception, e:
            print e
        finally:
            file.close()

    def writeimgmap(self):
        """Persist ``self.imgmap`` to disk (sorted, tab-separated), keeping
        the previous file as a "last"-prefixed backup."""
        try:
            os.rename(self.imgmapfilename, "last"+self.imgmapfilename)
        except:
            # No previous file (or rename failed): just overwrite in place.
            pass
        file = open(self.imgmapfilename, "w")
        file.write('\n'.join(['%s\t%s' %(k, v) for k,v in sorted(self.imgmap.items())]))
        file.close()

    def writetitles(self):
        """Dump all collected article titles to titles.txt (sorted by id)."""
        file = open("titles.txt", "w")
        file.write('\n'.join(['%s\t%s' %(k, v) for k,v in sorted(self.titles.items())]))
        file.close()

    def getContent(self, url, data = None, proxies = None):
        """Fetch *url* and return the response body, or None on failure.

        data:    optional dict of POST parameters.
        proxies: optional mapping for urllib2.ProxyHandler, e.g. {'http': ...}.
        A URLError aborts immediately; any other exception is retried up to
        3 times with a random 3-11 second sleep between attempts.
        """
        std_headers = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.1) Gecko/2008070208 Firefox/3.0.1',
                'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
                'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
                'Accept-Language': 'en-us,en;q=0.5',
                }

        if proxies == None:
            print "\t" + "-> Get:["+url+"]"
        else:
            print "\t" + "-> Get:[" + url + "] via "+proxies['http']

        content = None
        retry_num = 3
        while retry_num > 0:
            try:
                if data:
                    # NOTE(review): only `urllib2` is imported, not `urllib`,
                    # so passing `data` raises NameError here (then falls into
                    # the retry branch). Should be imported or use urllib2.
                    data = urllib.urlencode(data)
                if proxies is not None:
                    proxy_support = urllib2.ProxyHandler(proxies)
                    opener = urllib2.build_opener(proxy_support)
                    # Installs the proxy opener process-wide, affecting all
                    # subsequent urllib2 calls, not just this request.
                    urllib2.install_opener(opener)

                request = urllib2.Request(url, data, std_headers)
                response = urllib2.urlopen(request)
                content = response.read()
                break
            except urllib2.URLError, e:
                # Network-level failure: report and give up without retrying.
                content = None
                str1 = "\t"+"-> Get:["+url+"] failed"
                if hasattr(e, 'reason'):
                    str2 = "error reason:"+str(e.reason)
                elif hasattr(e, 'code'):
                    str2 = "error code:"+str(e.code)
                else:
                    str2 = "exception:"+str(e)
                print str1+", "+str2
                break
            except Exception, e:
                print e
                content = None
                retry_num -= 1
                print "\t"+"-> Get:["+url+"] failed,"+str(retry_num)+" times left"
                time.sleep(random.randrange(3,12,1))

        return content

    def __transToHtml(self, root, html, txt, level):
        """Recursively render XML element *root* into *html* (list of HTML
        lines) and *txt* (list of plain-text fragments), indented *level*
        steps.  Sets ``self.stop`` and truncates output once a known
        boilerplate footer phrase is encountered.  May download/upload
        images as a side effect (see the GRAPHIC handling below).
        """
        # stop flag: a previous call hit the boilerplate footer.
        if self.stop:
            return

        # tag: strip the "{ns}" prefix, drop blacklisted subtrees, map to HTML.
        try:
            origtag = self.nspat.search(root.tag).group(1)
        except:
            origtag = root.tag
        if origtag in self.blacklist:
            return
        tag = self.tagmap.get(origtag, origtag)

        # text: element text, if it is a plain/unicode string.
        if root.text is not None and isinstance(root.text, types.StringTypes):
            text = root.text
        else:
            text = ""
        # Cut off everything from the standard feedback/disclaimer/video
        # footer sections onward (the UTF-8 literals are the footer phrases).
        if text.encode("utf-8").find("期待着您的宝贵意见".decode("utf-8").encode("utf-8")) != -1 \
                or text.encode("utf-8").find("我使用中还遇到别的问题怎么办".decode("utf-8").encode("utf-8")) != -1 \
                or text.encode("utf-8").find("社区解决方案内容免责声明".decode("utf-8").encode("utf-8")) != -1  \
                or text.encode("utf-8").find("视频演示".decode("utf-8").encode("utf-8")) != -1  \
                :
            self.stop = True
            return

        # tail: text following this element's end tag.
        if root.tail is not None and isinstance(root.tail, types.StringTypes):
            tail = root.tail
        else:
            tail = ""

        # starttag line: open the tag and translate/rewrite its attributes.
        children = root.getchildren()
        start = []
        start.append(self.indent*level+"<%s" %(tag, ))
        for k,v in root.items():
            try:
                k = self.nspat.search(k).group(1)
            except:
                pass
            if tag == "img" and k == "src":
                # GRAPHIC src is a relative path on the MS image server.
                remotepic = "http://support.microsoft.com/library/images/support/kbgraphics/"+v.replace('\\', '/')
                # local pic ok? Download into startdir/pics unless cached.
                if remotepic not in self.imgmap:
                    picdir = os.path.join(os.path.abspath(self.startdir), 'pics')
                    # NOTE(review): `id` here is the *builtin* function, not
                    # self.id, so the filename prefix is the repr of the
                    # builtin; presumably self.id was intended -- confirm.
                    localpath = os.path.join(picdir, "%s_%s" %(id, remotepic[remotepic.rfind('/')+1:]))
                    if not os.path.exists(localpath):
                        img = self.getContent(remotepic)
                        if img:
                            if not os.path.exists(picdir):
                                os.makedirs(picdir)
                            f = open(localpath, "wb")
                            f.write(img)
                            f.close()
                            self.imgmap[remotepic] = localpath
                            #v = '../../'+localpath[localpath.rfind('\pics')+1:].replace('\\', '/')
                            time.sleep(0.1)
                        else:
                            # Download failed: abort this article's conversion.
                            raise Exception
                    else:
                        self.imgmap[remotepic] = localpath
                        #v = '../../'+localpath[localpath.rfind('\pics')+1:].replace('\\', '/')
                else:
                    localpath = self.imgmap[remotepic].strip()
                    if not os.path.exists(localpath):
                        print "local img file '%s' not exists" %(localpath,)
                        raise Exception
                    #v = '../../'+localpath[localpath.rfind('\pics')+1:].replace('\\', '/')
                # pic in wenwen? imgmap doubles as localpath -> uploaded URL.
                if localpath not in self.imgmap:
                    resultUrl = self.u.upload(localpath)
                    print "%s -> %s" %(remotepic, resultUrl)
                    self.imgmap[localpath] = resultUrl
                # Preserve the .png extension if the upload service changed it.
                if os.path.splitext(localpath)[1] == '.png' and os.path.splitext(self.imgmap[localpath])[1] != '.png':
                    v = os.path.splitext(self.imgmap[localpath])[0]+'.png'
                else:
                    v = self.imgmap[localpath]
            elif tag == "a":
                # Rewrite link attributes to absolute KB URLs, then route
                # every http link through the wenwen redirect/alert page.
                if origtag == 'KB_LINK' and k == 'contentid':
                    k = 'href'
                    v = 'http://support.microsoft.com/kb/%s/' %(v,)
                elif k == 'href' and not v.startswith(r'http://'):
                    if not v.startswith('/'):
                        v = 'http://support.microsoft.com/'+v
                    else:
                        v = 'http://support.microsoft.com'+v
                if v.startswith('http'):
                    v = "http://wenwen.soso.com/z/UrlAlertPage.e?sp=S" + v
            start.append(' %s="%s"' % (k, v))
        if tag == 'a':
            start.append(' target="_blank"')
        if len(children) == 0 and text == "":
            start.append("/>")
        else:
            start.append(">")
        starttag = ''.join(start)
        if tag == "SBODY":
            # NOTE: "<br/>"*0 is the empty string, so this appends only the
            # indentation whitespace (the <br/> was deliberately disabled).
            html.append(self.indent*level+"<br/>"*0)
        elif tag == "img":
            # Images are wrapped in their own <div> block.
            html.append(self.indent*level+"<div>")
        html.append(starttag)

        if text != "":
            if tag != "img":
                html.append(self.indent*(level+1)+text)
                txt.append(text)

        if len(children) > 0 or text != "":
            for child in children:
                self.__transToHtml(child, html, txt, level+1)

            end = []
            end.append(self.indent*level + "</%s>" %(tag, ))
            endtag = ''.join(end)
            html.append(endtag)

        if tag == "img":
            html.append(self.indent*level+"</div>")

        if tail != "":
            html.append(self.indent*(level+1)+tail)
            txt.append(tail)

    def transToHtml(self, root):
        """Convert the XML subtree *root* and return (html, txt) as two
        newline-joined strings.  Resets the stop flag for each call."""
        html = []
        txt = []
        level = 0
        self.stop = False
        self.__transToHtml(root, html, txt, level)
        return ('\n'.join(html), '\n'.join(txt))

    def parseXml(self, fileName, id):
        """Parse a KB article XML file and return (html, txt), or None if
        the article is not Live, has no title, or has no content sections.

        Side effects: writes question.txt next to *fileName*, records the
        cleaned title in self.titles, and may override self.contentns.
        """
        self.id = id
        print "parse %s %s" %(id, fileName,)
        root = ET.parse(fileName)

        node = root.find("{%s}metadata/{%s}profile/{%s}gds/{%s}status" %(self.metans, self.metans, self.metans, self.metans))
        # NOTE(review): `if node` uses ElementTree element truthiness -- a
        # childless element is falsy, so this status check likely never
        # triggers; `node is not None` was presumably intended.
        if node and node.text != "Live":
            print "status:", node.text
            return None
        
        # core profile: assemble the question.txt fields.
        question = []

        # title must exist
        node = root.find("{%s}metadata/{%s}profile/{%s}core/{%s}title" %(self.metans, self.metans, self.metans, self.metans))
        if node is None:
            print "no title"
            return None
        else:
            #title = ''.join(node.text.encode("utf-8").split('(MVP撰稿)'.decode("utf-8").encode("utf-8")))
            title = node.text.encode("utf-8")
            # Strip series prefixes/suffixes; failures leave the title as-is.
            # (The cp936 re-encoding is only for console display on Windows.)
            for pat in self.titlepat:
                print "<<<title:", title.decode('utf-8').encode('cp936')
                try:
                    matchtitle = pat[0].search(title).group(pat[1])
                    if matchtitle:
                        title = matchtitle
                except:
                    pass
                print ">>>title:", title.decode('utf-8').encode('cp936')
            # Manual per-article override wins over pattern cleanup.
            if str(id) in self.titlemap:
                #print "%s -> %s" %(title.decode("utf-8").encode('cp936'), self.titlemap[str(id)].decode('utf-8').encode('cp936'))
                title = self.titlemap[str(id)]
            #print "title:", title
            question.append("TITLE:%s" %(title,))
            self.titles[id] = title

        question.append("ALTERNATIVES:")

        # keywords: optional; space-separated in the XML, tab-joined here.
        node = root.find("{%s}metadata/{%s}profile/{%s}gds/{%s}keywords" %(self.metans, self.metans, self.metans, self.metans))
        if node is None:
            keywords = ""
            print "no keywords"
        else:
            keywords = node.text.encode("utf-8")
            #print "keywords:", keywords
        keywords = '\t'.join(keywords.strip().split(' '))
        question.append("KEY WORDS:%s" %(keywords,))

        question.append("TAGS:")

        # abstract: optional description element.
        node = root.find("{%s}metadata/{%s}profile/{%s}core/{%s}description" %(self.metans, self.metans, self.metans, self.metans))
        if node is not None and isinstance(node.text, types.StringTypes):
            abstract = node.text.encode("utf-8")
            #print "abstract:", abstract
        else:
            abstract = ""
            print "no abstract"
        question.append("ABSTRACT:%s" %(abstract,))

        question = '\n'.join(question).strip()
        qfilename = os.path.join(os.path.dirname(fileName), "question.txt")
        qfile = open(qfilename, "w")
        qfile.write(question)
        qfile.close()

        # The article may declare its own content namespace; honor it.
        elementpath = "{%s}metadata/{%s}profile/{%s}gds/{%s}contentNamespace" %(self.metans, self.metans, self.metans, self.metans)
        node = root.find(elementpath)
        if node is not None:
            self.contentns = node.text.strip()
            #print "contentns:", self.contentns

        elementpath = "{%s}content/{%s}KB/{%s}SECTION" %(self.metans, self.contentns, self.contentns)
        sections = root.findall(elementpath)
        if len(sections) == 0:
            print "no section"
            return None
        #print sections

        # Render every SECTION/SBODY into the combined answer.
        html = []
        txt = []
        for section in sections:
            sbody = section.find("{%s}SBODY" %(self.contentns,) )
            if sbody is None:
                print "no body", "in", section
                continue

            subhtml, subtxt = self.transToHtml(sbody)
            html.append('\n'+subhtml)
            txt.append('\n'+subtxt)

        return (''.join(html), ''.join(txt))

if __name__ == "__main__":
    # test 1: crawling by id
    if len(sys.argv) < 2:
        print "Usage: %s id_file", sys.argv[0]
        sys.exit()

    idFilename = sys.argv[1]
    idFile = open(idFilename, "r")
    ids = []
    for line in idFile:
        ids.append(line.strip().split('\t')[0])
    idFile.close()

    oldcwd = os.getcwd()
    crawler = eee_crawler(oldcwd)
    try:
        for id in ids:
            subdir = os.path.join("data", str(id))
            try:
                os.makedirs(subdir)
            except:
                pass
            os.chdir(subdir)
            # avoid repeated crawling
            xmlfilename = str(id)+".xml"
            if not os.path.exists(xmlfilename):
                url = "http://support.microsoft.com/common/fetchxml.aspx?scid=kb;zh-cn;"+str(id)
                content = crawler.getContent(url)
                if content and content.startswith(r'<?xml version="1.0" encoding="utf-8"?>'):
                    xmlfile = open(xmlfilename, "w")
                    xmlfile.write(content)
                    xmlfile.close()
                    time.sleep(1)
            
            orightmlfilename = os.path.splitext(xmlfilename)[0]+".htm"
            if not os.path.exists(orightmlfilename):
                orightmlfile = open(orightmlfilename, "w")
                orightmlfile.write('<script>window.location = "http://support.microsoft.com/kb/%s/zh-cn"</script>' %(id,))
                orightmlfile.close()

            if os.path.exists(xmlfilename):
                html,txt = crawler.parseXml(xmlfilename, id)
                #htmlfile = open(os.path.splitext(xmlfilename)[0]+".htm", "w")
                htmlfile = open(os.path.join(os.path.dirname(xmlfilename), "answer.htm"), "w")
                htmlfile.write(html.encode("utf-8"))
                htmlfile.close()
                txtfile = open(os.path.join(os.path.dirname(xmlfilename), "plain_answer.txt"), "w")
                txtfile.write(txt.encode("utf-8"))
                txtfile.close()

            os.chdir(oldcwd)
    except Exception, e:
        print e
        pass
    finally:
        os.chdir(oldcwd)
        crawler.writeimgmap()
        crawler.writetitles()

    # test 2:parse xml, translate into html
    #if len(sys.argv) < 2:
    #    print "Usage: %s xml_file", sys.argv[0]
    #    sys.exit()
    #xmlfile = sys.argv[1]
    #crawler = eee_crawler()
    #html = crawler.parseXml(xmlfile)
    #htmlfile = open(os.path.splitext(xmlfile)[0]+".htm", "w")
    #htmlfile.write(html.encode("utf-8"))
    #htmlfile.close()
    
