#!/usr/bin/env python
# -*- coding: utf8 -*-
import mediawiki, googleapi
from google.appengine.ext import db
import models, datetime, re
from globalfunc import *
from retrivenewpage import retriveNewPages
from google.appengine.api import memcache
import settings

# Matches [[File:...]] / [[Image:...]] style embeds, covering the English
# namespace names plus their Simplified/Traditional Chinese aliases.
reImageC = re.compile(u'\[\[(file|image|media|图像|文件|档案|媒体|圖像|檔案|媒體):.*?\]\]', re.I)
# Captures the text a search engine highlights as matching (<b>...</b>) in result snippets.
reBoldC = re.compile('<b>(.*?)<\/b>')

def _removeWikicode(text, re_dotall = False, remove_quote = True, debug = False):
    """Strip wiki markup from *text* and return plain(ish) prose.

    Removes HTML tags, URLs, image/file embeds, templates, wikitables,
    references, bold/italic quotes and heading markers, so the remaining
    text can be split into sentences and compared against web search hits.

    text       -- wikitext string; None/empty yields ""
    re_dotall  -- if True, strip multi-line wikitables and let the main
                  cleanup pattern match across newlines ((?s) flag)
    remove_quote, debug -- currently unused; kept so existing callers'
                  keyword arguments continue to work
    """
    if not text:
        return ""

    # Undo HTML escaping so the tag-stripping patterns below can see the tags.
    text = text.replace('&lt;', '<')
    text = text.replace('&gt;', '>')

    # Drop HTML/XML tags and <br> variants; unwrap comment markers.
    text = re.sub('(?i)<\s*/?\s*[a-zA-Z]+[\s\S]*?>', '', text)
    text = re.sub('(?i)<(/\s*)?br(\s*/)?>', '', text)
    text = re.sub('<!--([\s\S]*?)-->', '\\1', text)

    # remove URL
    text = re.sub('(ftp|https?)://[\w/.,;:@&=%#\\\?_!~*\'|()\"+-]+', ' ', text)

    # remove Image tags
    text = reImageC.sub("", text)

    # replace piped wikilink with its display text
    text = re.sub("\[\[[^\]]*?\|(.*?)\]\]", "\\1", text)

    # Unwrap {{unicode|...}} / {{polytonic|...}} templates, keeping the
    # wrapped text.  BUG FIX: the replacement used to be \1, which is the
    # template *name* ("unicode"/"polytonic"); the wrapped content is
    # group 2.
    text = re.sub("(?i){{(unicode|polytonic)\|(.*?)}}", "\\2", text)

    if re_dotall:
        flags = "(?xsim)"
        # exclude multi-line wikitable ({| ... |})
        text = re.sub('(?s){\|.*?^\|}', '', text)
    else:
        flags = "(?xim)"

    text = re.sub("""
    %s
    (
        <ref[^>]*?\s*/\s*>     | # exclude <ref name = '' / > tags
        <ref.*?>.*?</ref>      | # exclude <ref> notes
        ^[\ \t]*({\||[|!]).*?$ | # exclude wikitable
        </*nowiki>             | # remove <nowiki> tags
        {{[^\{\}]*?}}          | # remove (not nested) template
        <math>.*?</math>       | # remove LaTeX staff
        [\[\]]                 | # remove [, ]
        ^[*:;]+                | # remove *, :, ; in begin of line
        <!--                   |
        -->                    |
    )
    """ % flags, "", text)

    # Second template pass catches templates uncovered by earlier removals.
    text = re.sub("\{\{[^\{\}]*?\}\}", '', text)
    # Unwrap ''italic''/'''bold''' quotes and == heading == markers.
    text = re.sub("'{2,}(.*?)'{2,}", '\\1', text)
    text = re.sub("={2,}(.*?)={2,}", '\\1', text)

    # Collapse runs of blank lines.
    text = re.sub('[\r\n]+', '\n', text)

    return text

def _splitSentence(text):
    sentences = []
    paras = text.split('\n')
    for para in paras:
        if not para.strip() or len(para) < 50:
            continue
        stemp = re.split(u'[。？！\?\!\.《〈〉》（）\s]', para)
        sentences.extend([s.strip() for s in stemp if s.strip() and len(s) > 15])
    return sentences

def _copyviocheck(text, gapi, ret):
    """Web-search *text* and record likely copyvio sources into *ret*.

    text -- candidate sentence; truncated to 60 characters before searching
    gapi -- search client exposing searchWeb(query) -> iterable of result
            dicts with 'url' and 'content' keys
    ret  -- dict mapping source URL -> list of {'source', 'similarity'}
            entries; mutated in place and also returned

    A highlighted snippet counts as a match when it is longer than 15
    characters, covers more than 45% of the query length, and contains
    at least one non-ASCII character (to skip pure-Latin noise).
    """
    length = len(text)
    if length > 60:
        text = text[:60]
        length = 60
    gdata = gapi.searchWeb(text)
    for page in gdata:
        # Merge <b> runs separated only by punctuation so they count as
        # one contiguous match.
        content = re.sub(u'</b>\s*([，、,\(\)])\s*<b>', '\\1', page['content'])
        match = reBoldC.findall(content)
        for item in match:
            # Entirely-ASCII fragments are almost never the Chinese prose
            # being checked; skip them.
            if not re.sub('[\x20-\x7f]', '', item):
                continue
            similarity = float(len(item)) / float(length)
            if similarity > 0.45 and len(item) > 15:
                d = {'source': item,
                     'similarity': similarity
                    }
                # dict.has_key() is deprecated in Python 2.6 and removed
                # in Python 3; setdefault is equivalent and version-safe.
                ret.setdefault(page['url'], []).append(d)
    return ret

def checkPage(pagelimit = 1, searchlimit = 10):
    """Check up to *pagelimit* unchecked new pages for copyright violations.

    For each unchecked NewPage datastore record: strip wikicode, split
    into sentences, web-search up to *searchlimit* sentences, and when
    enough matching external text is found, save a Copyvio record and
    make three on-wiki edits (daily log page, article tag, deletion-
    discussion listing).

    The memcache flags 'copyvio_log' / 'copyvio_article' / 'copyvio_report'
    bracket each edit so an interrupted run can be resumed: the elif
    branches below redo only the step that was left half-done.
    """
    gapi = googleapi.GoogleAPI('zh-CN', settings.googleapikey)
    # Oldest unchecked new pages first.
    query = models.NewPage.all()
    query.filter('checked =', False)
    query.order('pubDate')
    site = mediawiki.login()
    pagedatas = query.fetch(pagelimit)
    count = 0
    for pagedata in pagedatas:
        if pagedata.checked:
            continue
        page = site(pagedata.title)
        # A patrolled or deleted page no longer needs checking; drop the record.
        if page.patrolled or not page.exists:
            pagedata.delete()
            continue
        text = _removeWikicode(pagedata.content)
        sentences = _splitSentence(text)
        result = {}
        slimit = searchlimit
        for sentence in sentences:
            slimit -= 1
            _copyviocheck(sentence, gapi, result)
            if not slimit:
                slimit = searchlimit  # NOTE(review): dead store -- the loop breaks immediately after
                break
        cv = models.Copyvio(pgtitle = pagedata.title,
                            simpdesc = '',
                            fulldesc = '')
        fromwikipedia = False
        for url, desc in result.items():
            # Undo percent-encoding of a few characters so the comparisons
            # below see the real URL.
            url = url.replace('%25', '%').replace('%3D', '=').replace('%3F', '?').replace('%26', '&')
            if 'wikipedia.org' in url.lower():
                urltemp = re.split('/(wiki|zh|zh-hans|zh-hant|zh-cn|zh-tw|zh-hk|zh-sg)/', url.lower())
                if len(urltemp) > 1:
                    # urltemp[2] is the path after /wiki/ (or a language variant).
                    urltemp = urltemp[2]
                    # Compare the URL's page title (with %NN rewritten as \xNN)
                    # against the repr of this page's utf-8 title: equal means
                    # the hit is this very article -- ignore it; different
                    # means another Wikipedia page, i.e. an internal copy.
                    title1 = urltemp.replace('%', '\\x')
                    title2 = repr(pagedata.title.encode('utf8'))[1:-1].lower()
                    if title1 != title2:
                        fromwikipedia = True
                    else:  # the match is this very article; skip it
                        continue
                else:
                    fromwikipedia = True
            # Require at least two matching snippets per URL to report it.
            if len(desc) < 2:
                continue
            cv.simpdesc += u'<br />* %s' % url
            cv.fulldesc += u'* %s\n' % url
            for d in desc:
                cv.fulldesc += u'*# 相似文本：%s\n*#: 相似度：%s\n' % (d['source'], d['similarity'])

        # Report only copies from non-Wikipedia sources with actual evidence.
        if not fromwikipedia and cv.simpdesc:


            reppage = site('Wikipedia:頁面存廢討論/疑似侵權')
            cv.put()
            now = datetime.datetime.now()

            # Progress flags possibly left by a previous interrupted run.
            cv_log = memcache.get('copyvio_log')
            cv_art = memcache.get('copyvio_article')
            cv_rep = memcache.get('copyvio_report')

            logpage = site('Wikipedia:頁面存廢討論/疑似侵權/机器人检查/%d年%d月%d日' % (now.year, now.month, now.day))
            logsummary = pagedata.title
            logcontent = u'{{subst:User:P-bot/copyvionotice\n|title=%s\n|user=%s\n|simpdesc=\n%s\n|fulldesc=\n%s\n}}' % (pagedata.title, pagedata.creator, cv.simpdesc, cv.fulldesc)
            # Escape h/H as HTML entities -- presumably so the listed http://
            # URLs are not auto-linked on the wiki page; TODO confirm intent.
            logcontent = logcontent.replace('h', '&#104;').replace('H', '&#72;')
            logrev = mediawiki.Revision(logcontent, logsummary, bot = True)

            artpage = site(pagedata.title)
            artsummary = u'机器人：本条目疑似侵犯版权'

            # NOTE(review): reppage was already assigned identically above.
            reppage = site('Wikipedia:頁面存廢討論/疑似侵權')
            repcontent = u'{{subst:CopyvioVFDRecord|%s|bot=%d年%d月%d日#%s}}' % (pagedata.title, now.year, now.month, now.day, pagedata.title)
            reprev = mediawiki.Revision(repcontent, bot = True)
            # Fresh report: perform all three edits, wrapping each one with
            # its memcache flag so a crash can be resumed in a later run.
            if (not cv_rep) and (not cv_art) and (not cv_log):
                memcache.set('copyvio_log', True)
                logpage.appendRevision(logrev, newsection = True)
                memcache.set('copyvio_log', False)
                memcache.set('copyvio_article', True)
                artoldrev = artpage.recentRevision
                artcontent = '{{Copyvio/bot|OldRevision=%d|time=%d-%02d-%02d|url=\n%s\n}}\n' % (artoldrev.revisionId, now.year, now.month, now.day, cv.simpdesc)
                artcontent = artcontent.replace('h', '&#104;').replace('H', '&#72;').replace('<br />', '\n') + artoldrev.content
                artnewrev = mediawiki.Revision(artcontent, artsummary, bot = True)
                artpage.appendRevision(artnewrev)
                memcache.set('copyvio_article', False)
                memcache.set('copyvio_report', True)
                reppage.appendRevision(reprev, newsection = True)
                memcache.set('copyvio_report', False)
            # Crashed while writing the report listing: redo only that edit,
            # skipping it if the title is already listed.
            elif cv_rep:
                memcache.set('copyvio_report', True)
                repoldrev = reppage.recentRevision
                if not pagedata.title in repoldrev.content:
                    reppage.appendRevision(reprev, newsection = True)
                memcache.set('copyvio_report', False)
            # Crashed while tagging the article: redo the tag (unless it is
            # already there), then the report listing.
            elif cv_art:
                memcache.set('copyvio_article', True)
                artoldrev = artpage.recentRevision
                if not '{{Copyvio/bot|OldRevision=' in artoldrev.content:
                    artcontent = '{{Copyvio/bot|OldRevision=%d|time=%d-%02d-%02d|url=\n%s\n}}\n' % (artoldrev.revisionId, now.year, now.month, now.day, cv.simpdesc)
                    artcontent = artcontent.replace('h', '&#104;').replace('H', '&#72;').replace('<br />', '\n') + artoldrev.content
                    artnewrev = mediawiki.Revision(artcontent, artsummary, bot = True)
                    artpage.appendRevision(artnewrev)
                memcache.set('copyvio_article', False)
                memcache.set('copyvio_report', True)
                reppage.appendRevision(reprev, newsection = True)
                memcache.set('copyvio_report', False)
            # Crashed while writing the log page: redo the log entry (if not
            # already present), then the article tag and the report listing.
            else:
                memcache.set('copyvio_log', True)
                logoldrev = logpage.recentRevision
                if not pagedata.title in logoldrev.content:
                    logpage.appendRevision(logrev, newsection = True)
                memcache.set('copyvio_log', False)
                memcache.set('copyvio_article', True)
                artoldrev = artpage.recentRevision
                artcontent = '{{Copyvio/bot|OldRevision=%d|time=%d-%02d-%02d|url=\n%s\n}}\n' % (artoldrev.revisionId, now.year, now.month, now.day, cv.simpdesc)
                artcontent = artcontent.replace('h', '&#104;').replace('H', '&#72;').replace('<br />', '\n') + artoldrev.content
                artnewrev = mediawiki.Revision(artcontent, artsummary, bot = True)
                artpage.appendRevision(artnewrev)
                memcache.set('copyvio_article', False)
                memcache.set('copyvio_report', True)
                reppage.appendRevision(reprev, newsection = True)
                memcache.set('copyvio_report', False)
        count += 1
        # Only mark the record checked when no edit was left half-done.
        cv_log = memcache.get('copyvio_log')
        cv_art = memcache.get('copyvio_article')
        cv_rep = memcache.get('copyvio_report')
        if (not cv_rep) and (not cv_art) and (not cv_log):
            pagedata.checked = True
            pagedata.put()

# Script entry: every fifth run also pulls the latest new pages before
# running the copyvio check; the run counter wraps after 1000.
run_count = getOffset('Retrive')
if run_count % 5 == 0:
    retriveNewPages()
run_count = 0 if run_count + 1 > 1000 else run_count + 1
putOffset('Retrive', run_count)
checkPage()