#coding=utf-8
'''

从sis读取数据并保存到目录

fid
    yyyymm
        dd
        
文件:
    pid.post
        [
            {author: '', content: ''},
            ...
        ]
        
    pid.index, pid.comment.index
        word,flag,count
     
执行:
    
   crawl:
       
       1) 读取帖子内容
       2) 写入.post文件, 写入.index文件
       
   scan:
       
       1) 扫描待下载的post 
    
   download:
       
       1) 读取传入的.post文件
       2) 下载其中资源
       
   sync:
   
       1) 存在.suc文件
       2) 
       
   
TODO:

    图片下载需要处理302转接的情况: 如
    http://thumbsnap.com/i/8VELKPIn.jpg
    
西唯美253
东唯美186
    
Created on 2013-2-2

@author: Administrator
'''
import sys
import os
# Make the project root (two levels above this file's directory) importable so
# the absolute "ripper.*" imports below resolve regardless of the CWD.
# Bug fix: the original expression dirname(join(__file__, '../../')) resolved
# to the package directory instead of its parent; the intended form is the one
# preserved in the commented-out line at the bottom of this import block.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
from ripper.parser.AisexParser import AisexParser
from ripper.bencode import getTorrentInfo #@UnresolvedImport
from ripper.syncconfig import sync_db_host, sync_db_user, sync_db_pwd, sync_db_name

from ftplib import FTP
from optparse import OptionParser
from ripper.handler import HttpHandler
from ripper.handler.images import Thumbnail
from ripper.parser.SisParser import SisParser
import datetime
import hashlib
import json
import pprint
import random
import time
import urlparse
import psutil
#sys.path.append('/root/ripper')
#pth = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../')



DOWNLOAD_FAIL_TO_ABANDON = 2   # max retries for a whole post download task
DOWNLOAD_IMAGE_FAIL_TO_ABANDON = 2 # max retries per individual image
DOWONLOAD_TIMEOUT = 60*5       # timeout (seconds) for each post download task

# Tokens excluded from the word-frequency index (HTML noise / junk words).
spam = ['img', 'src', 'http', 'www', 'info', 'jpg', 'border', 'jpeg',
         'onclick', 'zoom', 'this', 'onload', 'attachimg', 'load', 'alt', 'br',
         'jdghbvdscsjhkiekyvrfgh', 'jkhfdgnyverb', 'dkjfhgd',
         'target', 'blank', 'font', 'image', 'nbsp', ]

# Torrent content file names containing any of these fragments are skipped.
TFSPAM = ['padding_file', u'封杀SIS001', u'知道真相']
# .post base names excluded from the database import.
POSTSPAMS = ['ai_read.post', ]
# Posts whose title contains any of these strings are treated as spam.
POST_TITLE_SPAM = ['共襄盛舉', ]
dataroot = '/home/datas'   # local root of the crawled data tree
ftphost = 'fuckavmm.com'   # FTP host used by sync()
error_log = 'errors.txt'   # parse errors are appended to this file

# Create the data root on first run (its parent directory must exist).
if os.path.exists(dataroot) == False:
    os.mkdir(dataroot)
    

# Marker-file suffixes written next to <pid>.post to track pipeline state.
FLAG_PROGRESS   = '.progress'    # resource download in progress
FLAG_ERROR      = '.error'       # resource download failed (abandoned)
FLAG_SUC        = '.success'     # resource download completed
FLAG_TRANSFERED_FILE = '.transfered'  # files uploaded to file server via FTP
FLAG_TRANSFERED_DATA = '.ftransfered'  # data imported into MySQL
FLAG_TRANSFERED_RSS  = '.rss'  # RSS entry generated

def checkdir(fid, yyyymm, dd, _dataroot):
    '''Ensure the directory <_dataroot>/<fid>/<yyyymm>/<dd> exists.

    Creates any missing intermediate levels (os.makedirs replaces the
    original three successive os.mkdir calls) and returns the full path.
    '''
    fileroot = os.path.join(_dataroot, fid, yyyymm, dd)
    if not os.path.exists(fileroot):
        os.makedirs(fileroot)
    return fileroot

def fetchpost(url, btype, fid):
    '''Crawl a forum listing page and save each post as <pid>.post (JSON).

    url   -- forum listing URL to parse
    btype -- board type, 'sis' or 'aisex'; selects the parser
    fid   -- forum id passed to the listing parser

    For each post found: skip it when its .post file already exists or its
    title matches POST_TITLE_SPAM; fetch the full content; for download
    forums also fetch attached torrents and record their file lists/sizes;
    finally dump the post dict as JSON.  Parse errors are appended to
    error_log and the post is skipped.
    '''
    # A proxy is only needed when running on Windows (the dev box).
    needProxy = (os.name == 'nt')

    parser = None
    if 'sis' == btype:
        parser = SisParser(None, needProxy=needProxy)
    elif 'aisex' == btype:
        parser = AisexParser(None, needProxy=needProxy)

    for post in parser.parse_obj_list(url, fid):
        fid = post['forumId']
        datestr = post['postDate']
        purl = post['postDetailUrl']

        ymd = datestr.split('-')
        yyyymm = ymd[0] + '%02d' % int(ymd[1])
        dd = '%02d' % int(ymd[2])
        fileroot = checkdir(fid, yyyymm, dd, dataroot)
        pid = parser.getpid(purl)    # post id
        filename = '%s.post' % pid
        filename = os.path.join(fileroot, filename)

        # Skip posts that were already crawled.
        if os.path.exists(filename):
            continue

        # Title spam filter.  (Bug fix: the original 'continue' inside the
        # spam loop only advanced that inner loop and never skipped the
        # post; it also needlessly ran after fetching the full content.)
        if any(spm in post['title'] for spm in POST_TITLE_SPAM):
            continue

        try:
            ct = parser.get_all_content(purl)
        except Exception as err:
            # Parsing failed: log and move on.  Append mode creates the
            # log file on first use, so no exists() branch is needed.
            with open(error_log, 'a') as f:
                f.write('%s, %s \n' % (err, purl))
            continue

        post['content'] = ct

        # Download torrents for forums that carry them.
        if is_download_forum(fid):
            fdir = os.path.join(fileroot, 'resource')
            if not os.path.exists(fdir):
                os.mkdir(fdir)
            names = parser.get_torrents(purl, pid, fdir)
            if names != []:
                torrentdict = post.setdefault('torrents', {})
                for fname in names:
                    # Record file list and total size for each torrent.
                    tfilename = os.path.join(fdir, fname)
                    flist, mbsize = getTorrentInfo(tfilename)
                    info = {'files': flist, 'size': mbsize}
                    torrentdict[fname] = info
        # Persist the post as JSON.
        with open(filename, 'w') as f1:
            f1.write(json.dumps(post, ensure_ascii=False))
          
def is_download_forum(fid):
    '''Return True when *fid* is a forum whose torrents/resources we download.'''
    download_fids = frozenset([230, 27, 143, 229, 58, 231, 20, 25, 77,
                               16, 4, 5, 11, 6, 345])
    return int(fid) in download_fids

def indexcontent(content, pseg):
    '''Segment *content* with the given POS segmenter and count words.

    Keeps only words tagged with one of the interesting POS flags, drops
    'br' and anything in the module-level spam list, and returns
    ("word,flag", count) pairs sorted by descending count.
    '''
    counts = {}
    wanted_flags = ['i', 'n', 'a', 'nr']
    for seg in pseg.cut(content):
        word = seg.word
        if word == 'br':
            continue
        if seg.flag not in wanted_flags:
            continue
        if word in spam:
            continue
        key = word + ',' + seg.flag
        counts[key] = counts.get(key, 0) + 1
    return sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    
def incr(dct, key, num=1, default=0):
    '''Add *num* to dct[key], treating a missing entry as *default*.'''
    dct[key] = dct.get(key, default) + num
    
def gen_download_resource(fid=None):
    '''Scan the data tree and print the .post files still needing download.

    Output is one path per line (consumed by `fetch.py download -i` through
    xargs, see the comment at the bottom of the file).  A post is selected
    when it has no .success / .error marker and either was never attempted
    (no .progress file) or its retry counter is still within
    DOWNLOAD_FAIL_TO_ABANDON.  Posts over the retry limit are finalized
    here: an .error marker when the heuristic says the download failed, a
    .success marker otherwise, and the .progress file is removed.

    fid -- optional forum id; restricts the scan to that forum's subtree.
    '''
    posts = []
    scanroot = dataroot
    
    if fid is not None:
        scanroot = os.path.join(scanroot, fid)
        
    for dirpath,_, filelist in os.walk(scanroot):
        for f in filelist:
            dirpath = os.path.abspath(dirpath)
            fname = os.path.join(dirpath, f)
            fname = fname.replace('\\', '/')
            if '.post' in fname:
                progressfile = fname.replace('.post', FLAG_PROGRESS)
                errorfile = fname.replace('.post', FLAG_ERROR)
                sucfile = fname.replace('.post', FLAG_SUC)
                
                # Already downloaded successfully.
                if os.path.exists(sucfile):
                    continue
                # Abandoned after repeated failures.
                if os.path.exists(errorfile):
                    continue
                
                # Never attempted: schedule it.
                if os.path.exists(progressfile) == False:
                    posts.append(fname)
                else:
                    # Attempted before but unfinished.  The first line of
                    # the .progress file is the retry counter; the rest are
                    # the image URLs that failed last time.
                    ct = open(progressfile).read().split('\n')
                    if len(ct)  > 1: # has failure records
                        ct = ct[0]
                        retrys = int(ct)
                        # Retry limit exceeded: finalize this post's state.
                        if retrys > DOWNLOAD_FAIL_TO_ABANDON :
                            # Judge success/failure from the post content.
                            if isdownloadfailed(fname) == True:
                                with open(errorfile, 'w') as fff:
                                    fff.write('damn')
                                os.remove(progressfile)
                            else:
                                # Heuristic says enough was downloaded;
                                # treat the post as successfully fetched.
                                with open(sucfile, 'w') as f:
                                    f.write('ok')
                                os.remove(progressfile)
                        else:
                            posts.append(fname)
    
    # Print the work list to stdout.
    for p in posts:
        print p
            
def isdownloadfailed(postfile):
    '''Heuristically decide whether a post's image download failed.

    Rules: picture posts need at least 3 saved images, download-forum
    posts at least 1; additionally, when more than two images all share a
    single file size and a .gif is among them, the images are very likely
    dead-link placeholders and the post counts as failed.
    '''
    post = readpost(postfile)
    images = post['content'][0].get('localimages', {})
    required = 1 if is_download_forum(post['forumId']) else 3
    if len(images) < required:
        return True
    infos = list(images.values())
    distinct_sizes = set(info['size'] for info in infos)
    if len(distinct_sizes) == 1 and len(images) > 2:
        joined_names = ''.join(info['filename'] for info in infos)
        if '.gif' in joined_names:
            return True
    return False

def readpost(postfile):
    '''Load a .post file and return the deserialized post dict.

    Bug fix: the original closed the file handle only on the success path,
    leaking it when json.loads raised; a context manager closes it always.
    '''
    with open(postfile) as f:
        return json.loads(f.read())

def download_resource(postfile):
    '''Download all images referenced by a .post file into its resource/ dir.

    Reads the post JSON, downloads each image (skipping ones already on
    disk and URLs that failed too often), creates a thumbnail per image,
    records per-image metadata under content[0]['localimages'] and per-URL
    failure counts under the post's 'errors' key, re-saving the JSON after
    each change.  On completion writes either a .success marker (no
    failures) or a .progress file whose first line is the incremented retry
    counter followed by the failed URLs.  Always terminates the process via
    sys.exit(9) so the xargs-driven worker pool moves on to the next task.
    '''
    
    def saveObj(obj):
        # Persist the (mutated) post dict back to the .post file.
        with open(postfile, 'w') as f1:
            f1.write(json.dumps(obj, ensure_ascii=False))
            
    pid = os.path.basename(postfile).split('.')[0]
    fdir = os.path.dirname(postfile)
    fdir = os.path.join(fdir, 'resource')
    if not os.path.exists(fdir):
        os.mkdir(fdir)
    handler = HttpHandler.HttpHandler(fdir)
    # Files already on disk; used to skip images fetched in a prior run.
    allimgs = [f for f in os.listdir(fdir) if '.jpg' in f]
    
    # Load the post JSON.
    fpostfile = open(postfile)
    obj = json.loads(fpostfile.read())
    fpostfile.close()
    download_errors = obj.setdefault('errors', {}) # per-URL failure counts
    imgs = obj['content'][0]['images']
    
    # Cap the image count for download forums.
    # NOTE(review): the original comment said "non-picture forums take the
    # first 15" but the code caps download forums at 5 — confirm intent.
    if len(imgs) > 5:
        fid, _, _ = get_info_from_path(postfile)
        if is_download_forum(fid):
            imgs = imgs[:5]
        
    imgs = list(set(imgs))
    random.shuffle(imgs)
    localimgs = obj['content'][0].setdefault('localimages', {})
    total = len(imgs)
    ct = 0
    fails = []
    for i, imgurl in enumerate(imgs):
        # Give up on URLs that have already failed too many times.
        if download_errors.get(imgurl, 0) >= DOWNLOAD_IMAGE_FAIL_TO_ABANDON:
            fails.append(imgurl)
            continue
        if is_image_downloaded(imgurl, allimgs) == True:
            ct +=1
            print '%s skip %d/%d' % (imgurl, ct, total)
            continue
        fname, fl, isize = handler.getImage_safe(imgurl, retrys=3)
        w, h = isize
        if fl == -1:
            print 'dowload failed : ', imgurl
            fails.append(imgurl)
            incr(download_errors, imgurl)
            saveObj(obj)
            continue
        tw, th = create_thumbnail(fname, fdir)
        ct +=1
        
        # Record metadata for the downloaded image.
        imginfo = {}
        imginfo['filename'] = os.path.basename(fname)
        imginfo['width'] = w
        imginfo['height'] = h
        imginfo['twidth'] = tw
        imginfo['theight'] = th
        imginfo['size'] = fl
        localimgs[i] = imginfo
        saveObj(obj)
        print '%s images downloaded %d/%d' % (pid, ct, total)
        
    
    # Record the run's outcome via marker files.
    progressfile = postfile.replace('.post', FLAG_PROGRESS)
    sucfile = postfile.replace('.post', FLAG_SUC)
    if len(fails) > 0:
        # Failures remain: bump the retry counter and list the failed URLs.
        counter = 0
        if os.path.exists(progressfile):
            ct = open(progressfile).read()
            counter = ct.split('\n')[0]
            counter = int(counter)
        with open(progressfile, 'w') as f:
            counter += 1
            f.write('%d\n' % counter)
            fails = '\n'.join(fails)
            f.write(fails)
    else: 
        with open(sucfile, 'w') as f:
            f.write('OK')
        if os.path.exists(progressfile):
            os.remove(progressfile)
    
    print 'exiting..'
#    interrupt_main()
#    raise StopThreadException()
    sys.exit(9)
        
def get_info_from_path(pfile):
    '''Extract (fid, yyyymm, dd) from a path shaped like
    .../<fid>/<yyyymm>/<dd>/<name>.post (either slash style).'''
    normalized = os.path.dirname(pfile).replace('\\', '/')
    parts = [segment for segment in normalized.split('/') if segment != '']
    return parts[-3], parts[-2], parts[-1]

class StopThreadException(Exception):
    '''Raised to stop a download worker thread (currently unused; see the
    commented-out raise in download_resource).

    Bug fix: the original __init__ built a fresh Exception('') and threw it
    away instead of initializing this instance; call the base initializer.
    '''
    def __init__(self):
        super(StopThreadException, self).__init__('')
    
def is_image_downloaded(imgurl, imgs):
    '''Return True when *imgurl* already has a cropped copy among *imgs*.

    A file counts only if its name contains both the md5 hash of the URL
    and the 'crop_' thumbnail prefix (i.e. the full pipeline finished).
    '''
    prefix = gethash(imgurl)
    return any('crop_' in name for name in imgs if prefix in name)

def gethash(s): 
    '''Return the hex md5 digest of *s*.'''
    digest = hashlib.md5(s)
    return digest.hexdigest()

# 缩略图    
def create_thumbnail(org, destDir):
    try:
        org = os.path.join(destDir, org)
        destName = 'crop_' + os.path.basename(org)
        t = Thumbnail(org,destDir,destName)    
        return t.create(True)
    except Exception, err:
        print err
        return 0, 0
    
def sync(gen_list=False, postfile=None, fid=None):
    
    ''' Upload a post's downloaded resources to the file server over FTP.

    With gen_list=True: instead of uploading, print the .post paths of all
    successfully downloaded posts not yet FTP-transferred (optionally
    restricted to forum *fid*) and return.

    Otherwise *postfile* is required: its images (plus their crop_ /
    cropsmall_ variants) and torrents are uploaded to
    /public_html/datas/<fid>/<yyyymm>/<dd>/ on the FTP server, the
    uploaded local copies are deleted, and a .transfered marker is written.
    '''
    
    # Generate the upload work list.
    if True == gen_list:
        droot = dataroot
        if fid is not None:
            droot = os.path.join(droot, fid)
        for dirpath, _, flist in os.walk(droot):
            dirpath = os.path.abspath(dirpath)
            for fname in flist:
                fname = os.path.join(dirpath, fname)
                if FLAG_SUC in fname:
                    transfedfile = fname.replace(FLAG_SUC, FLAG_TRANSFERED_FILE)
#                    print transfedfile
                    if os.path.exists(transfedfile) == False:
                        print fname.replace(FLAG_SUC, '.post').replace('\\', '/')
        return 
    
    if None == postfile:
        print 'Error: need a .post file to perform syncronization.'
        exit(0)
        
    pid = os.path.basename(postfile).split('.')[0]
    fid, yyyymm, dd = get_info_from_path(postfile)
    fpostfile = open(postfile)
    obj = json.loads(fpostfile.read())
    fpostfile.close()
    
    # Remote layout: /public_html/datas/<fid>/<yyyymm>/<dd>/
    # NOTE(review): FTP credentials are hardcoded here — consider moving
    # them to syncconfig alongside the MySQL credentials.
    print 'loginftp...'
    ftp = FTP(ftphost, 'ihamster', '123z123')
    print 'welcome.'
    print 'switching dir...'
    ftp.cwd('public_html/datas')
    
    # Change directories - create if it doesn't exist
    def chdir(dir): 
        if directory_exists(dir) is False: # (or negate, whatever you prefer for readability)
            try:
                ftp.mkd(dir)
            except Exception,ex:
                print ex
                pass
        ftp.cwd(dir)
    
    # Check if directory exists (in current location)
    def directory_exists(dir):
        filelist = []
        ftp.retrlines('LIST',filelist.append)
        for f in filelist:
            if f.split()[-1] == dir and f.upper().startswith('D'):
                return True
        return False
    
    # Descend into fid/yyyymm/dd, creating each level as needed.
    chdir(fid)
    chdir(yyyymm)
    chdir(dd)
    print 'cwd done.'
    imgs = obj['content'][0].get('localimages', {})
    total = len(imgs)
    print '%s has %d images to transfer... ' % (pid, total)
    imgs = sorted(imgs.iteritems(), key=lambda a: int(a[0]))
    transfered = []
    for i, fname in enumerate(imgs):
        fname = fname[1]['filename']
        cropname = 'crop_'+fname
        cropname1 = 'cropsmall_'+fname
        fullname = os.path.join(os.path.dirname(postfile), 'resource', fname)
        fullname_crop = os.path.join(os.path.dirname(postfile), 'resource', cropname)
        fullname_crop1 = os.path.join(os.path.dirname(postfile), 'resource', cropname1)
        if not os.path.exists(fullname):continue
        print '%s => remote' % fullname
        fp = open(fullname, 'rb')
        ftp.storbinary('STOR ' + fname, fp)
        transfered.append(fullname)
        fp.close()
        if os.path.exists(fullname_crop):
            fp = open(fullname_crop, 'rb')
            ftp.storbinary('STOR ' + cropname, fp)
            transfered.append(fullname_crop)
            fp.close()
        if os.path.exists(fullname_crop1):
            fp = open(fullname_crop1, 'rb')
            ftp.storbinary('STOR ' + cropname1, fp)
            transfered.append(fullname_crop1)
            fp.close()
        print 'ftp: uploading %d/%d' % (i, total)
        i += 1
        
    if 'torrents' in obj:
        for torrentfile in obj['torrents'].keys():
            fullname = os.path.join(os.path.dirname(postfile), 'resource', torrentfile)
            if not os.path.exists(fullname):continue
            imgs.append(torrentfile)
            fp = open(fullname, 'rb')
            ftp.storbinary('STOR ' + torrentfile, fp)
            fp.close()
            transfered.append(fullname)
    ftp.close()
    # Delete the local copies that were uploaded.
    for f in transfered:
        print 'removing' ,f
        try:
            os.remove(f)
        except Exception,ex:
            print ex
            pass
    # Write the transfer-complete marker.
    with open(postfile.replace('.post', FLAG_TRANSFERED_FILE), 'w') as transfredfile:
        transfredfile.write('ok')
    print 'post# %s done.' % pid
    
    
def import_data(fids=None, islist=False):
    '''Import successfully downloaded posts into the front-end MySQL DB.

    fids   -- comma-separated forum ids to scan under dataroot.
    islist -- when True, only print the candidate .post paths and return.

    Collects every post with a .success marker but no .ftransfered marker,
    bulk-inserts posts, replies, images, torrents and torrent file lists,
    then writes the .ftransfered marker next to each imported post.
    '''
    import MySQLdb
    from datetime import date
    
    
    # Walk the data tree and collect successfully downloaded posts.
    postfiles = []
    posts = []
    for fid in fids.split(','):
        droot = dataroot
        fid = fid.strip()
        droot = os.path.join(droot, fid)
        if not os.path.exists(droot):continue
        for yyyymm in os.listdir(droot):
            yyyymm = os.path.join(droot, yyyymm)
            for dd in os.listdir(yyyymm):
                dd = os.path.join(yyyymm, dd)
                for fname in os.listdir(dd):
                    if FLAG_SUC in fname:
                        fname = os.path.join(dd, fname)
                        postfile = fname.replace(FLAG_SUC, '.post')
                        transferedfile = postfile.replace('.post', FLAG_TRANSFERED_DATA)
                        if os.path.exists(transferedfile) == False:
                            if os.path.basename(postfile)  in POSTSPAMS: continue
                            postfiles.append(postfile)
                            if islist == True:
                                print postfile
                            with open(postfile, 'r') as pf:
                                pftext = pf.read()
                                posts.append(json.loads(pftext))
                                
    if True == islist:return
    print 'total %d posts to import... ' % len(posts)
    # Insert templates for each table.
    posttpl = 'insert into post values (%(pid)s,%(titleSimple)s,%(postDate)s,%(forumId)s,%(catid)s,%(author)s,%(content)s,%(code)s,%(postDetailUrl)s,%(preview)s,%(tw)s,%(th)s )'
    replytpl = 'insert into reply values (%(rid)s,%(pid)s,%(author)s,%(content)s,%(iorder)s,%(fid)s )'
    imgtpl = 'insert into image values (%(iid)s,%(filename)s,%(pid)s,%(width)s,%(height)s,%(twidth)s,%(theight)s,%(size)s )'
    torrenttpl = 'insert into torrent values (%(tid)s,%(pid)s,%(fid)s,%(filename)s,%(size)s )'
    torrentfiletpl = 'insert into torrent_files values (%(tfid)s,%(filename)s,%(suffix)s,%(pid)s ,%(fid)s )'
    
    postobjs = []
    replyobjs = []
    imageobjs = []
    torrentobjs = []
    torrentfileobjs = []
    for i, o in enumerate(posts):
        # Post row.
        obj = {}
        pid = os.path.basename(postfiles[i]).split('.')[0]
        obj['pid'] = pid
        obj['forumId'] = o['forumId']
        # Workaround for one pid that was crawled from two forums: make the
        # key unique by appending the forum id.
        if pid =='sis_4806670':
            pid = pid + '_' + obj['forumId']
            obj['pid'] = pid
        y, m, d = map(lambda a:int(a), o['postDate'].split('-'))
        obj['postDate'] = date(y, m, d)
        obj['titleSimple'] = o['titleSimple']
        obj['catid'] = o.get('tag', '')
        obj['author'] = o['content'][0]['author']
        obj['content'] = o['content'][0]['content']
        obj['code'] = get_post_id_code(pid)
        obj['postDetailUrl'] = o['postDetailUrl']
        # Preview: use the largest image.
        fn, tw, th = getpreviewimge(o)
        obj['preview'] = fn
        obj['tw'] = tw
        obj['th'] = th
        postobjs.append(obj)
        
        # Reply rows (floors after the first).
        if len(o['content']) > 1:
            iorder = 0
            for reply in o['content'][1:]:
                robj = {'rid': None, 'pid': pid, 'iorder': iorder, 'fid': o['forumId']}
                robj['author'] = reply['author']
                robj['content'] = reply['content']
                iorder += 1
                replyobjs.append(robj)
        
        # Image rows.
        # Images must be inserted in their original order.
        resourceroot = os.path.dirname(postfiles[i])
        resourceroot = os.path.join(resourceroot, 'resource')
        if 'localimages' in o['content'][0] : 
            # Sort by the numeric index key.
            imgs = sorted(o['content'][0]['localimages'].items(), key=lambda a:int(a[0]))
            for _img in imgs:
                img = _img[1]
                ifile = os.path.join(resourceroot, img['filename'])
#                img['size'] = os.path.getsize(ifile)
                img['iid'] = None # auto increment
                img['pid'] = pid
                img['filename'] = ifile.replace(dataroot, '').replace('\\', '/')
                imageobjs.append(img)
        
        # Torrent rows and their contained file lists.
        if 'torrents' in o :
            for torrentfile, tfiles in o['torrents'].items():
                tobj = {'tid': None, 'fid': o['forumId'], 'pid': pid}
                tfile = os.path.join(resourceroot, torrentfile)
                tobj['filename'] = tfile.replace(dataroot, '').replace('\\', '/')
                tobj['size'] = tfiles['size']
                if tobj['size'] == None:
                    tobj['size'] = ''
                torrentobjs.append(tobj)
                if tfiles['files'] == None: continue
                for fname in tfiles['files']:
                    isspam = False
                    for spam in TFSPAM:
                        if spam in fname:
                            isspam = True
                            break
                    if isspam:continue 
                    fobj = {'tfid': None, 'fid': o['forumId'], 'pid': pid}
                    fobj['filename'] = fname
                    fobj['suffix'] = fname.split('.')[-1]
                    torrentfileobjs.append(fobj)
        
    print 'total %d replies to import...' % len(replyobjs)
    print 'total %d images to import...' % len(imageobjs)
    print 'total %d torrents to import...' % len(torrentobjs)
    print 'total %d torrent_files to import...' % len(torrentfileobjs)

    db = MySQLdb.connect(sync_db_host, sync_db_user, sync_db_pwd, sync_db_name)
    db.set_character_set('utf8')
#    dbc.execute('SET NAMES utf8;')
#    dbc.execute('SET CHARACTER SET utf8;')
#    dbc.execute('SET character_set_connection=utf8;')
    
    cur = db.cursor()
    print 'transfering data...'
    # Posts are inserted one by one so a single duplicate-key error does
    # not abort the whole batch.
    for pobj in postobjs:
        try:
            cur.executemany(posttpl, [pobj, ])
        except Exception, ex:
            print ex
            continue
    cur.executemany(replytpl, replyobjs)
    cur.executemany(imgtpl, imageobjs)
    cur.executemany(torrenttpl, torrentobjs)
    cur.executemany(torrentfiletpl, torrentfileobjs)
    # Mark every collected post as imported.
    for pf in postfiles:
        with open(pf.replace('.post', FLAG_TRANSFERED_DATA), 'w') as ff:
            ff.write('ok')
    db.commit()
    cur.close()
    db.close()
    print 'sync data done.'
    
def getpreviewimge(o):
    '''Pick the preview image for a post: its largest local image.

    Returns (path, thumb_width, thumb_height) where path is the
    site-relative path of the 'crop_' thumbnail, or ('', 0, 0) when the
    post has no local images.
    '''
    first_floor = o['content'][0]
    if 'localimages' not in first_floor:
        return '', 0, 0
    localimages = first_floor['localimages']
    if len(localimages) == 0:
        return '', 0, 0
    # Rank by file size, largest first.
    ranked = sorted(localimages.items(), reverse=True,
                    key=lambda kv: int(kv[1]['size']))
    best = ranked[0][1]
    preview = 'crop_' + best['filename']
    ymd = o['postDate'].split('-')
    yyyymm = ymd[0] + '%02d' % int(ymd[1])
    dd = '%02d' % int(ymd[2])
    path = '/' + '/'.join((o['forumId'], yyyymm, dd, preview))
    return path, best['twidth'], best['theight']
    
def get_post_id_code(pid):
    '''Return the salted md5 code that obfuscates a post id.'''
    salted = pid + '___hash__str'
    return gethash(salted)

#def printimage(postfile):
#    postfile = postfile.replace(FLAG_ERROR, '.post')
#    with open(postfile) as f:
#        post = json.loads(f.read())
#        imgs = post['content'][0].get('localimages', {})
#        for img in imgs.values():
#            print os.path.join(os.path.dirname(postfile), 'resource', img['filename'])\
#                    , img['width'], img['height'], img['size']
#
#def checkftpdeletefailimage(imgpath):
#    # /home/datas/fid/yyyymm/dd/resource/basename
#    filename = os.path.basename(imgpath)
#    obj = {}
#    dirroot = os.path.join(os.path.dirname(imgpath), '..')
#    for f in os.listdir(dirroot):
#        if '.post' in f:
#            f = os.path.join(dirroot ,f)
#            with open(f, 'r') as ff:
#                obj = json.loads(ff.read())
#            for imginfo in obj['content'][0].get('localimages', {}).values():
#                if filename == imginfo['filename']:
#                    print  f
#                    break
#        if obj != {}:
#            break
#    pprint.pprint(obj)

def exportrss(fids, num):
    '''Generate RSS feed files for FTP-transferred posts.

    fids -- comma-separated forum ids to scan under dataroot.
    num  -- export at most this many posts (None means all).

    Collects posts that have been FTP-transferred but have no .rss marker,
    builds RSS items embedding the remote image/torrent URLs, writes a
    numbered export.xml.N every 3000 items and export.xml at the end, and
    marks each exported post with a .rss file.
    '''
    import PyRSS2Gen
    if None == num:
        num = -1
    CAT_AISA_CEN = u'亚洲有码影片'
    CAT_AISA_GALL = u'亚洲图片'
    CAT_AISA_SIS = u'亚洲ss'
    # NOTE(review): key 230 appears twice; the literal keeps the later
    # value (CAT_AISA_SIS) — confirm which category 230 should map to.
    CATS = {230: CAT_AISA_CEN, 4: CAT_AISA_CEN, 64: CAT_AISA_GALL, 230:CAT_AISA_SIS, 58: CAT_AISA_SIS}
    rssfile = 'export.xml'
    droot = dataroot
    # Walk the data tree and collect posts pending RSS generation.
    postfiles = []
    posts = []
    for fid in fids.split(','):
        fid = fid.strip()
        droot = os.path.join(droot, fid)
        if not os.path.exists(droot):continue
        for yyyymm in os.listdir(droot):
            yyyymm = os.path.join(droot, yyyymm)
            for dd in os.listdir(yyyymm):
                dd = os.path.join(yyyymm, dd)
                for fname in os.listdir(dd):
                    if FLAG_SUC in fname:
                        fname = os.path.join(dd, fname)
                        postfile = fname.replace(FLAG_SUC, '.post')
                        transferedfile = postfile.replace('.post', FLAG_TRANSFERED_FILE)
                        # Only posts whose FTP transfer completed.
                        if os.path.exists(transferedfile) == True:
                            rssfile_ = postfile.replace('.post', FLAG_TRANSFERED_RSS)
                            if os.path.exists(rssfile_) == True: continue
                            postfiles.append(postfile)
                            print postfile
                            with open(postfile, 'r') as pf:
                                pftext = pf.read()
                                posts.append(json.loads(pftext))
    rss = PyRSS2Gen.RSS2(
        title = "Andrew's PyRSS2Gen feed",
        link = "#",
        description = "The latest news about PyRSS2Gen, a "
                      "Python library for generating RSS2 feeds",
        lastBuildDate = datetime.datetime.now(),
        items = [])
    for ii, o in enumerate(posts[:int(num)]):
        # Roll over to a numbered file every 3000 items to bound file size.
        if ii % 3000 == 0 and 0 != ii:
            rss.write_xml(open(rssfile+'.'+str(ii), "w"))
            del rss.items[:]
        fid, yyyymm, dd = get_info_from_path(postfiles[ii])
        yyyy = yyyymm[:4]
        mm = yyyymm[4:]
        fid = o['forumId']
#        categories = [ PyRSS2Gen.Category(CATS[int(fid)], 'fuckavmm.com'), ]
        categories = [ CATS[int(fid)], ]
        title = o['titleSimple']
        ct = o['content'][0]['content']
        previewfn, _, _ = getpreviewimge(o)
        # Append the remote image URLs to the item body.
        if 'localimages' in o['content'][0]:
            images = o['content'][0]['localimages']
            imgs = []
            for i in images.values():
                tpl = '<img src="http://img2.fuckavmm.com/%s" />'
                fullname = '/datas/%s/%s/%s/%s' % (fid, yyyymm, dd, i['filename'])
                imgs.append(tpl % fullname)
            imgtext = '\n<br/>'.join(imgs)
            ct = ct + '\n<br/>' + imgtext
        # Append a download link for the first torrent, if any.
        ttext = '<br/><br/><br/><a href="http://img3.fuckavmm.com/%s">%s(%s)</a>'
        if 'torrents' in o :
            fname, finfo = o['torrents'].items()[0]
            fullname = '/datas/%s/%s/%s/%s' % (fid, yyyymm, dd, fname)
            size = finfo['size']
            ttext = ttext % (fullname, u'点击下载种子文件', size)
            ct += ttext
        item = PyRSS2Gen.RSSItem(
         title = title,
         link = "#",
         categories = categories,
         description = ct,
        enclosure = PyRSS2Gen.Image('http://img2.fuckavmm.com/datas/'+previewfn, '', ''),
         guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/news/030906-PyRSS2Gen.html"),
         pubDate = datetime.datetime(int(yyyy), int(mm), int(dd), 0, 0))
        rss.items.append(item)
        # Mark this post as exported.
        pfile = postfiles[ii]
        rssfile_ = pfile.replace('.post', FLAG_TRANSFERED_RSS)
        with open(rssfile_, 'w') as rf:
            rf.write('ok')

    rss.write_xml(open(rssfile, "w"))
    
if __name__ == '__main__':
#    print isdownloadfailed('K:\\home\\datas\\230\\201302\\14\\sis_4790708.post')
#    exit(0)
    # Command-line entry point: dispatches to scan / download / sync / rss;
    # with no sub-command it crawls the forums listed via -f.
    usage = "usage: %prog [options] scan [-f] | download -i| sync -i {data [ls] -f| list}|rss -f -n"
    parser = OptionParser(usage)
    
    parser.add_option("-f", "--forumid", dest="fid",
                      help="forumid")
    parser.add_option("-p", "--page", dest="page",
                      help="page index starts from 1")
    parser.add_option("-n", "--num", dest="num",
                      help="top n for rss export")
    parser.add_option("-i", "--input", dest="input",
                      help="E:/datas/64/201301/23/4761542.post")
    (options, args) = parser.parse_args()
    
    # scan: print the .post files that still need resource download.
    if 'scan' in args:
        gen_download_resource(options.fid)
        exit(0)
        
    # download: fetch the resources of a single .post file.
    if 'download' in args:
        postfile = options.input
        if postfile == None:
            print 'need postfile '
            exit(0)
        download_resource(postfile)
        exit(0)
    
    # sync: upload resources / import data to the front-end servers.
    if 'sync' in args:
        # 'sync list': print the FTP upload work list.
        if 'list' in args :
            genlist = True
            sync(genlist, fid=options.fid)
            exit(0)
        
        # 'sync data': import posts into the MySQL database.
        if 'data' in args:
            fid = options.fid
            islist = False
            if 'ls' in args:
                islist = True
            import_data(fids=fid, islist=islist)
            exit(0)
        
        # Plain 'sync -i <file>': FTP-upload one post's resources.
        postfile = options.input
        sync(postfile=postfile)
        exit(0)
        
    # rss: export RSS feed files.
    if 'rss' in args:
        fid = options.fid
        num = options.num
        exportrss(fid, num)
        exit(0)
    
#    if 'images' in args:
#        printimage(options.input)
#        exit(0)
#    if 'showpost' in args:
#        checkftpdeletefailimage(options.input)
#        exit(0)

    # Default action: crawl the listing pages of the given forums.
    fids = options.fid
    page = options.page
    if page == None:
        page = '1'
    
    for fid in fids.split(','):
        fid = fid.strip()
        # These forum ids live on aisex; everything else is sexinsex.
        aifids = (16, 4, 5, 11, 6)
        btype = 'sis'
        furl = 'http://sexinsex.net/bbs/forum-%s-%s.html'
        if int(fid) in aifids:
            btype = 'aisex'
            furl = 'http://www.aisex.com/bt/thread.php?fid=%s&page=%s'
    #    furl = 'http://sexinsex.net/bbs/forumdisplay.php?fid=%s&page=%s'
        furl = furl % (fid, page)
        fetchpost(furl, btype, fid)
    
    # time python fetch.py scan |awk 'BEGIN{10000*srand();} {printf "%s %s\n", rand(), $0}'  | sort -k1n | awk '{gsub($1FS,""); print $0}'| xargs -n1 -P15 python fetch.py download -i
    # download -i K:\\home\\datas\\64\\201301\\23\\sis_4761542.post
    # 309: mobile general forum
    '''
    
    
    
    
    
    
    
    
    
    
    jieba:中出, 无毛
                           
    
    '''
    
    