#!/usr/bin/env python
#coding=utf-8

import os
import sys
import time
import cookielib
import urllib
import urllib2
from BeautifulSoup import BeautifulSoup

__version__ = '0.2'
__author__ = 'libsoft'

class pymarry:
    """Scraper client for marry5.com (Python 2, BeautifulSoup 3).

    Capabilities visible in this class:
      * log in and persist the session cookie to disk (COOKIEFILE),
      * download member photos from "match me", search-result and
        zodiac-sign listing pages into an ``images/`` directory, naming
        each file with scraped member metadata,
      * batch-send a greeting message to members whose photos were
        hand-picked into ``images/selected/``.

    NOTE(review): Python 2 only -- ``print`` statements, ``urllib2``,
    ``cookielib``, ``except X, e`` syntax, ``xrange``, ``file()``.
    """

    # path of the file the LWP-format cookie jar is saved to / loaded from
    COOKIEFILE = 'cookies.lwp'
    # maps the basename of the "light" indicator image (e.g. light_1.gif)
    # to a traffic-light colour label; values are UTF-8 byte strings
    # (green / yellow / red)
    LIGHT = {'light_1':'绿', 'light_2':'黄', 'light_3':'红'}
    # class-wide login flag: set True by login() on success and checked by
    # the __loginRequired decorator before login-only operations
    LOGIN = False

    def __init__(self, proxyinfo=None):
        """
        proxyinfo - a dictionary containing proxy info, or None (default)
            for a direct connection:
            proxyinfo = {
                'user' : mylogin,
                'pass' : mypassword,
                'host' : myproxy,
                'port' : 8080
            }
        """
        self.proxyinfo = proxyinfo
        # installs a process-global opener; every urllib2.urlopen() below
        # relies on it for cookies (and the proxy, when configured)
        self.__install_opener()

    def __install_opener(self):
        """Build and globally install a urllib2 opener with cookie support.

        Reloads cookies from COOKIEFILE when the file exists, so a login
        session saved by a previous run is reused. When proxy info was
        given, the opener also routes HTTP through that proxy.
        """
        self.cj = cookielib.LWPCookieJar()
        if os.path.isfile(pymarry.COOKIEFILE):
            self.cj.load(pymarry.COOKIEFILE)

        if self.proxyinfo:
            print 'using proxy'
            # authenticated proxy URL vs. anonymous proxy URL
            if self.proxyinfo['user']:
                proxy_support = urllib2.ProxyHandler(
                    {"http":"http://%(user)s:%(pass)s@%(host)s:%(port)s" % self.proxyinfo})
            else:
                proxy_support = urllib2.ProxyHandler(
                    {"http":"http://%(host)s:%(port)s" % self.proxyinfo})
            opener = urllib2.build_opener(proxy_support, urllib2.HTTPCookieProcessor(self.cj))
        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))

        # affects every subsequent urllib2.urlopen() call in this process
        urllib2.install_opener(opener)

    def login(self, id='', password=''):
        """
        Log in to marry5.com with the given credentials.

        id - user id
        password - user password
        return - True on successful login (also sets pymarry.LOGIN),
                 False otherwise. Calls sys.exit() if the request itself
                 fails with an IOError.

        Side effect: saves the received cookies back to COOKIEFILE.
        """

        url = 'http://www.marry5.com/sso/SignOn'
        # passing urlencoded data to urlopen() makes this a POST request
        data = urllib.urlencode({'loginId': id, 'password': password})
        # fake a browser user agent; some sites reject automated clients
        # NOTE(review): 'header' is built but never attached to the request
        # below (urlopen(url, data) takes no headers) -- so it has no effect.
        header = {'User-agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}

        try:
            response = urllib2.urlopen(url, data)
        except IOError, e:
            # IOError covers urllib2's URLError/HTTPError here.
            # NOTE(review): an HTTPError has both 'code' and 'reason', so the
            # elif branch only fires for plain URLErrors (e.g. DNS failure).
            print 'We failed to open "%s".' % url
            if hasattr(e, 'code'):
                print 'We failed with error code - %s.' % e.code
            elif hasattr(e, 'reason'):
                print "The error object has the following 'reason' attribute :"
                print e.reason
                print "This usually means the server doesn't exist,',"
                print "is down, or we don't have an internet connection."
            sys.exit()
        else:
            # the sign-on endpoint answers with the URL to continue at;
            # that body is used below to decide whether login succeeded
            nexturl = response.read()

        if self.cj is None:
            print "We don't have a cookie library available - sorry."
            print "I can't show you any cookies."
        else:
            self.cj.save(pymarry.COOKIEFILE) # persist session for later runs

        # a successful login redirects to the "my psycho" page
        if nexturl == 'http://truelove.marry5.com/myPsycho.m5':
            pymarry.LOGIN = True
            return True
        else:
            return False

    def __loginRequired(func):
        """Decorator: run func only when pymarry.LOGIN is True, else return None.

        Defined as a plain function in the class body and applied at class
        definition time (valid in Python 2 -- it is used before becoming a
        bound method).
        """
        def wrapper(self, *args, **kwargs):
            if not pymarry.LOGIN:
                print 'You need to login to download user images matching you.'
                return None
            return func(self, *args, **kwargs)
        return wrapper

    @__loginRequired
    def downloadMatchMe(self, begin=1, end=2):
        """
        Download images of users who match me. Requires a prior login().

        begin - start page index (inclusive)
        end - end page index (inclusive)
        """
        prefix = 'http://truelove.marry5.com/viewSimpleMatch.m5?pageForm.toPage='
        for index in range(begin, end+1):
            r = urllib2.urlopen(prefix + str(index))
            page = r.read()
            # match pages are served in GBK encoding
            self.__parseImageUrlFromMatch(page, 'gbk')
        print 'mission complete'

    def downloadSearch(self, begin=1, end=2, province='2'):
        """
        Download user images from search-result pages.

        begin - start page index (inclusive)
        end - end page index (inclusive)
        province - the province code to search in

        The long suffix strings hard-code the remaining search filters
        (sex, age range, stature, education, ...) around the province.
        """
        prefix = 'http://truelove.marry5.com/viewSimpleMatch.m5?pageForm.toPage='
        prefix2 = 'http://www.marry5.com/search/indexSearch.m5?selector.toPage='
        suffix_a = ('&selector.maxPage=20&'
            'form.defaultPhoto=0&form.sex=0&form.fromAge=24&form.toAge=27&'
            'form.contactProvince=')
        suffix_b = ('&form.contactCity=&form.fromStature=164&'
            'form.toStature=&form.fromWeight=40&form.toWeight=&form.marriageStatus=5&'
            'form.education=5&form.educationExt=%3E%3D&form.workingIndustry=&'
            'form.incoming=&form.incomingExt=%3D&form.housingStatus=&'
            'form.creditTotal=&form.creditTotalExt=%3D&'
            'simpleViewMode=false&form.orderMode=1')
        for index in range(begin, end+1):
            print prefix2 + str(index) + suffix_a+province+suffix_b
            r = urllib2.urlopen(prefix2 + str(index) + suffix_a+province+suffix_b)
            page = r.read()
            self.__parseImageUrlFromSearch(page, 'gbk')
        print 'mission complete'

    def downloadSign(self, begin=1, end=2, province='2', sign='5'):
        """
        Download user images filtered by zodiac sign.

        begin - start page index (inclusive)
        end - end page index (inclusive)
        province - the province code to search in
        sign - the zodiac sign code to search for

        The zodiac-search URL differs depending on whether you are logged
        in; this method uses the not-logged-in endpoint (indexSearch.m5).

        logged in:
        http://www.marry5.com/search/feaSearch.m5?
        selector.toPage=2&form.defaultPhoto=&form.sex=0&form.fromAge=24&form.toAge=40&
        form.contactProvince=2&form.contactCity=21&form.fromStature=&form.toStature=&
        form.fromWeight=&form.toWeight=&form.marriageStatus=&form.education=&
        form.educationExt=%3D&form.workingIndustry=&form.incoming=&form.incomingExt=%3D&
        form.housingStatus=&form.creditTotal=&form.creditTotalExt=%3D&
        simpleViewMode=false&form.orderMode=1&form.constellation=5&form.traiFigure=&
        form.traiBlood=&form.sIncoming=&form.lIncoming=&form.traiOccupation=&
        selector.totalCount=240

        not logged in:
        http://www.marry5.com/search/indexSearch.m5?
        selector.toPage=2&form.defaultPhoto=&form.sex=0&form.fromAge=24&form.toAge=40&
        form.contactProvince=&form.contactCity=&form.fromStature=&form.toStature=&
        form.fromWeight=&form.toWeight=&form.marriageStatus=&form.education=&
        form.educationExt=%3D&form.workingIndustry=&form.incoming=&form.incomingExt=%3D&
        form.housingStatus=&form.creditTotal=&form.creditTotalExt=%3D&simpleViewMode=false&
        form.orderMode=1&form.constellation=5&form.traiFigure=&form.traiBlood=&
        form.sIncoming=&form.lIncoming=&form.traiOccupation=&selector.totalCount=240

        http://www.marry5.com/search/feaSearch.m5?
        form.contactProvince=2&form.contactCity=21&form.sex=0&form.fromAge=24&
        form.toAge=28&form.constellation=5
        """
        prefix = 'http://www.marry5.com/search/indexSearch.m5?form.contactProvince='
        suffix_a = ('&form.contactCity=&form.sex=0&form.fromAge=24&'
            'form.toAge=28&form.constellation=')
        suffix_b = '&selector.toPage='

        for index in range(begin, end+1):
            url = prefix+province + suffix_a+sign + suffix_b+str(index)
            print url
            r = urllib2.urlopen(url)
            page = r.read()
            self.__parseImageUrlFromSearch(page, 'gbk')
        print 'mission complete'

    def __parseImageUrlFromMatch(self, page, fromEncoding='gbk'):
        """Extract member ids from a "match me" listing page and fetch photos.

        page - raw page bytes
        fromEncoding - encoding to decode the page with (site uses GBK)

        Member ids are taken from the trailing '=...' of each profile link,
        filtered against images/downloaded.txt to skip already-fetched
        members, downloaded, then appended to that file.
        """
        try:
            page = page.decode(fromEncoding)
            soup = BeautifulSoup(page)
            # profile links sit in <td class="b14" align="left"> cells
            tds = soup.findAll('td', attrs={'class':'b14', 'align':'left'})
        except AttributeError, e:
            print e
        except IndexError, e:
            print e
        except UnicodeDecodeError, e:
            print e
        else:
            urls = [td.a['href'] for td in tds]
            # member id is everything after the last '=' in the href
            ids = [url[url.rfind('=')+1:] for url in urls]
            f = 'images/downloaded.txt'
            ids = self.__filterIds(ids, f)
            if len(ids) >= 1:
                prefix = 'http://www.marry5.com/my/maximumListPhoto.m5?memberId='
                suffix = '&item.selector.toPage=1'
                image_list_urls = [prefix+id+suffix for id in ids]
                self.__getImages(image_list_urls, ids) # all members image url
                # record the newly downloaded ids so reruns skip them
                self.__writeToFile('\n'.join(ids)+'\n', f, 'a')

    def __parseImageUrlFromSearch(self, page, fromEncoding='gbk'):
        """Extract member ids from a search-result page and fetch photos.

        Same flow as __parseImageUrlFromMatch, but search pages link
        profiles as '/u<ID>.html' inside specifically styled <div>s, so
        the id is sliced from between '/u' and '.html'.
        """
        try:
            page = page.decode(fromEncoding)
            soup = BeautifulSoup(page)
            divs = soup.findAll('div', attrs={'style':'float:left; padding-right: 10px;'})
        except AttributeError, e:
            print e
        except IndexError, e:
            print e
        except UnicodeDecodeError, e:
            print e
        else:
            urls = [div.a['href'] for div in divs]
            ids = []
            for url in urls:
                # slice the member id out of '.../u<ID>.html'
                b = url.rfind('/u') + 2
                e = url.rfind('.html')
                ids.append(url[b:e])
            f = 'images/downloaded.txt'
            ids = self.__filterIds(ids, f)
            if len(ids) >= 1:
                prefix = 'http://www.marry5.com/my/maximumListPhoto.m5?memberId='
                suffix = '&item.selector.toPage=1'
                image_list_urls = [prefix+id+suffix for id in ids]
                self.__getImages(image_list_urls, ids) # all members image url
                # record the newly downloaded ids so reruns skip them
                self.__writeToFile('\n'.join(ids)+'\n', f, 'a')

    def __getImages(self, urls, ids):
        """Download the first photo page of each member into images/.

        urls - photo-listing URLs, parallel to ids
        ids - member ids, used to build metadata-rich filenames
        """
        i = 0
        for url in urls:
            dir = 'images/'
            # filename embeds scraped member info; '1' is the photo index
            filename = self.__makeFileName(ids[i], 1)
            if not os.path.isdir(dir): # create images/ on first use
                os.mkdir(dir)
            self.__downloadImage(url, dir, filename) # one member's image url
            i += 1

    def __downloadImage(self, url, dir, filename):
        """Download one photo page's image, then recurse to the next page.

        url - maximumListPhoto.m5 page URL ending in the page number
        dir - target directory (e.g. 'images/')
        filename - target filename; its trailing '_<n>.jpg' tracks the
            photo index and is rewritten for each following photo
        """
        r = urllib2.urlopen(url)
        page = r.read().decode('gbk')
        soup = BeautifulSoup(page)
        try:
            # the actual image URL is the link wrapped around the photo
            photoDiv = soup.find('div', attrs={'id':'photoDiv', 'align':'center'})
            aTag = photoDiv.find('a')
        except AttributeError, e:
            print 'AttributeError. No image link found. %s' % e
        except IndexError, e:
            print 'IndexError. No image link found. %s' % e
        else:
            imageUrl = aTag['href']
            name = dir + filename
            if not os.path.isfile(name): # skip files downloaded earlier
                print 'downloading => %s' % filename.encode('utf-8')
                urllib.urlretrieve(imageUrl, name)

        # download next image if exist
        try:
            links = soup.find('td', attrs={'height':'20'}).div.findAll('a')
        except AttributeError, e:
            print 'AttributeError. No next page.'
        except IndexError, e:
            print 'IndexError. No next page.'
        else:
            # follow the "next" pager link ('下一张' = "next picture") and
            # recurse; there may be only one link (either next or previous)
            s = '下一张'.decode('utf-8')
            for link in links:
                if link.string == s: # only one link, maybe next, maybe previous
                    # NOTE(review): increments only the LAST digit of the URL,
                    # so this breaks once the page number reaches 10
                    # (page 9 -> '...e=10' is never produced). Same for the
                    # filename index below.
                    next = int(url[-1]) + 1
                    url = url[0:-1] + str(next)
                    # replace the trailing '_<n>.jpg' photo index
                    p1 = filename[:filename.rindex('_')]
                    filename = '%s_%d.jpg' % (p1, next)
                    self.__downloadImage(url, dir, filename)

    def __getMemberInfo(self, id):
        """Scrape a member's profile page into a dict of display fields.

        id - member id
        return - dict with keys: username, sex, year, birthdate, signs,
            workplace, height, weight, edu, marriage, income, light.
            Individual values may be None when a field is missing
            (see __getInfo).
        """
        urlprefix='http://www.marry5.com/my/viewMemberInfo.m5?memberId='
        url = urlprefix + id
        r = urllib2.urlopen(url)
        page = r.read().decode('gbk')
        soup = BeautifulSoup(page)
        print '<<< retrieving user %s information ... >>>' % id

        """
        info_1 = soup.findAll(id="my_info2")
        username = info_1[0].span.strong.string.strip()
        sex = info_1[2].span.span.string.strip()
        year = info_1[3].span.string.strip()

        info_2 = soup.findAll(id="my_info3")
        birthdate = info_2[0].span.string.strip()[:10]
        signs = getSigns(birthdate)
        workplace = '%s%s' % (info_2[1].span.span.string.strip(),
                        info_2[1].span.span.nextSibling.nextSibling.string.strip())
        height = info_2[2].span.string.strip()
        weight = info_2[3].span.string.strip()
        
        info_3 = soup.findAll(id="my_info4")
        edu = info_3[0].span.span.string.strip()
        marriage = info_3[1].span.span.string.strip()
        income = info_3[2].span.span.string.strip()
        """

        # all profile labels are <span class="blue12">; the values hang off
        # each label's siblings, navigated by the eval'd expressions below
        part = soup.findAll('span', {'class':'blue12'})

        d = {}
        d['username'] = self.__getInfo(part, 
            'part[0].nextSibling.string.strip()', 'name')
        d['sex'] = self.__getInfo(part, 
            'part[6].nextSibling.nextSibling.string.strip()', 'sex')
        d['year'] = self.__getInfo(part, 
            r"part[9].nextSibling.string.replace('\r\n', '').replace(' ', '')", 'year')
        d['birthdate'] = self.__getInfo(part, 
            'part[1].nextSibling.string.strip()[:10]', 'birthdate')
        d['signs'] = self.__getSigns(d['birthdate'])
        d['workplace'] = self.__getInfo(part, 
            "'%s%s' % (part[4].nextSibling.nextSibling.string.strip(), \
                part[4].nextSibling.nextSibling.nextSibling.nextSibling.string.strip())",
             'workplace')
        d['height'] = self.__getInfo(part, 
            r"part[7].nextSibling.string.replace('\r\n', '').replace(' ', '')", 'height')
        d['weight'] = self.__getInfo(part,
            r"part[10].nextSibling.string.replace('\r\n', '').replace(' ', '')", 'weight')
        d['edu'] = self.__getInfo(part, 
            'part[2].nextSibling.nextSibling.string.strip()', 'education')
        d['marriage'] = self.__getInfo(part,
            'part[5].nextSibling.nextSibling.string.strip()', 'marriage')
        d['income'] = self.__getInfo(part,
            'part[8].nextSibling.nextSibling.string.strip()', 'income')
        d['light'] = self.__getLightInfo(id)
        return d

    def __getInfo(self, part, html, error):
        """Evaluate one scraping expression, returning None on failure.

        part - the soup result list the expression indexes into
        html - a Python expression string referring to 'part'
        error - field name used in the failure message
        return - the expression's value, or None (implicitly) when the
            page structure does not match.

        NOTE(review): eval() on a hard-coded expression string -- the
        strings are internal constants (not user input), but this pattern
        defeats static checking and would be dangerous if ever fed
        external data.
        """
        try:
            return eval(html)
        except AttributeError, e:
            print 'AttributeError. No "%s" information found. %s' % (error, e)
        except IndexError, e:
            print 'IndexError. No "%s" information found. %s' % (error, e)

    def __getSigns(self, birthdate):
        """Map a 'YYYY-MM-DD' birthdate to a zodiac sign name (unicode).

        Works by folding month+day into the integer MDD and testing it
        against each sign's date range; Capricorn is the fallback 'else'
        (its range wraps the year end). Returns None (implicitly) when
        birthdate is None or malformed.
        """
        try:
            d = birthdate.split('-')
            # e.g. '1985-03-21' -> 321
            d = int(d[1] + d[2])
        except AttributeError, e:
            print 'AttributeError. No sign information found. %s' % (e)
        except IndexError, e:
            print 'IndexError. No sign information found. %s' % (e)
        else:
            # range membership via list comprehension; sign names are
            # UTF-8 source bytes decoded to unicode for filename building
            if d in [i for i in xrange(321, 421)]:
                signs = '白羊'.decode('utf-8')
            elif d in [i for i in xrange(421, 522)]:
                signs = '金牛'.decode('utf-8')
            elif d in [i for i in xrange(522, 622)]:
                signs = '双子'.decode('utf-8')
            elif d in [i for i in xrange(622, 723)]:
                signs = '巨蟹'.decode('utf-8')
            elif d in [i for i in xrange(723, 824)]:
                signs = '狮子'.decode('utf-8')
            elif d in [i for i in xrange(824, 924)]:
                signs = '处女'.decode('utf-8')
            elif d in [i for i in xrange(924, 1024)]:
                signs = '天秤'.decode('utf-8')
            elif d in [i for i in xrange(1024, 1123)]:
                signs = '天蝎'.decode('utf-8')
            elif d in [i for i in xrange(1123, 1222)]:
                signs = '射手'.decode('utf-8')
            elif d in [i for i in xrange(121, 220)]:
                signs = '水瓶'.decode('utf-8')
            elif d in [i for i in xrange(220, 321)]:
                signs = '双鱼'.decode('utf-8')
            else:
                signs = '魔羯'.decode('utf-8')
            return signs

    def __getLightInfo(self, id):
        """Fetch the match "light" (traffic-light colour) for a member.

        id - member id
        return - a colour byte string from pymarry.LIGHT, or 'no' when
            the light image cannot be found on the double-match page.

        old url: http://truelove.marry5.com/doubleMatch.m5?memberId=
        """
        dm_urlprefix = 'http://www.marry5.com/my/doubleMatch.m5?memberId='
        dm_url = dm_urlprefix + id
        dm_r = urllib2.urlopen(dm_url)
        dm_page = dm_r.read().decode('gbk')
        dm_soup = BeautifulSoup(dm_page)
        light = 'no'
        try:
            light_url = dm_soup.find(width="45").div.img['src']
        except AttributeError, e:
            print 'AttributeError. No light information found. %s' % (e)
        except IndexError, e:
            print 'IndexError. No light information found. %s' % (e)        
        else:
            # map the image basename (sans '.gif'-style extension) to colour
            light = pymarry.LIGHT[light_url[light_url.rfind('/')+1:-4]]
        return light

    def __makeFileName(self, id, index):
        """Build a photo filename embedding the member's scraped profile.

        id - member id
        index - photo index within the member's album
        return - '<id>_<name>_<workplace>_..._<light>_<index>.jpg', or a
            timestamp-based fallback name when the profile could not be
            scraped (the reduce() call concatenates all time.localtime()
            fields into one string).
        """
        d = self.__getMemberInfo(id)
        if d:
            filename = '%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%d.jpg' % (
                id, d['username'], d['workplace'], d['birthdate'], d['signs'], d['height'], 
                d['weight'], d['edu'], d['income'], d['light'].decode('utf-8'), index)
        else:
            filename = '%s_%s_%d.jpg' % (id,
                                    reduce(lambda a,b : str(a)+str(b), time.localtime()),
                                    index) 
        return filename

    def __sendMessageTo(self, id, msg):
        """
        Send a message to one user.

        id - user id
        msg - UTF-8 encoded message text; re-encoded to GBK for the site,
            and 'counter' carries the remaining character budget (130
            minus the message length in characters)

        POST /message/webSentMessage.m5 messageVO.receiverId=12345&
        messageVO.content=ssdffds&counter=123    
        POST /message/webSentMessage.m5 messageVO.receiverId=12345&
        messageVO.content=%C4%E3%BA%C3&counter=128
        """
        url = 'http://www.marry5.com/message/webSentMessage.m5'
        data = urllib.urlencode({'messageVO.receiverId': id,
                                 'messageVO.content': msg.decode('utf-8').encode('gbk'),
                                 'counter': 130-len(msg.decode('utf-8'))})
        # fake a browser user agent; some sites reject automated clients
        headers = {'User-agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        req = urllib2.Request(url, data, headers)        
        response = urllib2.urlopen(req)
        print 'send message to %s complete' % id

    def sendMessageBatch(self, msg, dir='images/selected/', filename='sent.txt'):
        """
        Send a message to a batch of users.

        Pick images into the 'images/selected' directory; every member
        whose image is there (id parsed from the filename prefix before
        the first '_') gets the message, unless already listed in the
        sent-file.

        msg - message to send (UTF-8 bytes, see __sendMessageTo)
        dir - directory containing the picked images.
              default value is 'images/selected/'.
        filename - file listing already-contacted ids to avoid duplicates.
                   default value is 'sent.txt'

        NOTE(review): __readIdsFromFile returns None when the sent-file
        does not exist yet, so 'id not in sent_ids' raises TypeError on
        the very first run -- confirm and guard with 'sent_ids or []'.
        """
        names = os.listdir(dir)
        ids = []
        f_sent = dir + filename
        sent_ids = self.__readIdsFromFile(f_sent)
        for name in names:
            if name != filename: # skip the sent-file itself
                id = name[:name.find('_')]
            else:
                continue
            if id not in sent_ids:
                self.__sendMessageTo(id, msg)
                ids.append(id)
                sent_ids.append(id) # avoid duplicates within this run too
            else:
                print '%s omitted, we have already say hello to her/him before.' % id
        if len(ids) >= 1:
            # append the newly contacted ids to the sent-file
            self.__writeToFile('\n'.join(ids)+'\n', f_sent, 'a')
        print 'mission complete'

    def __filterIds(self, ids, file):
        """Return the subset of ids not already listed in the given file.

        ids - candidate member ids
        file - path of the id-list file (one id per line)
        return - ids minus those found in the file; when the file is
            missing/empty, ids is returned unchanged.
        """
        done_ids = self.__readIdsFromFile(file)
        filtered_ids = []
        if done_ids and len(done_ids) >=1:
            for id in ids:          
                if id not in done_ids:
                    filtered_ids.append(id)
                else:
                    print '%s omitted, we have already got her/him images before.' % id
            return filtered_ids
        else:
            return ids

    def __readIdsFromFile(self, filename):
        """Read one id per line from filename.

        return - list of ids (trailing newlines stripped), or None
            (implicitly) when the file does not exist.

        NOTE(review): the file handle is never closed, and the implicit
        None return is what trips sendMessageBatch on first run.
        """
        ids = []
        if os.path.isfile(filename):
            f = open(filename)
            for line in f:
                if line.endswith('\n'):
                    ids.append(line[:-1]) #clip new line character
                else:
                    ids.append(line)
            return ids

    def __writeToFile(self, page, filename, mode='wb'):
        """Write a unicode/str payload to filename through a UTF-8 codec.

        page - text to write
        filename - target path
        mode - open mode; callers pass 'a' to append to id-list files

        codecs.lookup('utf-8')[3] is the StreamWriter class (Python 2
        lookup results are 4-tuples); it encodes on the way out.
        """
        f = file(filename, mode)
        import codecs
        writer = codecs.lookup('utf-8')[3](f)
        writer.write(page)
        writer.close()
