# -*- coding: utf-8 -*-

'''

@author: wyndem
@Email:  wyndem.wen@timevary.com
@FileName: mail.py
@Time: 2018/7/19 17:41
@Description: 163

'''
import asyncio
import datetime
import os
import pickle

import aiohttp
import execjs
import requests
from bs4 import BeautifulSoup

import config
from tool import TOOL as t, retry



def get_des_psswd(pwd):
    jsstr = get_js()
    ctx = execjs.compile(jsstr) #加载JS文件
    return (ctx.call('rsaPwd', "10001","967094d470dc7940f32ab7e8b701cf2ca732c7cd7aed176ecd8cac605da26e5fc8664382bb2c0b73b8a0109016130ff0c0f73985fa990a18466008b14284969d58f2c2c2698f91b6dcc6bebd90663d288dca79586b8294a56619dd87ab21274088c318e26b6e3210b165118b87ebc8798df572b2b84cfa9fe54d04e3b8dc1b25",pwd))  #调用js方法  第一个参数是JS的方法名，后面的data和key是js方法的参数

def get_js():
    f = open(t.getPropath()+"\pwd.txt", 'r', encoding='utf-8') # 打开JS文件
    line = f.readline()
    htmlstr = ''
    while line:
        htmlstr = htmlstr+line
        line = f.readline()
    f.close()
    return htmlstr



class Mail163():

    def __init__(self,person, title, today=True):
        self.url='http://m77.mm.mail.163.com'
        self.mail_163_url={}
        self.cookies=None
        self.person=person.strip()
        self.title=title
        self.today=today
        self.proxies={}
        self.headers=t.getHeaders()

    '登录'
    @retry
    async def login(self,user,pwd):
        try:
            self.proxies={'http':t.getProXY()}
            print(self.proxies)
        except:
            self.proxies={}
        data = {
            'method': 'login',
            'back_url': '',
            'timestamp': '0',
            'm': '1',
            'register': '0',
            'wml': 'false',
            'srandid': '',
            'ucid': '',
            'publicExponen': '10001',
            'modulus': '967094d470dc7940f32ab7e8b701cf2ca732c7cd7aed176ecd8cac605da26e5fc8664382bb2c0b73b8a0109016130ff0c0f73985fa990a18466008b14284969d58f2c2c2698f91b6dcc6bebd90663d288dca79586b8294a56619dd87ab21274088c318e26b6e3210b165118b87ebc8798df572b2b84cfa9fe54d04e3b8dc1b25',
            'username': user,
            'domain': '163.com',
            'password': get_des_psswd(pwd)
        }
        s = requests.session()
        try:
            r=s.post('http://m77.mm.mail.163.com/login.s', data=data,proxies=self.proxies,headers=self.headers,timeout=10)
            bsObj = BeautifulSoup(r.text, "html.parser")
            for name in  bsObj.findAll("a"):
                if name.get_text().find('收件箱') > -1:
                    self.mail_163_url['inbox'] = self.url + name.get('href')
                elif name.get_text().find('垃圾邮件') >-1:
                    print('1')
                    self.mail_163_url['spam'] = self.url + name.get('href')
                elif name.get_text().find('订阅邮件') >-1:
                    self.mail_163_url['subscribe'] = self.url + name.get('href')
            if s.cookies['NTES_SESS'] != None:
                self.cookies=s.cookies
                config.cookies_163[user]=[s.cookies.get_dict(),self.mail_163_url]
                print(user+'登录成功'+s.cookies['NTES_SESS'])
                return self.cookies
        except:
            print('错哒')
        return  None

    async def async_login(self, user, pwd):
        try:
            self.proxies = {'http': t.getProXY()}
        except:
            self.proxies = {}
        data = {
            'method': 'login',
            'back_url': '',
            'timestamp': '0',
            'm': '1',
            'register': '0',
            'wml': 'false',
            'srandid': '',
            'ucid': '',
            'publicExponen': '10001',
            'modulus': '967094d470dc7940f32ab7e8b701cf2ca732c7cd7aed176ecd8cac605da26e5fc8664382bb2c0b73b8a0109016130ff0c0f73985fa990a18466008b14284969d58f2c2c2698f91b6dcc6bebd90663d288dca79586b8294a56619dd87ab21274088c318e26b6e3210b165118b87ebc8798df572b2b84cfa9fe54d04e3b8dc1b25',
            'username': user,
            'domain': '163.com',
            'password': get_des_psswd(pwd)
        }
        print('http://'+self.proxies.get('http'))
        async with aiohttp.ClientSession() as s:
            async with s.post('http://m77.mm.mail.163.com/login.s', data=data, proxy='http://'+self.proxies.get('http'), headers=t.getHeaders()) as r:
                print(await r.text)
                bsObj = BeautifulSoup(r.text, "html.parser")
                for name in bsObj.findAll("a"):
                    if name.get_text() == '收件箱':
                        self.mail_163_url['inbox'] = self.url + name.get('href')
                    elif name.get_text().find('垃圾邮件') > -1:
                        self.mail_163_url['spam'] = self.url + name.get('href')
                    elif name.get_text().find('订阅邮件') > -1:
                        self.mail_163_url['subscribe'] = self.url + name.get('href')
                if s.cookies['NTES_SESS'] != None:
                    self.cookies = s.cookies
                    config.cookies_163[user] = s.cookies
                    print(user + '登录成功')
                    return self.cookies
                return None



    '收件箱-返回参数--1：邮件的时间 2:邮件'
    def inbox_mail(self):
        r = requests.get(self.mail_163_url['inbox'], cookies=self.cookies,proxies=self.proxies,headers=t.getHeaders())
        bsObj = BeautifulSoup(r.text, "html.parser")
        for item in bsObj.findAll('div',{'class':'mailitem'}):
            mail_person=item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[1].strip()
            mail_data = item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[2]
            mail_url=self.url+item.contents[1].find('a').get('href')
            mail_title=item.contents[1].find('a').get_text()
            if mail_person == self.person and mail_title.find(self.title)>-1 :
                messgin = requests.get(mail_url, cookies=self.cookies)
                if datetime.datetime.now().strftime('%Y/%m/%d') == mail_data and self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0],BeautifulSoup(messgin.text,"lxml").get_text().replace('\n','')
                elif not self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0],BeautifulSoup(messgin.text,"lxml").get_text().replace('\n','')
        return None

    '垃圾箱-返回参数--1：邮件的时间 2:邮件'
    def spam_mail(self):
        r = requests.get(self.mail_163_url['spam'], cookies=self.cookies,proxies=self.proxies,headers=t.getHeaders())
        bsObj = BeautifulSoup(r.text, "html.parser")
        for item in bsObj.findAll('div', {'class': 'mailitem'}):
            mail_person = item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[1].strip()
            mail_data = item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[2]
            mail_url = self.url + item.contents[1].find('a').get('href')
            mail_title = item.contents[1].find('a').get_text()
            if mail_person == self.person and mail_title.find(self.title)>-1 :
                messgin = requests.get(mail_url, cookies=self.cookies)
                if datetime.datetime.now().strftime('%Y/%m/%d') == mail_data and self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0],BeautifulSoup(messgin.text,"lxml").get_text().replace('\n','')
                elif not self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0],BeautifulSoup(messgin.text,"lxml").get_text().replace('\n','')
        return None


    '订阅邮箱'
    def subscribe(self):
        r = requests.get(self.mail_163_url['subscribe'], cookies=self.cookies, proxies=self.proxies, headers=t.getHeaders())
        bsObj = BeautifulSoup(r.text, "html.parser")
        for item in bsObj.findAll('div', {'class': 'mailitem'}):
            mail_person = item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[1].strip()
            mail_data = item.contents[0].find('strong').get_text().replace(' [新]', '').split(' ')[2]
            mail_url = self.url + item.contents[1].find('a').get('href')
            mail_title = item.contents[1].find('a').get_text()
            if mail_person == self.person and mail_title.find(self.title) > -1:
                messgin = requests.get(mail_url, cookies=self.cookies)
                if datetime.datetime.now().strftime('%Y/%m/%d') == mail_data and self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0], BeautifulSoup(messgin.text,
                                                                                          "lxml").get_text().replace('\n','')
                elif not self.today:
                    return messgin.text.split('时间：')[1].split('</div>')[0], BeautifulSoup(messgin.text,
                                                                                          "lxml").get_text().replace('\n','')
        return None

    '2个一起找，并找出当前时间最早的'
    def  integrate_mail(self,s_time=None):
        list_mail=[self.inbox_mail(), self.spam_mail(),self.subscribe()]
        list_mail=[m for m in list_mail if m != None]
        if len(list_mail) == 0:
            return  None

        max_mail=list_mail[0]
        for mail in list_mail:
            if mail[0] > max_mail[0]:
                max_mail=mail

        if s_time!=None and s_time<=max_mail[0]:
            return max_mail
        elif s_time==None:
            return max_mail

    '异步获取'
    async def async_integrate_mail(self,user,s_time=None):
        self.cookies=config.cookies_163[user]
        self.integrate_mail(s_time)


if __name__ == '__main__':
    '2018/07/20 14:40:30'
    m=Mail163('coinsuper','Welcome to ',True)
    m.login('u79462371qia@163.com', 'ys2338')
    print(m.integrate_mail('2018/07/21 15:14:02'))


