import os
import urllib.request
import bs4
from bs4 import BeautifulSoup
import pickle
import smtplib  
import email.mime.multipart  
import email.mime.text
from apscheduler.schedulers.blocking import BlockingScheduler
import configparser



# Target site: 12306 "latest news" (最新动态) page.
BASE_URL = "http://www.12306.cn/mormhweb/"
MAIN_URL = "zxdt/index_zxdt.html"

# Mail settings — defaults only; overridden from config.ini at startup.
SMTP_NAME = "smtp.126.com"
SMTP_FROM_NAME = "poseidon1111@126.com"
SMTP_FROM_PASSWORD = "thankyou"
SMTP_TO_NAME = "poseidon0000@126.com"

# Number of polls performed so far (incremented by getHtml on each run).
query_index = 0


def sendMail(text):
    """Send *text* as a plain-text notification mail.

    Uses the module-level SMTP_* settings (host, sender, password, recipient).

    :param text: body of the mail (may contain Chinese characters).
    :raises smtplib.SMTPException: on connection, login or delivery failure.
    """
    msg = email.mime.multipart.MIMEMultipart()
    msg['from'] = SMTP_FROM_NAME
    msg['to'] = SMTP_TO_NAME
    msg['subject'] = '铁路局公告'
    # Explicit UTF-8 charset so the Chinese body text is encoded reliably.
    msg.attach(email.mime.text.MIMEText(text, 'plain', 'utf-8'))
    # Context manager guarantees quit()/close even if login or sendmail
    # raises (the original leaked the connection on error, and passed the
    # port as the string '25' instead of an int).
    with smtplib.SMTP(SMTP_NAME, 25) as smtp:
        smtp.login(SMTP_FROM_NAME, SMTP_FROM_PASSWORD)
        # as_string() is the canonical RFC 2822 serialization of the message.
        smtp.sendmail(SMTP_FROM_NAME, SMTP_TO_NAME, msg.as_string())


def getHtml():
    """Poll the 12306 news page once and mail any new matching announcements.

    Workflow:
      1. Load the ids of already-processed announcements from ``data.pkl``.
      2. Fetch the news index; for each unseen announcement from the Jinan or
         Shanghai railway bureau, fetch its detail page and collect every
         paragraph mentioning 成都 (Chengdu).
      3. Persist the updated id list and mail the collected text (if any).

    Side effects: network I/O, reads/writes ``data.pkl``, may send mail,
    increments the module-level ``query_index`` counter.
    """
    global query_index
    mail_text = ""
    # Ids of announcements already processed in earlier runs (if any).
    flag_list = []
    if os.path.isfile('data.pkl'):
        with open('data.pkl', 'rb') as data_file:
            flag_list = pickle.load(data_file)

    query_index += 1
    print("第", query_index, "次查询！")
    res = urllib.request.urlopen(BASE_URL + MAIN_URL).read()
    soup = BeautifulSoup(res, "html.parser")
    listData = soup.find('div', id='newList').ul.children
    for li in listData:
        # Skip whitespace/NavigableString children; only real <li> tags matter.
        if not isinstance(li, bs4.element.Tag):
            continue
        title = li.a.string
        hrefStr = li.a['href']
        # This slice of the relative URL uniquely identifies the announcement.
        str_flag = hrefStr[-19:-5]
        if str_flag in flag_list:
            continue
        flag_list.append(str_flag)
        # Relative links start with "../"; strip it and prepend the base URL.
        hrefStr = BASE_URL + hrefStr[3:]
        # Only announcements from these two railway bureaus are of interest.
        if "济南铁路局" in title or "上海铁路局" in title:
            # Fetch the detail page and collect every line mentioning 成都.
            res1 = urllib.request.urlopen(hrefStr).read()
            soup1 = BeautifulSoup(res1, "html.parser")
            content_strs = soup1.find('div', class_='content_text').stripped_strings
            title_added = False
            for strr in content_strs:
                if "成都" in strr:
                    if not title_added:
                        # Prefix the first match with the announcement title.
                        mail_text += "\n\n"
                        mail_text += title
                        mail_text += "\n"
                        title_added = True
                    mail_text += strr
                    mail_text += "\n"

    # BUG FIX: the original wrote ``file.close`` (attribute access, missing
    # call parentheses), so the pickle file was never explicitly closed.
    with open('data.pkl', 'wb') as out_file:
        pickle.dump(flag_list, out_file)
    if mail_text.strip():
        sendMail(mail_text)
    print("查询完毕！\n")
    


# Startup banner (identical output to printing each line separately).
_BANNER_LINES = (
    "******************************************************",
    "*                                                    *",
    "*                                                    *",
    "*                 铁路局公告查询系统                 *",
    "*                                                    *",
    "*                     By:markmoon                    *",
    "*                                                    *",
    "******************************************************",
)
print("\n".join(_BANNER_LINES))

#读取配置参数
# Read runtime configuration and start the polling scheduler.  Every setting
# (site URLs and SMTP credentials) is taken from config.ini, overriding the
# module-level defaults above.
cf = configparser.ConfigParser()
if os.path.isfile('config.ini'):
    cf.read('config.ini')

    BASE_URL = cf.get("base", "base_url")
    MAIN_URL = cf.get("base", "main_url")
    SMTP_NAME = cf.get("base", "smtp_name")
    SMTP_FROM_NAME = cf.get("base", "from_name")
    SMTP_FROM_PASSWORD = cf.get("base", "from_pass")
    SMTP_TO_NAME = cf.get("base", "to_name")

    # Poll every 30 minutes; start() blocks forever.
    sched = BlockingScheduler()
    sched.add_job(getHtml, 'interval', minutes=30)
    sched.start()
else:
    # The original exited silently when the config file was missing,
    # which made misconfiguration hard to diagnose.
    print("config.ini not found — nothing to do, exiting.")


