import requests
from bs4 import BeautifulSoup
import urlManager as um1
import urlManager1 as um2
import urlManager3 as um3
import threading
import re
import os
import traceback  
# Show the welcome banner before any prompts (user-facing text is Chinese
# by design; do not translate runtime strings).
print('''
===================================================
欢迎使用贴吧爬虫！
本大爬虫可以纵横百度贴吧，快速下载指定贴吧的图片。
如果你有充足的时间，你也可以选择下载所有图片。
暖心提醒：强撸灰飞烟灭！

如果要中途退出爬虫，按Ctrl+C键退出。
===================================================

''')
try:
    cwd=os.getcwd()
    baname=input('输入要爬取的贴吧名：')
    while baname=="":
        print('你不告诉我要爬去哪里，怎么给你图片啊！')
        baname=input('输入要爬取的贴吧名：')
    saveDir=input('要保存到的文件夹名（如果空默认保存为贴吧名）：')
    if saveDir=="":
     saveDir=baname
    print('已保存到{}/{}/'.format(cwd,saveDir))
    gate='http://tieba.baidu.com/f?ie=utf-8&kw={}'.format(baname)
    op=requests.get(gate)
    data=op.text
    soup=BeautifulSoup(data,'html.parser')
    #取贴吧所有帖子页数
    pageCount=soup.find_all('span',attrs={'class':'red_text'})
    pageCount=(int(pageCount[0].text)//50)+1
    pageCount=input('该贴吧一共有{}页帖子，输入要爬的页数:'.format(pageCount))
    while pageCount=="":
        print('什么都不输入，好玩儿吗？')
        pageCount=input('该贴吧一共有{}页帖子，输入要爬的页数:'.format(pageCount))

    #爬取每一页的所帖子链接
    for page in range(1,int(pageCount)+1):
        gate='http://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}'.format(baname,page*50-50)
        print('正在分析每一页的链接，已分析{}页'.format(page))
        op=requests.get(gate)
        data=op.text
        soup=BeautifulSoup(data,'html.parser')
        for i in soup.find_all('a',href=re.compile(r"^/p/\d*")):
            um1.add('http://tieba.baidu.com'+i['href'])
    imageCount=1

    def downImage():
        """Drain the image-URL queue (um2), saving each image into
        cwd/saveDir, named by the shared imageCount counter.

        NOTE(review): imageCount is a plain global mutated from multiple
        threads without a lock; concurrent workers may reuse a number —
        confirm whether that matters before hardening.
        """
        global imageCount
        imageUrl = um2.get()
        while imageUrl:
            print('正在下载第{}张图片'.format(imageCount))
            op = requests.get(imageUrl)
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists()/os.mkdir() pair.
            os.makedirs('{}/{}/'.format(cwd, saveDir), exist_ok=True)
            # 'with' guarantees the handle is closed even if write() raises.
            # imageUrl[-4:] assumes a 3-letter extension such as .jpg/.png.
            with open('{}/{}/{}{}'.format(cwd, saveDir, imageCount, imageUrl[-4:]), 'wb') as file:
                file.write(op.content)
            imageCount = imageCount + 1
            imageUrl = um2.get()
    # Crawl every page of a single thread and download its images.
    def getImage(url):
        """Enumerate all pages of the thread at *url*, queue each page
        (um3), harvest image addresses into um2, and download them."""
        op = requests.get(url)
        data = op.text
        soup = BeautifulSoup(data, 'html.parser')
        # The thread's page count is the second 'red' span on the page.
        tiePage = soup.find_all('span', attrs={'class': 'red'})
        tiePage = tiePage[1].text
        print('该贴共有{}页'.format(tiePage))
        for tiePageCount in range(1, int(tiePage) + 1):
            # BUG FIX: the original format string '{}?pn=' had only one
            # placeholder, silently dropping the page number — every queued
            # URL pointed at the same (first) page of the thread.
            um3.add('{}?pn={}'.format(url, tiePageCount))

        # Process each queued thread page.
        tieUrl = um3.get()
        while tieUrl:
            op = requests.get(tieUrl)
            data = op.text
            soup = BeautifulSoup(data, 'html.parser')
            # In-post images carry the BDE_Image class.
            for i in soup.find_all('img', attrs={'class': 'BDE_Image'}):
                um2.add(i['src'])
            # Two consumers drain um2 in parallel: one extra worker thread
            # plus this thread (downImage loops until the queue is empty).
            thread2 = threading.Thread(target=downImage)
            thread2.start()
            downImage()
            tieUrl = um3.get()


    # Pull every thread link off the queue and crawl it, one worker thread
    # per thread URL.
    crawlers = []
    url = um1.get()
    n = 1
    while url:
        print('正在爬取第{}个帖子'.format(n))
        n = n + 1
        # BUG FIX: the original wrote target=getImage(url), which *called*
        # getImage synchronously and handed its None return value to Thread,
        # so the started thread did nothing. Pass the callable with args.
        thread = threading.Thread(target=getImage, args=(url,))
        thread.start()
        crawlers.append(thread)
        url = um1.get()
    # Wait for every crawler before declaring completion, so the success
    # email is not sent while downloads are still running.
    for thread in crawlers:
        thread.join()
    print('已完成所有页面的爬取！')
    print('正在发送邮件...')
    try:
        import smtplib
        from email.mime.text import MIMEText
        from email.header import Header
        from email.mime.multipart import MIMEMultipart

        # NOTE(review): credentials are hard-coded in source — move them to
        # an environment variable or config file.
        sender = 'msg_sender@sina.com'
        receiver = '909221500@qq.com'
        subject = '{}吧的爬虫已经爬取结束'.format(baname)
        smtpserver = 'smtp.sina.com'
        username = 'msg_sender@sina.com'
        password = '909221500'

        msg = MIMEMultipart()
        msgText = MIMEText('{}吧的图片已经爬好了，主人来舔吧~~'.format(baname))
        msg['From'] = sender
        msg['To'] = receiver
        msg['Subject'] = Header(subject, 'utf-8')
        msg.attach(msgText)

        smtp = smtplib.SMTP()
        smtp.connect(smtpserver)
        smtp.login(username, password)
        smtp.sendmail(sender, receiver, msg.as_string())
        smtp.quit()
        print('邮件发送成功！')
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt (the
        # documented way to quit) is not swallowed here.
        print('邮件发送失败，退出进程')
except Exception as err:
    try:
        import smtplib    
        from email.mime.text import MIMEText    
        from email.header import Header    
        from email.mime.multipart import MIMEMultipart

        sender = 'msg_sender@sina.com'    
        receiver = '909221500@qq.com'    
        subject = '{}吧的爬虫意外退出'.format(baname)  
        smtpserver = 'smtp.sina.com'    
        username = 'msg_sender@sina.com'    
        password = '909221500'    

        msg = MIMEMultipart()
        msgText = MIMEText('{}吧的爬虫意外退出，异常：{}'.format(baname,err) ) #中文需参数‘utf-8’，单字节字符不需要
        print('{}吧的爬虫意外退出，异常：{}'.format(baname,err) )
        msg['From'] = sender
        msg['To']=receiver
        msg['Subject'] = Header(subject, 'utf-8')
        msg.attach(msgText)

        smtp = smtplib.SMTP()    
        smtp.connect('smtp.sina.com')    
        smtp.login(username, password)    
        smtp.sendmail(sender, receiver, msg.as_string())    
        smtp.quit()
        print('邮件发送成功！')
    except :
        print('邮件发送失败，退出进程')
    pass
