# -*- coding:utf-8 -*-
# coding=utf-8
# encoding: utf-8
from model import  avinfo
import pymysql
import sys
import os
import importlib
import requests
importlib.reload(sys)
import  uuid
import imghdr
print(sys.getdefaultencoding())
from bs4 import BeautifulSoup
# --- Crawl setup: resolve output directory and discover the page count ---
# Current working directory (captured for debugging; not used below).
CurrentPath = os.getcwd()
# Debug print: note this prints the os.path module object, not a path string.
print(os.path)
#print (sys.argv[0])
# dirs[0] is the directory containing this script; all downloaded HTML and
# images are stored under it (see the crawl loop below).
dirs=os.path.split( os.path.realpath( sys.argv[0] ))
#print (dirs[0]+os.sep+"html"+os.sep+"img")

# First listing page to crawl.
startpage=1
# Cookies the site expects; values look like fixed placeholders — TODO confirm
# these are still accepted by the server.
cookies = dict(playno1='playno1Cookie', playno1_referer='/')
# Fetch listing page 1 of category 78 only to discover how many pages exist.
page=requests.get("http://www.playno1.com/portal.php?mod=list&catid=78&page=1",cookies=cookies)
html=page.content
nodes= BeautifulSoup(html,"html5lib",from_encoding='utf-8')
# The <a class="last"> element links to the final listing page; its href ends
# with the page number, so stripping the fixed URL prefix leaves that number.
endpageclass=nodes.find("a","last")
endpage=int(str(endpageclass['href']).replace('/portal.php?mod=list&catid=78&page=',''))
print (endpage)
# --- Main crawl loop: walk every listing page, save each article locally ---
# Count of articles found already downloaded (used only for the notice below).
havereadydown = 0
# endpage is the number of the LAST listing page, so the range must run to
# endpage + 1 — the previous range(startpage, endpage) skipped the final page.
for j in range(startpage, endpage + 1):

    try:
        page = requests.get("http://www.playno1.com/portal.php?mod=list&catid=78&page=" + str(j), cookies=cookies)
    except Exception:
        # Best-effort crawl: skip a listing page we cannot fetch.
        continue
    #print (page.content)
    html = page.content
    baseurl = "http://www.playno1.com/"
    nodes = BeautifulSoup(html, "html5lib", from_encoding='utf-8')
    # Each article teaser on the listing page sits in a div.fire_float.
    avnewsall = nodes.find_all("div", "fire_float")
    htmlhead = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8" /></head>'
    for avnews in avnewsall:
        detailurl = avnews.find("a")
        publishtime = avnews.find("span", "fire_left").string
        # First 7 chars of the date string — presumably "YYYY-MM"; articles
        # are grouped into one directory per month.
        publishyear = str(publishtime)[0:7]
        htmldir = dirs[0] + os.sep + "html" + os.sep + publishyear
        imgdir = dirs[0] + os.sep + "html" + os.sep + publishyear + os.sep + "img"
        if not os.path.exists(htmldir):
            os.makedirs(htmldir)
        if not os.path.exists(imgdir):
            os.makedirs(imgdir)
        print(publishyear)
        avinfo.contenturl = baseurl + detailurl['href']
        try:
            detialpage = requests.get(avinfo.contenturl, cookies=cookies)
            detailhtml = detialpage.content
            #print(detailhtml)
            detailnodes = BeautifulSoup(detailhtml, "html5lib", from_encoding='utf-8')
            avnewsinfo = detailnodes.find("div", "d")
            imgs = avnewsinfo.find_all("img")
            # NOTE(review): str() of a .contents list yields a Python list
            # repr, not clean HTML; kept as-is because existing saved files
            # use this format. Lowercased so the image-URL replace below
            # matches case-insensitively.
            avnewsinfohtml = str(avnewsinfo.contents).lower()
        except Exception:
            # Skip articles whose detail page cannot be fetched or parsed.
            continue
        title = detailurl['title']
        print("更新新闻 标题:" + title)
        # NOTE(review): title may contain characters invalid in filenames —
        # the save below relies on the broad except to skip such articles.
        htmlfilename = htmldir + os.sep + str(publishtime)[0:10] + "_" + str(title) + ".html"
        if not os.path.exists(htmlfilename):
            print("预计有" + str(len(imgs)) + "个图片要下载")
            i = 1
            for img in imgs:
                try:
                    imgurl = str(img.get('src'))
                    if imgurl.startswith("http"):
                        print("开始下载第" + str(i) + "个图片 :" + imgurl)
                        content = requests.get(imgurl)
                        if content.status_code == 200:
                            print("下载图片成功!")
                            # Sniff the image type from the payload; fall back
                            # to 'txt' when it is not a recognized format.
                            imgtype = imghdr.what('', h=content.content)
                            if not imgtype:
                                imgtype = 'txt'
                            # Random filename avoids collisions across articles.
                            filename = '{}.{}'.format(uuid.uuid1(), imgtype)
                            allfilename = str(imgdir + os.sep + filename)
                            with open(allfilename, 'wb') as f:
                                f.write(content.content)
                            # Rewrite the remote URL to the local relative path.
                            avnewsinfohtml = avnewsinfohtml.replace(str(imgurl).lower(), str("img/" + filename))
                        i = i + 1
                except Exception:
                    # Best-effort: a failed image must not abort the article.
                    continue
            # (typo fix: was "保存成THML文件！")
            print("保存成HTML文件！")
            #avnewsinfohtml=avnewsinfohtml.replace(u'\xa0', u' ')
            try:
                with open(htmlfilename, 'w', encoding='utf-8') as f:
                    f.write(htmlhead + avnewsinfohtml)
                print("保存成功!")
            except Exception:
                print("保存失败!")
                continue
        else:
            if havereadydown >= 10:
                print('累计下载过的够10次了')
               # os._exit(0)
            havereadydown = havereadydown + 1
            print(title + "已经下载过了！")


def downimg(imgurl):
    """Download the image at *imgurl* and return the relative path to embed
    in the saved HTML.

    Relies on the module-level ``imgdir`` set by the crawl loop for the
    destination directory. When *imgurl* is not an http(s) URL, nothing is
    downloaded and the bare prefix ``"img/"`` is returned.
    """
    filename = ""
    if imgurl.startswith("http"):
        content = requests.get(imgurl)
        # Sniff the image type from the payload; fall back to 'txt' when it
        # is not a recognized image format.
        imgtype = imghdr.what('', h=content.content)
        if not imgtype:
            imgtype = 'txt'
        filename = '{}.{}'.format(uuid.uuid1(), imgtype)
        allfilename = str(imgdir + os.sep + filename)
        # BUG FIX: the original opened `filename` (relative to the CWD)
        # instead of the full path `allfilename`, so images were written
        # outside the img directory that the returned path points at.
        with open(allfilename, 'wb') as f:
            f.write(content.content)
    return "img/" + filename
