#coding:utf-8

from bs4 import BeautifulSoup
from scrapy.selector import Selector				
import scrapy
import requests,os
import json,re,logging,time
from PIL import Image 
import hashlib
from io import BytesIO
class news(object):
    """Scraper for http://news.sina.com.cn/.

    Collects article links from the front page (``.list_14`` blocks), then for
    each article extracts title / publication time / body, downloads the images
    referenced in the body and rewrites their URLs to the local copies.

    NOTE(review): ``getList`` currently discards the dict returned by
    ``getDetail`` — the original DB-insert code was unreachable and referenced
    undefined names, so it was removed; re-add persistence deliberately.
    """

    def __init__(self):
        # Front page whose link list is the crawl seed.
        self.url = "http://news.sina.com.cn/"
        # Directory for downloaded images and the log file (Windows path).
        self.path = r'C://Users//123ad//Desktop//pics//'
        logging.basicConfig(  # level=logging.DEBUG,
            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
            datefmt='%a, %d %b %Y %H:%M:%S',
            filename=self.path + 'logging.log',
            filemode='a')

    def getList(self):
        """Fetch the front page and process every article link found on it.

        Video links are skipped; any per-article failure is logged and does
        not abort the crawl.
        """
        r = requests.get(self.url)
        soup = BeautifulSoup(r.content, 'lxml')
        for link in self.getUrl_list(soup):  # link is an article URL
            try:
                # URL looks like http://<subdomain>.sina.com.cn/... ;
                # characters 7.. of the first dot-segment are the subdomain.
                the_type = link.split('.')[0][7:]
                if the_type == 'video':
                    logging.warning(u'类型为视频:' + link)
                    continue
                self.getDetail(link)
            except Exception as e:
                # Best-effort crawl: log and move on to the next article.
                logging.warning(str(e))

    def getDetail(self, m):
        """Fetch one article page and return its fields.

        :param m: absolute URL of the article page.
        :return: dict with keys 'time', 'src', 'content', 'title', or
                 ``False`` when the page has no ``#artibody`` or embeds a
                 video player.
        """
        page = requests.get(m).content  # raw article HTML

        pub_time = self.getTime(page)   # do not shadow the `time` module
        title = self.getTitle(page)

        article = BeautifulSoup(page, 'lxml').select('#artibody')
        # select() returns a list: empty on miss.  Also skip pages whose body
        # embeds a video player (marked by the autoplay caption).
        if not article or u'自动播放' in article[0].get_text():
            logging.warning(u'内容不含artibody或者含有视频：' + m)
            return False

        sel = Selector(text=str(article[0]), type="html")
        content = sel.xpath('//div').extract()[0].encode('utf-8')

        body = BeautifulSoup(content, 'lxml')
        img_urls = [wrapper.img['src'] for wrapper in body.select('.img_wrapper')]

        local_paths = []
        for img_url in img_urls:
            saved = self.downimg(img_url)
            if saved is False:
                # Download failed: keep the remote URL in the body unchanged.
                local_paths.append(img_url)
            else:
                local_paths.append(saved['local'])

        body = str(body)
        for original, local in zip(img_urls, local_paths):
            body = body.replace(original, local)
        body = self.getrealbody(body)

        return {'time': pub_time,
                'src': m,
                'content': body,
                'title': title}

    def getrealbody(self, body):
        """Return only the <p> paragraphs of *body*, concatenated as one
        UTF-8 string (strips wrappers, scripts and image markup)."""
        sel = Selector(text=str(body), type="html")
        paragraphs = sel.xpath('//div//p').extract()
        return ''.join(p.encode('utf-8') for p in paragraphs)

    def getUrl_list(self, soup):
        """Extract every href from the front page's ``.list_14`` link blocks.

        :param soup: BeautifulSoup of the front page.
        :return: list of URL strings.
        """
        blocks = soup.select('.list_14')  # blk_09
        sel = Selector(text=str(blocks), type="html")
        return sel.xpath('//li//@href').extract()

    def downimg(self, url):
        """Download one image into ``self.path``.

        :param url: remote image URL.
        :return: ``{'local': path, 'original': url}`` on success,
                 ``False`` on any failure (logged).
        """
        if not os.path.exists(self.path):
            os.makedirs(self.path)  # create intermediate directories too
        try:
            res = requests.get(url, stream=True)
            image = res.content
            # Detect the real image format from the bytes, not the URL suffix.
            # Kept inside the try: a corrupt download must return False,
            # not propagate.
            extension = Image.open(BytesIO(image)).format.lower()
            digest = hashlib.md5(url).hexdigest()
            img_name = (time.strftime('%a%b%d%H%M%S%Y', time.localtime())
                        + digest + '.' + extension)
            local_path = self.path + img_name
            with open(local_path, "wb") as fh:  # `with` closes the file
                fh.write(image)
            return {'local': local_path, 'original': url}
        except Exception as e:
            logging.warning(e)
            return False

    def getTime(self, content):
        """Extract the publication-time string from the article HTML.

        :return: stripped time string, or ``False`` when absent.
        """
        tags = BeautifulSoup(content, 'lxml').select('#navtimeSource')
        if not tags:  # select() yields a list; [0] on empty would raise
            return False
        match = re.search(
            r'<span class="time-source" id="navtimeSource">([\w\W]*?)<span>',
            str(tags[0]))
        if match is None:
            return False
        return match.group(1).strip()

    def getTitle(self, content):
        """Extract the title element from the article HTML.

        :return: the ``#artibodyTitle`` tag, or ``False`` when absent.
        """
        tags = BeautifulSoup(content, 'lxml').select('#artibodyTitle')
        if not tags:  # guard the empty list before indexing
            return False
        return tags[0]

        
        


if __name__ == '__main__':
    # Entry point: build the scraper and crawl the front page.
    crawler = news()
    crawler.getList()
    
   

        
        

