#coding:utf-8

from bs4 import BeautifulSoup
from scrapy.selector import Selector				
import scrapy
import requests,os
import json,re,logging



class news(object):
    """News scraper for Sina ('xinlang') and Toutiao.

    For each article found, downloads every embedded image into a local
    directory under ``self.path``, rewrites the article HTML so the image
    URLs point at the local ``file://`` copies, and prints the resulting
    HTML body to stdout.

    NOTE(review): this is Python 2 code (print statements); it will not run
    under Python 3 as-is.
    """

    def __init__(self):
        # Directory where downloaded images and the run log are stored.
        self.path=r'C://Users//123ad//Desktop//pics//'# image save directory
        # Log to a file inside that directory; the log is truncated on every
        # run (filemode='w').
        logging.basicConfig(#level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',  
                    datefmt='%a, %d %b %Y %H:%M:%S',  
                    filename=self.path+'logging.log',  
                    filemode='w') 
        
    def xinlang(self,url):
        """Scrape the Sina news page at *url*.

        Collects every link inside the '.list_14' block, fetches each
        article, skips video links, downloads the images found in the
        article's '#artibody' section into <path>/xinlang/<segment>/ and
        prints the article HTML with image URLs rewritten to local paths.
        """
        try:
            r = requests.get(url)
            response=r.content
        except:
            # NOTE(review): if requests.get itself raised, `r` was never
            # bound, so these two lines raise NameError -- and `response`
            # stays undefined, crashing the BeautifulSoup call below.  The
            # bare except also swallows KeyboardInterrupt etc.
            logging.warning(r.status_code)
            logging.warning(r.raise_for_status())

        soup=BeautifulSoup(response,'lxml')
        soup=soup.select('.list_14')#blk_09 -- presumably an alternative container class; verify against the live page
        
        # NOTE(review): unused local -- the code below uses self.path instead.
        path=r'C://Users//123ad//Desktop//pics//'# image directory
        soup = Selector(text=str(soup), type="html")
        Title=[]
        # Collect all link texts as scrapy Selector objects; .extract() is
        # called on each later.  Assumes //li//text() and //li//@href align
        # 1:1 so Title[n] is the headline for the n-th link -- TODO confirm.
        for i in soup.xpath('//li//text()'):
            Title.append(i)
        #print len(soup.xpath('//li//@href').extract())

        n=-1
        for m in soup.xpath('//li//@href').extract():# m is one article URL
            logging.warning(u'开始抓取页面：'+m)
        
        
            n+=1
        
        
            try:
                title = Title[n].extract()# headline text (NOTE(review): never used afterwards)
        
                # Slice past 'http://' and take the hostname prefix before the
                # first dot, e.g. 'video' for video.sina.com.cn links.
                the_type = m.split('.')[0][7:]#get types   
                if the_type=='video':
                    logging.warning(u'类型为视频:'+m)
                    continue
                content=requests.get(m).content# fetch the article page
                soup=BeautifulSoup(content,'lxml')
                soup=soup.select('#artibody')# article body container
                soup = Selector(text=str(soup[0]), type="html")

                content=soup.xpath('//div').extract()[0].encode('utf-8')
                body=BeautifulSoup(content,'lxml')
                img_urls=[]
        
                # Collect the src of every image inside an .img_wrapper div.
                for x in range(len(body.select('.img_wrapper'))):
    
                        img_url=body.select('.img_wrapper')[x].img['src']
                        img_urls.append(img_url)
                new_img_urls=[]
                for i in img_urls:
                    local_filename = i.split('/')[-1]
                    # Second-to-last URL path segment, used as the local
                    # sub-directory name -- presumably a date; TODO confirm.
                    img_time=i.split('/')[-2]
                    if  not os.path.exists(str(self.path)+"xinlang//"+"%s//"%(img_time)):
                        os.makedirs(str(self.path)+"xinlang//"+"%s//"%(img_time)) #mkdir
    
    
                    # Local file:// URL that will replace the remote one.
                    new_img_urls.append("file:///C|/Users/123ad/Desktop/pics/xinlang/"+"%s/"%(img_time)+str(local_filename))
                    html = requests.get(i).content 
                    imagePath=str(self.path)+"xinlang//"+"%s//"%(img_time)+'%s'%(local_filename)
                    print imagePath
                    f=open(imagePath,'wb')# binary mode
                    f.write(html)
                    f.close()
                # Swap every remote image URL for its local copy.  Note that
                # after the first iteration `body` is a str, not a soup.
                for i in range(len(img_urls)):
                    body=str(body).replace(img_urls[i],new_img_urls[i])
                #print len(img_urls)
                print str(body)# article body HTML
                logging.warning('success')
                logging.warning("**********************************"+u'当前页面抓取结束')
            except:
                # NOTE(review): bare except -- any per-article failure
                # (missing #artibody, network error, IndexError) is silently
                # logged as "page finished" and skipped.
                logging.warning("**********************************"+u'当前页面抓取结束')
                continue

    def toutiao(self,url1):
        """Scrape Toutiao category feeds.

        Hits a fixed list of Toutiao PC feed API endpoints, and for each
        feed item fetches the article page under *url1*, downloads its
        images into <path>/toutiao/large/ and prints the article HTML with
        image URLs rewritten to local paths.
        """
        # Hard-coded feed endpoints; `as`/`cp` query tokens are presumably
        # time-limited anti-scraping signatures -- TODO confirm they still work.
        urls=['http://www.toutiao.com/api/pc/feed/?category=news_society&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A16548D41F6D663&cp=584F6D0636F3EE1',         
            'http://www.toutiao.com/api/pc/feed/?category=news_entertainment&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A15588F4BF5D949&cp=584F9DD93489CE1',
            'http://www.toutiao.com/api/pc/feed/?category=news_tech&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A135F8A41F2D996&cp=584F8D49F986DE1',
            'http://www.toutiao.com/api/pc/feed/?category=news_sports&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A1E578F49F0D9AC&cp=584F9DD90A5CDE1',          
            'http://www.toutiao.com/api/pc/feed/?category=news_finance&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A1B5E8D4CFFD9FF&cp=584FFDF94F2F7E1',          
            'http://www.toutiao.com/api/pc/feed/?category=news_fashion&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A10558347F1DA1F&cp=584F0D5AA1EF5E1',
            'http://www.toutiao.com/api/pc/feed/?category=news_hot&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A165E824DF1DA55&cp=584F7D2AD555DE1',
            'http://www.toutiao.com/api/pc/feed/?category=news_military&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A1B5F8E4FF4DA73&cp=584FDD6A87C3DE1',
            'http://www.toutiao.com/api/pc/feed/?category=news_discovery&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A155D874FFFDA88&cp=584F8DCA08887E1',
            'http://www.toutiao.com/api/pc/feed/?category=news_world&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A11528A42FFDAA7&cp=584F6DDAEA876E1',
            'http://www.toutiao.com/api/pc/feed/?category=news_history&utm_source=toutiao&widen=1&max_behot_time=0&max_behot_time_tmp=0&as=A1F5A8D41FADAD3&cp=584FCDDAFD33CE1',
        
              ]
        
        for url in urls:
            try:
                r = requests.get(url)
                response=r.content
            except:
                 # NOTE(review): same unbound-`r` problem as in xinlang() --
                 # if the request raised, these lines raise NameError.
                 logging.info(r.status_code)
                 logging.info(r.raise_for_status())
                
            response=json.loads(response)
            for k in response["data"]:                
                source_url = k["source_url"]
                title=k["title"]# item title (NOTE(review): never used afterwards)
                # Build the article URL from the feed item's source_url.
                # NOTE(review): len(url) looks like a bug -- probably meant
                # len(source_url); it truncates the slice by the length of
                # the (long) feed URL.  TODO confirm against live data.
                url2=url1+'a'+source_url[7:len(url)-1]# original article link
                # NOTE(review): rebinds `response` (the parsed feed) -- safe
                # only because the feed was already consumed into this loop.
                response=requests.get(url2).content
                logging.warning(u'开始抓取页面：'+url2)
                soup=BeautifulSoup(response,'lxml')                
                body=soup.find('div',class_="article-content")
                
                time=soup.find('span',class_="time")# publish time (NOTE(review): unused; shadows the stdlib `time` name)
                # Skip pages (photo galleries etc.) that have no normal
                # article-content div.
                if body==None:
                    logging.warning(u'页面找不到符合要求的东西:'+url2)
                    logging.warning("**********************************"+u'当前页面抓取结束')

                    continue   
                category=k["chinese_tag"]# category (NOTE(review): never used afterwards)
                soup=BeautifulSoup(str(body),'lxml')               
                # Match every src="<absolute URL>" attribute in the body.
                # NOTE(review): rebinds `urls` (the feed list being iterated);
                # iteration continues over the original list object only
                # because the for-loop iterator was created before this line.
                urls=re.compile('src="[a-zA-z]+://[^\s]*"')
                urls=urls.findall(str(soup))
                
                if urls==None:
                    # NOTE(review): dead branch -- findall() returns [] when
                    # nothing matches, never None; an empty list just makes
                    # the loops below no-ops.
                    logging.warning(u'页面没有图片链接') 
                    pass
                else:                    
                    img_urls=[]        
                    for x in urls:
                        # Length cap presumably filters to the short 'large'
                        # image-variant URLs -- TODO confirm the threshold.
                        if len(x)<=54:    
                            # Strip the leading 'src="' and the trailing quote.
                            img_url=x[5:len(x)-1]
                            img_urls.append(img_url)
                        pass
                    new_img_urls=[]
                    
                    
                    for i in img_urls:
                        
                        
                        local_filename = i.split('/')[-1]+'.jpg'
                        if  not os.path.exists(str(self.path)+"toutiao//"+'large//'):
                            os.makedirs(str(self.path)+"toutiao//"+"large//")    
                        # Local file:// URL that will replace the remote one.
                        new_img_urls.append("file:///C|/Users/123ad/Desktop/pics/toutiao/"+"large/"+str(local_filename))
                        
                        html = requests.get(i).content 
                        imagePath=str(self.path)+"toutiao//"+"large//"+'%s'%(local_filename)                       
                        f=open(imagePath,'wb')# binary mode
                        f.write(html)
                        f.close()
                    # Swap remote image URLs for their local copies; `body`
                    # becomes a str after the first iteration.
                    for i in range(len(img_urls)):                        
                        body=str(body).replace(img_urls[i],new_img_urls[i])
                    
                    print str(body)# article body HTML
                    logging.warning('success')
                    logging.warning("**********************************"+u'当前页面抓取结束')
# Entry point: scrape both sites once, Sina first, then Toutiao.
if __name__ == '__main__':
    crawler = news()
    crawler.xinlang('http://news.sina.com.cn/')
    crawler.toutiao('http://www.toutiao.com/')

        
        

