# coding: utf-8 
# @Time : 2021/5/11 10:07
# @Author : shenshaoxiong
# @FileName: NewsSpider.py
# @Email   : 765105236@qq.com

import re
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from JNSNewsSpider.items import NewsItem
from scrapy.selector import Selector
from scrapy.http import Request
from urllib.request import urlopen
from lxml import etree
from ..until.Funcs import test1
from ..until.Funcs import get_mysql_data
from ..until.GetURL2db import get_url_2_db
from ..until.GetHTML2db import get_html_2_db
from ..until.GetFieldData2db import get_field_data_2_db
from ..until.Constant import HOST, USER, PASSWORD, DB, PORT, log_table, log_table_field_list
from ..until.Funcs import insert_mysql
from ..until.Funcs import get_md5
from ..until.Funcs import get_now_time
from ..until.Funcs import update_data
from ..until.Funcs import get_html_title_content
from ..until.Funcs import get_text_date
import scrapy
import sys
import json
import random
import time

def getStart():
    """Fetch the seed rows (source, url) for every portal from news_base."""
    return get_mysql_data(
        host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
        table="news_base",
        field_list=["source", "url"],
    )

def getUrlList(size):
    """Fetch up to `size` unprocessed (state=0) rows from news_url_list, newest first."""
    return get_mysql_data(
        host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
        table="news_url_list",
        field_list=["id", "source", "url", "state", "insert_time"],
        size=size,
        filter="state=0",
        order=" insert_time desc ",
    )
 

def getUrl(task_data_list):
    """Extract the 'url' field of each task row.

    Args:
        task_data_list: iterable of dict-like rows, each with a 'url' key.

    Returns:
        list of url strings, in input order.
    """
    # Comprehension replaces the original append loop, which also shadowed
    # the builtin `list`.
    return [task_data["url"] for task_data in task_data_list]

def getSource(task_data_list, url):
    """Return the 'source' of the first row whose 'url' equals `url`, else None."""
    return next(
        (row["source"] for row in task_data_list if row["url"] == url),
        None,
    )

def save_news_dw(item):
    """Persist a fully-parsed news item and mark its raw-html row as processed.

    Inserts one row into the news_dw result table, then sets state=1 on the
    matching news_detail_html row so it is not reprocessed.

    Args:
        item: mapping with keys id, title, content, abstract, author,
            keywords, organization, publish_time, source, url.
    """
    insert_table = "news_dw"
    insert_field_list = ["id", "title", "content", "abstract", "author",
                         "keywords", "organization", "publish_time",
                         "source", "url", "insert_time"]
    insert_time = get_now_time()
    insert_values_list = [(
        item["id"], item["title"], item["content"], item["abstract"],
        item["author"], item["keywords"], item["organization"],
        item["publish_time"], item["source"], item["url"], insert_time,
    )]

    insert_mysql(host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                 table=insert_table, field_list=insert_field_list,
                 values_list=insert_values_list)
    # Flag the source html row as done only after the result row is written.
    update_sql = "UPDATE news_detail_html SET state = %s WHERE id = %s"
    update_data(host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                sql=update_sql, values_list=[(1, item["id"])])

def get_news_dw(task_data):
    """Insert a placeholder news_dw row for a fetched html record.

    All text fields are stored empty (extraction happens elsewhere). The
    news_detail_html row is flagged processed first; the insert itself is
    best-effort because the id may already exist in news_dw.

    Args:
        task_data: mapping with keys id, source, url (html is ignored here).
    """
    insert_table = "news_dw"
    insert_field_list = ["id", "title", "content", "abstract", "author",
                         "keywords", "organization", "publish_time",
                         "source", "url", "insert_time"]
    insert_time = get_now_time()
    # title/content/abstract/author/keywords/organization/publish_time empty.
    insert_values_list = [(task_data["id"], '', '', '', '', '', '', '',
                           task_data["source"], task_data["url"], insert_time)]

    # Mark the raw-html row as processed even if the insert below fails.
    update_sql = "UPDATE news_detail_html SET state = %s WHERE id = %s"
    update_data(host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                sql=update_sql, values_list=[(1, task_data["id"])])
    try:
        insert_mysql(host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                     table=insert_table, field_list=insert_field_list,
                     values_list=insert_values_list)
    except Exception:
        # Best-effort: duplicate-key failures are expected and ignored on
        # purpose (matches the original behaviour).
        pass
        
def getProxy():
    """Fetch every active proxy (state=0) from the proxy table."""
    return get_mysql_data(
        host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
        table="proxy",
        field_list=["ipport"],
        size=None,
        filter='state =0',
    )

def delProxy(ipport):
    """Disable a proxy by setting state=1 for the given 'ip:port' string.

    Args:
        ipport: proxy address string as stored in the proxy table.
    """
    update_sql = "UPDATE proxy SET state = 1 WHERE ipport = %s"
    # BUG FIX: the original appended (ipport) — just the bare string, not a
    # tuple — while every other update_data call site passes a list of
    # parameter tuples. (ipport,) matches that convention.
    update_data(host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                sql=update_sql, values_list=[(ipport,)])
            


class NewsSpider(scrapy.Spider):
    """Crawls list and detail pages of major Chinese news portals.

    Seed urls and the proxy pool come from MySQL. Discovered links and raw
    html are written back to MySQL (news_url_list / news_detail_html), and a
    per-site parse_item_* method extracts the structured fields that are then
    stored via save_news_dw().
    """
    name = 'news'
    allowed_domains = ['baidu.com','sina.com.cn','163.com','qq.com','sohu.com','xinhuanet.com','thepaper.cn','ifeng.com','chinanews.com']
    # NOTE: these three run at class-definition (import) time and hit the DB.
    start_data = getStart()
    start_urls = getUrl(start_data)
    proxyList = getProxy()

    def _pick_proxy(self):
        """Return a random 'ip:port' from the proxy pool, or '' if empty."""
        if len(self.proxyList) > 0:
            return random.choice(self.proxyList)['ipport']
        return ''

    @staticmethod
    def _extract_abs_links(html):
        """Return deduplicated absolute <a href> links from raw html.

        Bare site roots (urls ending in '/') are skipped so only article-like
        links are followed.
        """
        page = etree.HTML(html)
        return list({p for p in page.xpath('//a/@href')
                     if p[0:4] == "http" and p[-1] != "/"})

    @staticmethod
    def _new_item(response):
        """Return a NewsItem with every field initialised to '' and url set."""
        item = NewsItem()
        for key in ('id', 'source', 'title', 'author', 'content',
                    'publish_time', 'abstract', 'keywords', 'organization'):
            item[key] = ''
        item['url'] = response.url
        return item

    def start_requests(self):
        # Crawl the portal front pages first.
        for url in self.start_urls:
            # BUG FIX: the original built this Request without yielding it,
            # so the seed pages were never actually scheduled.
            yield Request(url=url, callback=self.parse)
        inow = 0
        icnt = 0
        # Then drain up to 10 batches of pending detail urls from the DB.
        while icnt < 10:
            size = str(inow * 1000) + ",1000"
            try:
                start_url_list = getUrlList(size)
                for url_list_item in start_url_list:
                    proxy_one = self._pick_proxy()
                    self.logger.debug("列表使用代理 proxy_one: %s" % proxy_one)
                    # BUG FIX: the original silently dropped the Request it
                    # built whenever inow % 50 == 1 (missing yield); every
                    # pending url is yielded now.
                    yield scrapy.Request(
                        url=url_list_item["url"],
                        meta={'proxy': proxy_one,
                              'id': url_list_item["id"],
                              'source': url_list_item["source"],
                              'url': url_list_item["url"],
                              'insert_time': url_list_item["insert_time"]},
                        callback=self.detailParse)
                    inow = inow + 1
                    time.sleep(1)
            except Exception as e:
                self.logger.debug("获取列表异常 : %s" % e)
            icnt = icnt + 1

    def parse(self, response):
        """Parse a portal front page: record every absolute link and follow it."""
        starturl = response.url
        source = getSource(self.start_data, starturl)
        insert_table = "news_url_list"
        insert_field_list = ["id", "source", "url", "insert_time", "state"]
        url_list = self._extract_abs_links(response.body)
        insert_time = get_now_time()
        proxy_one = self._pick_proxy()
        self.logger.debug("使用代理 proxy_one: %s" % proxy_one)
        for url in url_list:
            try:
                url_id = get_md5(url)
                filter = "id='%s'" % url_id
                select_data_list = get_mysql_data(
                    host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                    table=insert_table, field_list=["id", "state"],
                    size=None, filter=filter)
                meta = {'proxy': proxy_one, 'id': url_id, 'source': source,
                        'url': url, 'insert_time': insert_time}
                if len(select_data_list) > 0:
                    # Known url: refetch only while it is still unprocessed.
                    if select_data_list[0]["state"] == 0:
                        yield scrapy.Request(url=url, meta=meta,
                                             callback=self.detailParse)
                else:
                    # New url: record it first, then fetch the detail page.
                    insert_mysql(host=HOST, user=USER, password=PASSWORD,
                                 db=DB, port=PORT, table=insert_table,
                                 field_list=insert_field_list,
                                 values_list=[(url_id, source, url,
                                               insert_time, 0)])
                    yield scrapy.Request(url=url, meta=meta,
                                         callback=self.detailParse)
            except Exception as e:
                self.logger.debug("url_list error: %s " % (e))

    def detailParse(self, response):
        """Handle a detail page: archive raw html, extract fields, follow links."""
        url = response.url
        insert_table = "news_detail_html"
        insert_field_list = ["id", "source", "url", "html", "state", "insert_time"]
        news_id = response.meta["id"]
        source = response.meta["source"]
        html = response.body
        insert_time = get_now_time()
        try:
            # Mark the url as fetched, then archive the raw html.
            update_sql = "UPDATE news_url_list SET state = %s WHERE id = %s"
            update_data(host=HOST, user=USER, password=PASSWORD, db=DB,
                        port=PORT, sql=update_sql, values_list=[(1, news_id)])
            insert_mysql(host=HOST, user=USER, password=PASSWORD, db=DB,
                         port=PORT, table=insert_table,
                         field_list=insert_field_list,
                         values_list=[(news_id, source, url, html, 0,
                                       insert_time)])
        except Exception as e:
            self.logger.debug("url_list error: %s " % e)

        try:
            # Dispatch to the site-specific parser; urls matching none of the
            # domains keep the empty NewsItem and fail the title/content
            # check below (the KeyError is caught, as before).
            item = NewsItem()
            if re.search(r"qq\.com", response.url) is not None:
                item = self.parse_item_qq(response)
            if re.search(r"sina\.com\.cn", response.url) is not None:
                item = self.parse_item_sina(response)
            if re.search(r"ifeng\.com", response.url) is not None:
                item = self.parse_item_ifeng(response)
            if re.search(r"sohu\.com", response.url) is not None:
                item = self.parse_item_sohu(response)
            if re.search(r"163\.com", response.url) is not None:
                item = self.parse_item_163(response)
            if re.search(r"baidu\.com", response.url) is not None:
                item = self.parse_item_baidu(response)
            if re.search(r"xinhuanet\.com", response.url) is not None:
                item = self.parse_item_xinhuanet(response)
            if re.search(r"thepaper\.cn", response.url) is not None:
                item = self.parse_item_thepaper(response)
            if re.search(r"chinanews\.com", response.url) is not None:
                item = self.parse_item_chinanews(response)
            item['id'] = news_id
            item['url'] = url
            item['source'] = source
            # Persist only when both title and content were extracted.
            if (len(item['title']) > 0 and len(item['content']) > 0):
                save_news_dw(item)
        except Exception as e:
            self.logger.debug("url_list error: %s" % e)

        # Follow links discovered on the detail page.
        list_insert_table = "news_url_list"
        list_insert_field_list = ["id", "source", "url", "insert_time", "state"]
        url_list = self._extract_abs_links(html)
        proxy_one = self._pick_proxy()
        self.logger.debug("使用代理 proxy_one: %s" % proxy_one)
        for next_url in url_list:
            try:
                next_id = get_md5(next_url)
                filter = "id='%s'" % next_id
                select_data_list = get_mysql_data(
                    host=HOST, user=USER, password=PASSWORD, db=DB, port=PORT,
                    table=list_insert_table, field_list=["id", "state"],
                    size=None, filter=filter)
                meta = {'proxy': proxy_one, 'id': next_id, 'source': source,
                        'url': next_url, 'insert_time': insert_time,
                        'state': 0}
                if len(select_data_list) > 0:
                    if select_data_list[0]["state"] == 0:
                        yield scrapy.Request(url=next_url, meta=meta,
                                             callback=self.detailParse)
                else:
                    insert_mysql(host=HOST, user=USER, password=PASSWORD,
                                 db=DB, port=PORT, table=list_insert_table,
                                 field_list=list_insert_field_list,
                                 values_list=[(next_id, source, next_url,
                                               insert_time, 0)])
                    yield scrapy.Request(url=next_url, meta=meta,
                                         callback=self.detailParse)
            except Exception as e:
                self.logger.debug("url_list error: %s" % e)

    def parse_item_baidu(self, response):
        """Parse a Baidu (baijiahao) article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('/html/head/title/text()').extract()
        if len(title) > 0:
            item['title'] = ''.join(title).strip()

        author = response.xpath('//div[re:test(@class,"^index-module_authorTxt*")]/a/p[re:test(@class,"^index-module_authorName*")]/text()').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[re:test(@class,"^index-module_articleWrap*")]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        publish_time = response.xpath('/html/head/meta[@itemprop="dateUpdate"]/@content').extract()
        if len(publish_time) > 0:
            item['publish_time'] = publish_time[0]

        organization = response.xpath('//div[re:test(@class,"^index-module_articleSource*")]/span[re:test(@class,"^index-module_accountAuthentication*")]/text()').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item

    def parse_item_163(self, response):
        """Parse a 163.com (NetEase) article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('//h1[@class="post_title"]/text()').extract()
        if len(title) > 0:
            item['title'] = ''.join(title).strip()

        author = response.xpath('//div[@class="post_author"]/a/img/@alt').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[@class="post_content"]/div[@class="post_body"]/p/text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        # NOTE(review): '@data-publishtime' is relative to the document root
        # and likely never matches — publish_time stays ''. Needs the real
        # element path; confirm against a live page before changing.
        publish_time = response.xpath('@data-publishtime').extract()
        if len(publish_time) > 0:
            item['publish_time'] = publish_time[0]

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('//div[@class="post_info"]/text()').extract()
        if len(organization) > 0:
            # Keep only the text after the "来源:" (source) label.
            item['organization'] = organization[0].partition('来源:')[2].strip()
        return item

    def parse_item_qq(self, response):
        """Parse a qq.com article; most fields come from the window.DATA JSON."""
        item = self._new_item(response)

        content = response.xpath('//div[@class="content-article"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        script_list = response.xpath('/html/head/script//text()').extract()
        for scipt_item in script_list:
            if scipt_item.find("window.DATA") >= 0:
                # Strip the 13-char "window.DATA =" prefix to get raw JSON.
                strjson = scipt_item[13:]
                newsjson = json.loads(strjson)
                item['title'] = newsjson["title"]
                item['author'] = newsjson["media"]
                item['organization'] = newsjson["media"]
                item['publish_time'] = newsjson["pubtime"]

        # BUG FIX: the original left this return commented out, so the
        # caller received None and every parsed field was lost.
        return item

    def parse_item_sina(self, response):
        """Parse a sina.com.cn article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('//h1[@class="main-title"]/text()').extract()
        if len(title) > 0:
            item['title'] = ''.join(title).strip()

        author = response.xpath('/html/head/meta[@name="mediaid"]/@content').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[@id="article"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        publish_time = response.xpath('/html/head/meta[@name="weibo: article:create_at"]/@content').extract()
        if len(publish_time) > 0:
            # Meta value lacks seconds; pad to a full timestamp.
            item['publish_time'] = publish_time[0] + ":00"

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('/html/head/meta[@name="mediaid"]/@content').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item

    def parse_item_sohu(self, response):
        """Parse a sohu.com article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('/html/head/title/text()').extract()
        if len(title) > 0:
            item['title'] = ''.join(title).strip()

        author = response.xpath('/html/head/meta[@name="mediaid"]/@content').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//article[@id="mp-editor"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        publish_time = response.xpath('/html/head/meta[@itemprop="datePublished"]/@content').extract()
        if len(publish_time) > 0:
            # Meta value lacks seconds; pad to a full timestamp.
            item['publish_time'] = publish_time[0] + ":00"

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('/html/head/meta[@name="mediaid"]/@content').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item

    def parse_item_ifeng(self, response):
        """Parse an ifeng.com article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('/html/head/meta[@property="og:title"]/@content').extract()
        if len(title) > 0:
            item['title'] = title[0].strip()

        author = response.xpath('//p[re:test(@class,"^author*")]/text()').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[re:test(@class,"^text*")]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        # NOTE(review): the trailing space in 'og:time ' looks suspicious but
        # may match the site's actual meta name — confirm before changing.
        publish_time = response.xpath('/html/head/meta[@name="og:time "]/@content').extract()
        if len(publish_time) > 0:
            item['publish_time'] = publish_time[0].strip()

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('//span[re:test(@class,"^source*")]/a/text()').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item

    def parse_item_xinhuanet(self, response):
        """Parse a xinhuanet.com article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('//span[@class="title"]/text()').extract()
        if len(title) > 0:
            item['title'] = title[0].strip()

        author = response.xpath('/html/head/meta[@name="mediaid"]/@content').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[@id="detail"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        publish_time = response.xpath('//div[@class="info"]/text()').extract()
        if len(publish_time) > 0:
            item['publish_time'] = publish_time[0].strip()

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('//div[@class="source"]/text()').extract()
        if len(organization) > 0:
            # Keep only the text after the "来源:" (source) label.
            item['organization'] = organization[0].partition('来源:')[2].strip()
        return item

    def parse_item_thepaper(self, response):
        """Parse a thepaper.cn article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('//h1[@class="news_title"]/text()').extract()
        if len(title) > 0:
            item['title'] = title[0].strip()

        content = response.xpath('//div[@class="news_txt"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        # No reliable publish-time element found on the page; fall back to
        # the crawl time (original behaviour).
        item['publish_time'] = get_now_time()

        abstract = response.xpath('/html/head/meta[@name="Description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="Keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('//span[re:test(@class,"^source*")]/a/text()').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item

    def parse_item_chinanews(self, response):
        """Parse a chinanews.com article page into a NewsItem."""
        item = self._new_item(response)

        title = response.xpath('//div[@class="content"]/h1/text()').extract()
        if len(title) > 0:
            item['title'] = title[0].strip()

        # BUG FIX: the original xpath used [id="author_baidu"] (missing '@'),
        # which tests the child element <id> rather than the id attribute.
        author = response.xpath('//div[@id="author_baidu"]//text()').extract()
        if len(author) > 0:
            item['author'] = author[0].strip()

        content = response.xpath('//div[@class="content"]/div[@class="left_zw"]//text()').extract()
        if len(content) > 0:
            item['content'] = ''.join(content).strip()

        publish_time = response.xpath('//span[@id="pubtime_baidu"]/text()').extract()
        if len(publish_time) > 0:
            item['publish_time'] = publish_time[0].strip()

        abstract = response.xpath('/html/head/meta[@name="description"]/@content').extract()
        if len(abstract) > 0:
            item['abstract'] = abstract[0]

        keywords = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
        if len(keywords) > 0:
            item['keywords'] = keywords[0]

        organization = response.xpath('//span[@id="source_baidu"]/a/text()').extract()
        if len(organization) > 0:
            item['organization'] = organization[0]
        return item
