{% extends 'base_backend.html' %}
{% load static %}
{% load my_filter %}

{% block header_tail %}
{# Page-specific stylesheet and script includes for the spider-template admin page. #}

<link rel="stylesheet" href="{% static 'zhongbao/zhongbao_admin/css/style.css' %}">
<link rel="stylesheet" href="{% static 'zhongbao/zhongbao_admin/css/iconfont.css' %}">
<link rel="stylesheet" href="{% static 'plugins/datatables/dataTables.bootstrap.css' %}">
<link rel="stylesheet" href="{% static 'public/ComboSelect/css/combo.select.css' %}">
<link rel="stylesheet" href="{% static 'zhongbao/config/css/main.css' %}">
<link rel="stylesheet" href="{% static 'layui/css/layui.css' %}">

{# Static paths must use forward slashes: backslashes are not URL separators and fail on non-Windows servers. #}
<script src="{% static 'plugins/jQuery/jquery-2.2.3.min.js' %}"></script>
<script src="{% static 'plugins/vue1/vue.js' %}"></script>
<script src="{% static 'layui/layui.js' %}"></script>

{% endblock header_tail %}

{% block section_content %}

<div class="box-header with-border">
    <h3 class="box-title">账号设置</h3>
</div>
<div class="row" style="margin-top: 10px;">
    <div class="col-md-12">
        <div class="box box-primary">
            <div class="box-body">
                <!-- Custom Tabs (Pulled to the right) -->
                <div class="nav-tabs-custom">
                    <ul class="nav nav-tabs pull-right">
                        <li class="active"><a href="#tab_1-1" data-toggle="tab">多种编码判断</a></li>
                        <li><a href="#tab_2-2" data-toggle="tab">ctime</a></li>
                        <li><a href="#tab_3-2" data-toggle="tab">论坛模板</a></li>
                        <li><a href="#tab_4-2" data-toggle="tab">民政互动模板</a></li>
                        <li><a href="#tab_5-2" data-toggle="tab">平媒模板</a></li>
                        <li><a href="#tab_6-2" data-toggle="tab">新闻模板</a></li>
                        <li><a href="#tab_7-2" data-toggle="tab">正则匹配详情页链接</a></li>
                        <a class="dropdown-toggle" data-toggle="dropdown" href="#">
                            <button type="button" class="btn btn-success #tab_2-2" id="but"
                                    onclick="window.open('{% url 'zhongbao_admin:novice_down' %}')">模板下载
                            </button>
                            </span>
                        </a>
                    </ul>
                    <div class="tab-content">
                        <div class="tab-pane active" id="tab_1-1">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
#coding=utf-8
#############################################################################
# Copyright (c) 2014  - Beijing Intelligent Star, Inc.  All rights reserved


'''
文件名：wy_ed_dc_qq_news.py
功能：宣汉网爬虫抓取文件。

代码历史：
2014-08-26：庞  威  代码创建
'''
import datetime
import spiderDefault
from urlparse import urljoin
import re
import htmlparser
class MySpider(spiderDefault.Spider):

def __init__(self, cmd_args=None):
    spiderDefault.Spider.__init__(self, cmd_args=cmd_args)

    #类别码，01新闻、02论坛、03博客、04微博 05平媒 06微信  07 视频、99搜索引擎
    self.info_flag = "01"
    self.siteName = "李德虎-香港文汇报讨论区"
    self.site_domain = 'wenweipo.com'

    self.start_urls = [

    'http://paper.wenweipo.com/001YO/',
    'http://paper.wenweipo.com/005WW/',
    'http://paper.wenweipo.com/003HK/',
    'http://paper.wenweipo.com/002CH/',
    'http://paper.wenweipo.com/003TW/',
    'http://paper.wenweipo.com/004GJ/',
    'http://paper.wenweipo.com/006FI/',
    'http://paper.wenweipo.com/008ED/',
    'http://paper.wenweipo.com/009OT/',
    'http://paper.wenweipo.com/011EN/',
    'http://paper.wenweipo.com/010SP/',
    'http://info.wenweipo.com/?action-category-catid-1',
    'http://info.wenweipo.com/',
    'http://info.wenweipo.com/?action-category-catid-2',
    'http://info.wenweipo.com/?action-category-catid-9'

    ]
    self.encoding = 'Big5'
    #self.max_interval = None
    self.debup_uri = None
def get_detail_page_urls(self, data):
    '''
    从列表页获取详情页url; 返回列表
    '''
    detail_page_urls = []

    if data is not None:

        url = data.response.request.url
        loops = data.xpathall('''//img[@class="dot"]/parent::*/a|//ul[@class="m_news"]/li/a|//ul[@class="global_tx_list1"]/li/a''')
        for item in loops:
            post_url = item.xpath("//@href").text()
            post_url = urljoin(url,post_url)
            detail_page_urls.append(post_url)
    return detail_page_urls

def get_detail_page_info(self, data):
    '''
    解析详情页信息；参数data可直接调用xpath,re等方法；
    返回值为字典类型
    '''
    result = []
    if data is None:
        return None
url = data.response.request.url
resp = data.response
char = data.regex("charset=(.*)>").text().strip()
if re.search("utf",char,re.I):
        resp.encoding = "utf8"
elif re.search("gb",char,re.I):
        resp.encoding = "gbk"
elif re.search('big5',char,re.I):
    resp.encoding = "big5"
else:
     return None
data = htmlparser.Parser(resp.text)

    title = data.xpath('''//h1/font|//div[@id="article"]/h1''').text().strip()
    source = self.siteName

    ctime = data.xpath('''(//font[@class="list_newsfast"])[1]|//div[@id="article_extinfo"]/div[2]/text()[last()]''').replace(u'年','-').replace(u'月','-').replace(u'日','').regex('(\d+-\d+-\d+ \d+:\d+|\d+-\d+-\d+)').datetime()
    content = data.xpath('''//p[@class="content_p"]/parent::*|//div[@id="article_body"]''').text().strip()
    try:
        c1_list = data.xpathall('''//script|//style''')
        for i in c1_list:
            c1 = i.text().strip()
            content = content.replace(c1,"",1).strip()
    except:
        pass

    utc_now = datetime.datetime.utcnow()
    post = {
            'url':url,
            'title':title,
            'source':source,
            'ctime':ctime,
            'gtime':utc_now,

            'content':content,
            'siteName':self.siteName,

            'data_db':self.data_db,
            }
    result.append(post)
    return result


if __name__ == "__main__":
spider = MySpider()
spider.proxy_enable = False
spider.init_dedup()
spider.init_downloader()


# ------------ parse() ----------
#     url = 'http://www.baotuowang.com/forum.php?mod=forumdisplay&fid=48&filter=author&orderby=dateline'
#     resp = spider.download(url)
#
#     urls, fun, next_url = spider.parse(resp)
#     for url in urls:
#         print url


url = 'http://paper.wenweipo.com/2015/10/30/YO1510300002.htm'
resp = spider.download(url)
#    resp.encoding="utf-8"
# print resp.url
#    print resp.text
res = spider.parse_detail_page(resp, url)

if res is not None:
    for item in res:
        for k, v in item.iteritems():
            print k,v


                      </pre>
                            </div>
                        </div>
                        <!-- /.tab-pane -->
                        <div class="tab-pane" id="tab_2-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
# 常用
#例： 2018-08-24    2018-08-24 13:03:02
ctime = data.xpath('''//span[@class="_1wVn6"]//time''').datetime()

#文本时间1
# 例：2018-08-24 13时03分02秒
c_time = data.xpath('''//span[@class="_1wVn6"]//time''').text().replace('时',':').replace('分',':').replace('秒','')
ctime = htmlparser.Parser(c_time).datetime()

# 文本时间2/外国时间
# 例：2018-08-24 13时03分02秒
try:
c_time = data.xpath('''//span[@class="_1wVn6"]//time''').text().replace('时',':').replace('分',':').replace('秒','')
ctime = datetime.datetime.strptime(c_time,"%Y-%m-%d %H:%M:%S") - datetime.timedelta(hours=8)
except:
ctime=gtime-datetime.timedelta(minutes=1)

#时间戳
# 例：1536120664
c_time = data.xpath('''//span[@class="_1wVn6"]//time''').text()
ctime = datetime.datetime.utcfromtimestamp(float(c_time)) - datetime.timedelta(hours=8)
                      </pre>
                            </div>
                        </div>
                        <!-- /.tab-pane -->
                        <div class="tab-pane" id="tab_3-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
#coding=utf-8
#############################################################################
# Copyright (c) 2014  - Beijing Intelligent Star, Inc.  All rights reserved


'''
#文件命名规则：zb-姓名首字母_网站域名_bbs.py
#例：zb-lxt_newsxc_bbs.py
文件名：？_？_newsxc.py

#该文件所部署的网站名称
#例：功能：南宣论坛
功能： ？

代码历史：
例：2018-5-21：张三  代码创建
？-？-？：？  代码创建
'''
import spider
import copy
import setting
import htmlparser
import datetime
import sys
reload(sys)
from urlparse import urljoin
class MySpider(spider.Spider):
def __init__(self,cmd_args=None):
    spider.Spider.__init__(self,cmd_args=cmd_args)
    #类别码，01新闻、02论坛、03博客、04微博 05平媒 06微信  07 视频、99搜索引擎
    self.info_flag = "02"   #所属类别
    #网站名称
    self.siteName = "？"
    #网站一级域名
    self.site_domain = '？'

    self.start_urls = [


        #列表页链接
        'url',  #频道名称
        'url',  #频道名称
        'url',  #频道名称

    ]
    #网页编码
    #例：self.encoding = 'gbk'
    self.encoding = 'gbk'
    self.c_time = datetime.datetime.utcnow()-datetime.timedelta(days=3)
    self.max_interval = datetime.datetime.utcnow()-datetime.timedelta(days=3)
    self.page_url = {}
    #self.dedup_uri = None

def get_start_urls(self, data=None):
    return self.start_urls

def parse(self, response):

    url_list = []
    if response is not None:
        try:
            response.encoding = self.encoding
            unicode_html_body = response.text
            data = htmlparser.Parser(unicode_html_body)
        except Exception, e:
            print "parse(): %s"%e
            return (url_list, None, None)
        purl =response.request.url

        #解析各个板块中所有子版块的链接的a标签
        #例：urls = data.xpathall('''//tr[@class="fl_row"]//h2//a''')
        urls = data.xpathall('''？''')
        if urls:
            for urla in urls:
                #提取子版块链接，正则解析出板块的id，使用%s占位符拼接出板块的链接
                #例：
                #fid = urla.xpath('''//@href''').regex('forum-(\d+)').text().strip()
                #url = 'http://bbs.newsxc.com/forum.php?mod=forumdisplay&fid=%s&filter=author&orderby=dateline'%fid
                fid = urla.xpath('''？''').regex('？').text().strip()
                url = '？'%fid
                url_list.append(url)
    return (url_list,  self.parse_next, None)

def parse_next(self, response):

    url_list = []
    if response is not None:
        try:
            response.encoding = self.encoding
            unicode_html_body = response.text
            data = htmlparser.Parser(unicode_html_body)
        except Exception, e:
            print "parse(): %s"%e
            return (url_list, None, None)
        purl =response.request.url

        #包含详情页链接和时间的模块，一般以//tr结尾
        #例：urls = data.xpathall('''//tbody[contains(@id,'normalthread')]//tr''')
        urls = data.xpathall('''？''')
        purl =response.request.url
        if urls:
            for urla in urls:#这时的urla可以看做将解析到的模块当做新的网页打开，页面内只有解析到的模块，以解析的模块为顶级节点进行解析，而不再是<html>标签

                #此处不要再出现解析模块的Xpath，继续向子节点解析即可
                #提取详情页链接，正则解析出详情页的id，使用%s占位符拼接出详情页的链接
                #例：tid = urla.xpath('''//a[@class="s xst"]/@href''').regex('tid=(\d+)').text().strip()
                tid = urla.xpath('''？''').regex('？').text().strip()
                url = '？'%tid
                ##模块内解析详情页时间，一般以//span、//td、//div、//p等结尾
                #例：ctime = urla.xpath('''//td[@class="by"][1]''').datetime()
                ctime = urla.xpath('''？''').datetime()
                if ctime < self.c_time:
                    continue
                url_list.append(url)
                self.page_url[url]=purl
    return (url_list,  None, None)

def clear_special_xp(self, data, xp):
    data = copy.copy(data)
    result = data._root.xpath(xp)
    for i in result:
        try:
            i.getparent().remove(i)
        except Exception as e:
            log.logger.error(e)
    return data


def parse_detail_page(self, response=None, url=None):
    '''''
    解析内容页文本
    '''
    try:
        response.encoding = self.encoding
        unicode_html_body = response.text
        data = htmlparser.Parser(unicode_html_body)
    except Exception, e:
        print "parse_detail_page(): %s"%e
        return None
    if url is None:
        url = response.request.url
    result = []
    #详情页内解析标题，一般以//text()结尾
    #例：title = data.xpath('''.//*[@id='thread_subject']''').text().strip()
    title = data.xpath('''？''').text().strip()


    gtime = datetime.datetime.utcnow()
    #详情页内解析包含发布时间的模块，一般以//span、//div、//p结尾
    #例：ctime = data.xpath('''(//div[@class="authi"]/em)[1]''').regex('(\d+-\d+-\d+ \d+:\d+)').datetime()
    ctime = data.xpath('''？''').regex('(\d+-\d+-\d+ \d+:\d+)').datetime()

    #详情页解析当前页面所属频道的面包屑，一般以//text()结尾
    #例：channel = data.xpath('''.//*[@id='pt']/div/a[last()-1]//text()''').text().strip()
    channel = data.xpath('''？''').text().strip()
    list_page_url = self.page_url.get(url)

    #详情页解析作者，一般以//text()结尾
    #例：source = data.xpath('''(//div[@class="authi"]/a[@class="xw1"])[1]''').text().strip()
    source = data.xpath('''？''').text().strip()

    #详情页解析回复数量，一般以//text()结尾，需要转化数据类型为int型
    #例：reply_count = data.xpath('''//div[@class="hm ptn"]/span[@class="xi1"][2]/text()''').regex('(\d+)').int()
    reply_count = data.xpath('''？''').regex('(\d+)').int()

    #详情页解析查看数量，一般以//text()结尾，需要转化数据类型为int型
    #例：visit_count = data.xpath('''//div[@class="hm ptn"]/span[@class="xi1"][1]/text()''').regex('(\d+)').int()
    visit_count = data.xpath('''？''').regex('(\d+)').int()

    pic_urls = []
    data = self.clear_special_xp(data,'''//p[@class="mbn"]|//div[@class="tip tip_4"]|//style|//script|//div[@class="xs0"]''')

    #详情页解析正文，一般以//p、//div、//span等结尾
    #例：content = data.xpath('''(//div[@class="t_fsz"])[1]''').text().strip()
    content = data.xpath('''？''').text().strip()

    #详情页解析带DOM结构的正文，一般以//p、//div、//span等结尾
    #例：content_xml = data.xpath('''(//div[@class="t_fsz"])[1]''').data.encode('utf-8')
    content_xml = data.xpath('''？''').data.encode('utf-8')

    #此处解析正文内的图片，内容与上方content后面填写的内容一致，后面的‘//img/@file’不要删除，有的图片链接在//img//@src下，根据情况写
    #例：pic_urls_list = data.xpathall('''(//div[@class="t_fsz"])[1]//img/@file''')
    pic_urls_list = data.xpathall('''(？)[1]//img/@file''')
    if pic_urls_list:
        for i in pic_urls_list:
            i = i.text().strip()
            i = urljoin(url,i)
            pic_urls.append(i)


    if not content:
        content = title
    post = {'title':title,
            'ctime':ctime,
            'gtime':gtime,
            'source':source,
            'channel':channel,
            'siteName':self.siteName+'-'+channel,
            'visitCount':[{'count':visit_count, 'spider_time':gtime}],
            'replyCount':[{'count':reply_count, 'spider_time':gtime}],
            'url':url,
            'content':content,
            'content_xml':content_xml,
            }
    if list_page_url:
        post.update({'list_page_url':list_page_url})
    if pic_urls:
        post.update({'pic_urls':pic_urls})
    result.append(post)

    return result


if __name__ == '__main__':
spider = MySpider()
spider.proxy_enable = False
spider.init_dedup()
spider.init_downloader()


#此处选填单测功能的详情页完整链接，仅用于测试上方的parse_detail_page()函数
#例：url = 'http://bbs.newsxc.com/thread-526534-1-1.html'
url = '？'
resp = spider.download(url)
res = spider.parse_detail_page(resp, url)
for item in res:
   for k, v in item.iteritems():
       print k, v

                      </pre>
                            </div>
                        </div>
                        <div class="tab-pane" id="tab_4-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">

#coding=utf-8

#############################################################################
# Copyright (c) 2014  - Beijing Intelligent Star, Inc.  All rights reserved


'''
文件名：lb_ssxhbj_news.py
功能：鄯善县环境保护局
代码历史：
2017-11-6：林宾  代码创建
'''

import datetime
import htmlparser
import re
import copy
import requests
from urlparse import urljoin
import spiderDefault

class MySpider(spiderDefault.Spider):

    def __init__(self, cmd_args=None):
        spiderDefault.Spider.__init__(self, cmd_args=cmd_args)

        #类别码，01新闻、02论坛、03博客、04微博 05平媒 06微信  07 视频、99搜索引擎
        self.info_flag = "02"
        self.siteName = "孟村回族自治县人民政府"
        self.site_domain = 'mengcun.gov.cn'

        self.start_urls = [

        'http://www.mengcun.gov.cn/plus/list.php?tid=4',  #生活

        ]
        self.encoding = 'utf-8'
        self.c_time = datetime.datetime.utcnow()-datetime.timedelta(days=3)
        self.page_url = {}
        self.max_interval = datetime.timedelta(days=3)
        #self.dedup_uri=None


    def clear_special_xp(self,data,xp):
        #'''删除指定xpath数据'''
        data = copy.copy(data)
        result = data._root.xpath(xp)
        for i in result:
            try:
                i.getparent().remove(i)
            except Exception as e:
                log.logger.error(e)
        return data

    def get_detail_page_urls(self, data):
        '''
        从列表页获取详情页url; 返回列表
        '''
        detail_page_urls = []

        if data is not None:
            url = data.response.request.url
            loops = data.xpathall('''//table[@id ='table-4']//tr''')
            for item in loops:
                post_url = item.xpath('''//@href''').text()
                if not post_url:
                    continue
                post_url = urljoin(url,post_url)
                ctime = item.xpath('''//td[3]''').datetime()
                if ctime < self.c_time:
                    continue
                self.page_url[post_url]=url
                detail_page_urls.append(post_url)
        return detail_page_urls

    def get_detail_page_info(self, data):
        '''

        解析详情页信息；参数data可直接调用xpath,re等方法；
        返回值为字典类型
        '''

        url = data.response.request.url
        result = []
        pic_urls = []

        gtime = datetime.datetime.utcnow()

        title = data.xpath('''//dd[@style="color:#ff0000"]''').text().strip()

        gtime = datetime.datetime.utcnow()

        ctime = data.xpath('''//div[@class='services_list']//dl[2]''').datetime()

        channel = '百姓留言'
        list_page_url = self.page_url.get(url)

        source = data.xpath('''//div[@class='services_list']//dl[4]''').text().replace('回复单位：','').strip()

        reply_count = 0

        visit_count = 0

        content = ''
        list_content = data.xpathall('''//div[@class='services_list']//dl[3]//dd|//div[@class='services_list']//dl[5]//dd''')
        for i in list_content:
            str_content = self.clear_special_xp(i,'''//script|//style''').text().strip()
            content += str_content
        content=title if not content else content
        content_xml = ''
        list_content_xml = data.xpathall('''//div[@class='services_list']//dl[3]//dd|//div[@class='services_list']//dl[5]//dd''')
        for i in list_content_xml:
            str_content_xml = self.clear_special_xp(i,'''//script''').data.encode('utf8')
            content_xml += str_content_xml
        picurls= data.xpathall('''(//div[@class='services_list']//dl[3]//dd|//div[@class='services_list']//dl[5]//dd)//img//@src''')
        if picurls:
            for i in picurls:
                pic = i.text().strip()
                pic = urljoin(url,pic)
                pic_urls.append(pic)

        if not content:
            content = title
        post = {'title': title,
                'ctime': ctime,
                'gtime': gtime,
                'source': source,
                'channel': channel,
                'siteName': self.siteName + '-' + channel,
                'visitCount': [{'count': visit_count, 'spider_time': gtime}],
                'replyCount': [{'count': reply_count, 'spider_time': gtime}],
                'url': url,
                'content': content,
                }
        if list_page_url:
            post.update({'list_page_url': list_page_url})
        if pic_urls:
            post.update({'pic_urls': pic_urls})
        if content_xml:
            post.update({'content_xml': content_xml})
        result.append(post)

        return result


if __name__ == "__main__":
    spider = MySpider()
    spider.proxy_enable = False
    spider.init_dedup()
    spider.init_downloader()


    # ------------ parse() ----------
    #     url = 'http://www.baotuowang.com/forum.php?mod=forumdisplay&fid=48&filter=author&orderby=dateline'
    #     resp = spider.download(url)
    #
    #     urls, fun, next_url = spider.parse(resp)
    #     for url in urls:
    #         print url


    url = 'http://www.ljzfin.com/news/info/41853.html'
    resp = spider.download(url)
    #    resp.encoding="utf-8"
    # print resp.url
    # print resp.text
    res = spider.parse_detail_page(resp, url)

    if res is not None:
        for item in res:
            for k, v in item.iteritems():
                print k,v


                      </pre>
                            </div>

                        </div>
                        <!-- /.tab-pane -->
                        <div class="tab-pane" id="tab_5-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
#coding=utf-8
#############################################################################
# Copyright (c) 2014  - Beijing Intelligent Star, Inc.  All rights reserved

'''
文件名：lb_dayoo_gzdaily_news.py
功能：广州日报 爬虫抓取配置文件

代码历史：
2014-12-03：段毅飞，创建代码
'''
import spider
import copy
import re
import setting
import htmlparser
import datetime
from urlparse import urljoin


class MySpider(spider.Spider):
    def __init__(self, cmd_args=None):
        spider.Spider.__init__(self, cmd_args=cmd_args)

        # 网站名称
        self.siteName = "？"
        # 类别码，01新闻、02论坛、03博客、04微博 05平媒 06微信  07 视频、99搜索引擎
        self.info_flag = "05"

        # 入口地址，放之前随便一天的首版链接
        self.start_urls = ['？']
        #网站域名
        self.site_domain = '？'
        #网页编码
        self.encoding = '？'
        self.head = ''
        self.gtime = datetime.datetime.utcnow()
        self.time_limit = self.gtime - datetime.timedelta(days=3)
        # self.max_interval = None
        # self.urldedup = None
        self.page_url = {}

    def clear_special_xp(self, data, xp):
        # '''删除指定xpath数据'''
        data = copy.copy(data)
        result = data._root.xpath(xp)
        for i in result:
            try:
                i.getparent().remove(i)
            except Exception as e:
                log.logger.error(e)
        return data

    def get_now_time_str(self):
        '''
        获取当前的时间，例如：year=2014,month=01,day=01
        '''
        today = datetime.date.today()
        year = today.strftime("%Y")
        month = today.strftime("%m")
        day = today.strftime("%d")
        return (year, month, day)

    def get_start_urls(self, data=None):
        '''
        返回start_urls
        '''
        start_urls = []
        #拼接带日期的链接
        url = '？'
        self.head = url % self.get_now_time_str()
        #拼接版面命名规则
        post_url = self.head + '？'
        start_urls.append(post_url)
        return start_urls

    def parse(self, response):
        """
        默认爬虫入口页分析函数；
        返回格式为(url_list,  callback, next_page_url)
        其中，url_list：表示当前data中分析出的url列表；如果没有url，改值为[]
        参数callback: 表示url_list中网页的分析函数；如果url_list中的值为详情页url，callback=None;
        该函数的返回值为同样为( url_list, callback, next_page_url)
        参数next_page_url: 下一页要分析的url，如果不翻页，该值为 None
        """
        url_list = []
        try:
            response.encoding = self.encoding
            unicode_html_body = response.text
            data = htmlparser.Parser(unicode_html_body)
        except Exception, e:
            print "parse(): %s" % e
        return (url_list, None, None)
        #一级解析，解析每个版面链接，一般以//@href结尾
        urls = data.xpathall('？')
        for url in urls:
            url = self.head + url.text().replace("./", "")
        url_list.append(url)
        return (url_list, self.parse_next, None)

    def parse_next(self, response):
        """
        次级列表页分析函数；
        """
        url_list = []
        try:
            response.encoding = self.encoding
            unicode_html_body = response.text
            data = htmlparser.Parser(unicode_html_body)
        except Exception, e:
            print "parse_next(): %s" % e
            return url_list, None, None
        purl = response.request.url
        #解析文章列表链接
        urls = data.xpathall("？")
        for url in urls:
            url = self.head + url.text()
            url_list.append(url)
            self.page_url[url] = purl

        return (url_list, None, None)

    def parse_detail_page(self, response=None, url=None):
        '''
        详细页解析
        '''
        try:
            response.encoding = self.encoding
            unicode_html_body = response.text
            data = htmlparser.Parser(unicode_html_body)
        except Exception, e:
            print "parse_detail_page(): %s" % e
            return None
        if url is None:
            url = response.request.url

        result = []
        pic_urls= []
        gtime = datetime.datetime.utcnow()
        url = response.url
        #解析当前新闻所属的版面文字，一般位于页面左下角，以//text()结尾
        channel = data.xpath('''？''').text().strip()
        channel = self.siteName if not channel else channel
        #解析当前新闻页面标题
        title = data.xpath('''？''').text().strip()
        #解析当前新闻页面作者，有的作者在注释例，需要正则解析
        source = data.xpath('''？''').text() or self.siteName
        retweeted_source = self.siteName
        ctime=gtime-datetime.timedelta(minutes=1)
        delete_xpath = '''//script'''
        data = self.clear_special_xp(data,delete_xpath)
        content = ''
        #解析当前新闻页新闻正文
        list_content = data.xpathall('''？''')
        for i in list_content:
            str_content = self.clear_special_xp(i,'''//script''').text().strip()
        content += str_content
        content = title if not content else content
        #解析当前新闻页新闻正文的xml样式
        content_xml = data.xpath('''？''').data.encode('utf-8')
        #解析当前新闻页新闻正文内所包含的图片
        picurls= data.xpathall('''(？)//img//@src''')
        if picurls:
            for i in picurls:
                pic = i.text().strip()
                try:
                    pic = urljoin(url,pic)
                except:
                    continue
                pic_urls.append(pic)
        retweeted_status_url = ''
        list_page_url = self.page_url.get(url,'')

        post = {'title':title,
            'gtime':gtime,
            'ctime':ctime,
            'source':source,
            'content_xml':content_xml,
            'retweeted_source':retweeted_source,
            'channel':channel,
            'siteName':self.siteName+'-'+channel,
            'url':url,
            'content':content,
            }

        if pic_urls:
        post.update({"pic_urls":pic_urls})
        if channel == self.siteName:
        post.update({"siteName":self.siteName})
        if list_page_url:
        post.update({'list_page_url':list_page_url})
        if retweeted_status_url:
        post.update({'retweeted_status_url':retweeted_status_url})
        result.append(post)
        return result


if __name__ == '__main__':
    spider = MySpider()
    spider.proxy_enable = False
    spider.init_dedup()
    spider.init_downloader()

    # ------------ get_start_urls() ----------
    #    urls = spider.get_start_urls()
    #    for url in urls:
    #        print url

    # ------------ parse() ----------
    #    url = 'http://epaper.ssrb.com.cn/html/2014-09/01/node_1.htm'
    #    resp = spider.download(url)
    #    urls, fun, next_url = spider.parse_next(resp)
    #    for url in urls:
    #        print url

    # ------------ parse_detail_page() ----------
    url = 'http://gzdaily.dayoo.com/pc/html/2017-12/28/content_2_2.htm'
    resp = spider.download(url)
    res = spider.parse_detail_page(resp, url)
    for item in res:
        for k, v in item.iteritems():
            print k, v

                      </pre>
                            </div>
                        </div>
                        <div class="tab-pane" id="tab_6-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
#coding=utf-8

#############################################################################
# Copyright (c) 2018  - Beijing Intelligent Star, Inc.  All rights reserved


'''
#文件命名规则：姓名首字母_网站域名_news.py
#例：lxt_dzwww_news.py
文件名：？_？_news.py

#该文件所部署的网站名称
#例：大众网要闻
功能： ？

代码历史：
例：2018-5-21：张三  代码创建
？-？-？：？  代码创建
'''

import datetime
import htmlparser
import re
import copy
import requests
from urlparse import urljoin
import spiderDefault
import myreadability
import json

class MySpider(spiderDefault.Spider):

    def __init__(self, cmd_args=None):
        spiderDefault.Spider.__init__(self, cmd_args=cmd_args)

        #类别码，01新闻、02论坛、03博客、04微博 05平媒 06微信  07 视频、99搜索引擎
        self.info_flag = "01"
        #网站名称
        self.siteName = '?'
        #网站一级域名
        self.site_domain = '?'

        self.start_urls = [

            #列表页链接
            'url',  #频道名称
            'url',  #频道名称
            'url',  #频道名称
            'url',  #频道名称


        ]
        #网页编码
        #例：self.encoding = 'gbk'
        self.encoding = '？'
        self.c_time = datetime.datetime.utcnow()-datetime.timedelta(days=3)
        self.page_url = {}
        self.max_interval = datetime.timedelta(days=3)
        #self.dedup_uri=None


    def clear_special_xp(self,data,xp):
        #'''删除指定xpath数据'''
        data = copy.copy(data)
        result = data._root.xpath(xp)
        for i in result:
            try:
                i.getparent().remove(i)
            except Exception as e:
                log.logger.error(e)
        return data

    def get_detail_page_urls(self, data):
        '''
        从列表页获取详情页url; 返回列表
        '''
        detail_page_urls = []

        if data is not None:
            url = data.response.request.url
            #包含详情页链接和时间的模块，一般以//tr、//li、//div等结尾
            #例：loops = data.xpathall('''//div[contains(@class,"news-list2")]//li''')
            loops = data.xpathall('''？''')
            for item in loops: #这时的item可以看做将解析到的模块当做新的网页打开，页面内只有解析到的模块，以解析的模块为顶级节点进行解析，而不再是<html>标签

                #此处不要再出现解析模块的Xpath，继续向子节点解析即可
                #模块内解析详情页链接，一般以//@href结尾
                #例：post_url = item.xpath('''//h3//a//@href''').text().strip()
                post_url = item.xpath('''？''').text().strip()
                if not post_url:
                    continue
                post_url = urljoin(url,post_url)
                #模块内解析详情页时间，一般以//span、//div、//p等结尾，如没有则删除这三行
                #例：ctime = item.xpath('''//div[@class="tail"]//span[1]''').datetime()
                ctime = item.xpath('''？''').datetime()
                if ctime < self.c_time:
                    continue
                self.page_url[post_url]=url
                detail_page_urls.append(post_url)
            detail_page_urls = set(detail_page_urls)
        return detail_page_urls

    def get_detail_page_info(self, data):
        '''

        解析详情页信息；参数data可直接调用xpath,re等方法；
        返回值为字典类型
        '''

        url = data.response.request.url
        result = []
        pic_urls = []

        #详情页内解析标题，一般以//text()结尾
        #title = data.xpath('''//div[@class="layout"]//h2//text()''').text().strip()
        title = data.xpath('''？''').text().strip()

        gtime = datetime.datetime.utcnow()
        #详情页内解析包含发布时间的模块，一般以//span、//div、//p结尾
        #ctime = data.xpath('''//div[@class="layout"]//div[@class="left"]''')
        ctime = data.xpath('''？''').datetime()
        if ctime < self.c_time:
            return None
        #详情页解析当前页面所属频道的面包屑，一般以//text()结尾
        #source = data.xpath('''//div[@class="add"]//a[last()]//text()''')
        channel = data.xpath('''？''').text().strip()

        #详情页解析作者，一般以//text()结尾
        #source = data.xpath('''//div[@id="xl-headline"]//div[@class="left"]//text()''')
        source = data.xpath('''？''').text().replace('作者：','').strip() or self.siteName

        #详情页解析来源，一般以//text()结尾，如没有，此字段=''
        # retweeted_source = data.xpath('''//div[@id="xl-headline"]//div[@class="left"]//text()''')
        retweeted_source = data.xpath('''？''').text().strip() or self.siteName

        #详情页解析来源链接，一般以//@href结尾，如没有，此字段=''
        #例：retweeted_status_url = data.xpath('''''')
        retweeted_status_url =  ''

        list_page_url = self.page_url.get(url,'')

        content = ''
        content_xml = ''
        #详情页解析正文，一般以//p、//div、//span等结尾
        #例：content1 = data.xpathall('''//div[@class="news-con"]''')
        content1 = data.xpathall('''？''')
        for item in content1:
            #此处填写需要排除的（不进行解析）元素，如有多个以‘|’分隔开，如没有，请将‘|？’删除，注意：‘//script’不要删除！
            #例：content_str = self.clear_special_xp(item,'''//script''')
            content_str = self.clear_special_xp(item,'''//script|？''')
            content += content_str.text().strip()
            content_xml += content_str.data.encode('utf-8')

        content = title if not content else content
        #此处解析正文内的图片，内容与上方content1后面填写的内容一致，后面的‘//img//@src’不要删除
        #例：pic_urls_list = data.xpathall('''//div[@class="news-con"]//img//@src''')
        pic_urls_list = data.xpathall('''(？)//img//@src''')
        if pic_urls_list:
            for i in pic_urls_list:
                i = i.text().strip()
                i = urljoin(url,i)
                pic_urls.append(i)

        post = {
            'title':title,
            'gtime':gtime,
            'ctime':ctime,
            'source':source,
            'retweeted_source':retweeted_source,
            'channel':channel,
            'list_page_url':list_page_url,
            'siteName':self.siteName+'-'+channel,
            'url':url,
            'content':content,
            'content_xml': content_xml,
            }
        if pic_urls:
            post.update({"pic_urls":pic_urls})
        if channel == self.siteName:
            post.update({"siteName":self.siteName})
        if retweeted_status_url:
            post.update({'retweeted_status_url':retweeted_status_url})
        result.append(post)
        return result




if __name__ == "__main__":
    spider = MySpider()
    spider.proxy_enable = False
    spider.init_dedup()
    spider.init_downloader()

    #此处选填单测功能的详情页完整链接，仅用于测试上方的get_detail_page_info()函数
    #例：url = 'http://news.dzwww.com/guoneixinwen/201806/t20180629_17546125.htm'
    url = '？'
    resp = spider.download(url)
    # resp.encoding="utf-8"
    res = spider.parse_detail_page(resp, url)

    if res is not None:
        for item in res:
            for k, v in item.iteritems():
                print k,v

                      </pre>
                            </div>
                        </div>
                        <div class="tab-pane" id="tab_7-2">
                            <div class="box-body">
                      <pre style="font-weight: 600;">
        #coding=utf-8
#############################################################################
# Copyright (c) 2018  - Beijing Intelligent Star, Inc.  All rights reserved

'''
文件名：zb_yang_businesstimes_news.py
功能：财经时报
代码历史：
2018-10-04：杨松柏  代码创建
'''

'''只需修改get_detail_page_urls函数，其他部分与最新模板一样，无需修改'''

import datetime
import htmlparser
import re
import copy
import requests
from urlparse import urljoin
import spiderDefault

class MySpider(spiderDefault.Spider):
    """Spider for businesstimes.cn (财经时报 / Business Times).

    Crawls the configured list pages, collects detail-page URLs published
    within the crawl window, and parses each article into a dict of fields.
    Python 2 codebase built on the project's ``spiderDefault`` framework.
    """

    def __init__(self, cmd_args=None):
        spiderDefault.Spider.__init__(self, cmd_args=cmd_args)
        # Category code: 01 news, 02 forum, 03 blog, 04 weibo,
        # 05 print media, 06 wechat, 07 video, 99 search engine.
        self.info_flag = "01"
        self.siteName = "财经时报"
        self.site_domain = 'businesstimes.cn'

        # List pages the crawl starts from.
        self.start_urls = [

            'http://www.businesstimes.cn/',      # Business Times home page
            'http://www.businesstimes.cn/auto',  # auto channel

        ]

        self.encoding = 'utf-8'
        # Only keep articles published within the last 3 days (UTC).
        self.c_time = datetime.datetime.utcnow()-datetime.timedelta(days=3)
        # Maps detail-page URL -> the list page it was discovered on.
        self.page_url = {}
        self.max_interval = datetime.timedelta(days=3)
        #self.dedup_uri=None

    def clear_special_xp(self,data,xp):
        '''Return a copy of *data* with every node matching xpath *xp* removed.'''
        data = copy.copy(data)
        result = data._root.xpath(xp)
        for i in result:
            try:
                i.getparent().remove(i)
            except Exception:
                # Best-effort removal: a node with no parent (or one already
                # detached) is simply skipped.  NOTE(review): the original
                # code called `log.logger.error(e)` here, but `log` is never
                # imported, so this path raised NameError instead of logging.
                pass
        return data

    def get_detail_page_urls(self, data):
        '''
        Extract detail-page URLs from a list page.

        Returns a set of absolute URLs that belong to this site and whose
        embedded publication date falls inside the crawl window.
        '''
        detail_page_urls = set()

        if data is not None:
            url = data.response.request.url
            '''
            regex(reg, default="") parses the data with a regular
            expression and returns the first match.

            regexall(reg) does the same but returns every match,
            or [] when nothing matches.
            '''
            loops = data.regexall(r'''href=["'](/articles/\d+/\d{8}/(?:\w+-)*\w+\.html?)["']''')
            # The builtin re module would work equally well:
            # loops = re.findall(r'''href=["'](/articles/\d+/\d{8}/(?:\w+-)*\w+\.html?)["']''',data.data)
            for item in loops:
                post_url = item.text().strip()
                if not post_url:
                    continue
                post_url = urljoin(url,post_url)
                if self.site_domain not in post_url:
                    continue
                try:
                    # The URL path carries the publish date as /YYYYMMDD/;
                    # shift by -8h to convert local (CST) midnight to UTC.
                    c_time = item.regex(r'''/(\d{8})/''').text()
                    ctime = datetime.datetime.strptime(c_time,'%Y%m%d') - datetime.timedelta(hours=8)
                except Exception:
                    # Date not parseable: treat the article as "just now" so
                    # it is not dropped by the age filter below.
                    ctime = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
                if ctime < self.c_time:
                    continue  # older than the crawl window
                self.page_url[post_url]=url
                detail_page_urls.add(post_url)
        return detail_page_urls

    def get_detail_page_info(self, data):
        '''
        Parse a detail (article) page.

        Returns a list holding one dict of article fields, or None when
        *data* is empty.  *data* exposes the project's xpath/regex helpers.
        '''
        if not data:
            return None

        url = data.response.request.url
        result = []
        pic_urls = []
        gtime = datetime.datetime.utcnow()
        # List page this URL was found on ('' when run as a single-URL test).
        list_page_url = self.page_url.get(url,'')

        # Breadcrumb text, e.g. "所在位置：首页 > 汽车"; strip the prefix.
        channel = data.xpath('''//div[@class="section-title flex-xs jc-sb"]//div[@class="flex-1"]//text()''').text().replace('所在位置：','').strip()
        # channel = self.channel_dict.get(url,'')
        # Fall back to the site name when the breadcrumb is missing or "home".
        channel = self.siteName if not channel or '首页' in channel else channel
        # channel = self.siteName

        title = data.xpath('''//main//h1''').text().strip()

        source = data.xpath('''//div[@class="byline"]//a''').text().strip()
        # if re.search('作者：(.*)',source,re.S):
        #     source = re.search('作者：(.*)',source,re.S).group(1).strip()
        # else:
        #     source = self.siteName
        source = self.siteName if not source else source
        # source = self.siteName

        # retweeted_source = data.xpath('''//div[@class="info"]''').text().strip()
        retweeted_source = self.siteName

        # if data.xpath('''//div[@class="info_from_wrap"]//a[1]//@href'''):
        #     retweeted_status_url = urljoin(url,data.xpath('''//div[@class="info_from_wrap"]//a[1]//@href''').text())
        # else:
        #     retweeted_status_url = ''
        retweeted_status_url = ''

        # Publish time from the page; clamp anything in the future to one
        # minute before fetch time.
        ctime = data.xpath('''//div[contains(@class,"page-content")]//time''').datetime()
        ctime=gtime-datetime.timedelta(minutes=1) if ctime>=gtime else ctime

        # Drop <script> nodes before extracting the article body.
        delete_xpath = '''//script'''
        data = self.clear_special_xp(data,delete_xpath)

        # content: plain text of every paragraph; content_xml: raw markup.
        content = content_xml = ''
        list_content = data.xpathall('''//div[contains(@class,"page-content")]//p''')
        for i in list_content:
            content_xml += i.data.encode('utf-8')
            str_content = self.clear_special_xp(i,'''//script|//style''')
            content += str_content.text().strip()

        # Collect absolute image URLs from the article's image boxes.
        picurls= data.xpathall('''(//div[contains(@class,"imageBox")])//img//@src''')
        if picurls:
            for i in picurls:
                pic = i.text().strip()
                pic = urljoin(url,pic)
                pic_urls.append(pic)

        # Never emit an empty body: fall back to the title.
        content=title if not content else content

        post = {'title':title,
                'gtime':gtime,
                'ctime':ctime,
                'source':source,
                'retweeted_source':retweeted_source,
                'channel':channel,
                'siteName':self.siteName+'-'+channel,
                'url':url,
                'content':content,
                }
        if pic_urls:
            post.update({"pic_urls":pic_urls})
        if channel == self.siteName:
            post.update({"siteName":self.siteName})
        if content_xml:
            post.update({'content_xml':content_xml})
        if list_page_url:
            post.update({'list_page_url':list_page_url})
        if retweeted_status_url:
            post.update({'retweeted_status_url':retweeted_status_url})
        result.append(post)
        return result


if __name__ == "__main__":
    # Ad-hoc manual test entry point (Python 2 script): fetch one detail
    # page and print every parsed field.
    spider = MySpider()
    spider.proxy_enable = False  # no proxy needed for local testing
    spider.init_dedup()
    spider.init_downloader()


    # ------------ parse() ----------
    #     url = 'http://www.baotuowang.com/forum.php?mod=forumdisplay&fid=48&filter=author&orderby=dateline'
    #     resp = spider.download(url)
    #
    #     urls, fun, next_url = spider.parse(resp)
    #     for url in urls:
    #         print url


    # Sample detail-page URL for exercising get_detail_page_info().
    url = 'http://www.businesstimes.cn/articles/124851/20181025/hy.htm'
    resp = spider.download(url)
    #    resp.encoding="utf-8"
    # print resp.url
    # print resp.text
    res = spider.parse_detail_page(resp, url)

    if res is not None:
        for item in res:
            for k, v in item.iteritems():
                print k,v
                      </pre>
                            </div>
                        </div>
                    </div>
                    <!-- /.tab-content -->
                </div>
                <!-- nav-tabs-custom -->
            </div>


        </div>
        <!-- /.box -->
    </div>
    <!-- /.col -->
</div>
<!-- /.row -->

<script>
    function changeColor(id, color) {
        //$(id).parent().parent().parent().children().children().children().find("li[color='green']");
        $(id).parent().parent().addClass("active");
        $(id).children().css("color", color);
    }

    // Highlight the "my tasks" menu and paint the current sidebar entry green.
    $('#myTasks').addClass("active");
    changeColor("#novice_spider", "#00FF7F");
</script>


{% endblock section_content %}