﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using Quartz;
using Lucene.Net.Store;
using System.IO;
using Lucene.Net.Index;
using System.Net;
using System.Text;
using mshtml;
using Lucene.Net.Documents;
using System.Xml.Linq;
using System.Text.RegularExpressions;
using log4net;
using Lucene.Net.Analysis.PanGu;

/// <summary>
///dojob 的摘要说明
/// </summary>
/// <summary>
/// Quartz job that rebuilds/updates the Lucene.Net full-text index: it walks
/// every detail page from id 1 to the max id advertised by the RSS feed,
/// strips the HTML via mshtml, and indexes the text with the PanGu analyzer.
/// </summary>
public class dojob : IJob
{
    private static readonly ILog logger = LogManager.GetLogger(typeof(dojob));

    public dojob()
    {
        // Parameterless constructor required so the Quartz scheduler can instantiate the job.
    }

    #region IJob 成员

    #region 执行任务
    /// <summary>
    /// Scheduler entry point: runs one full indexing pass and logs (never rethrows)
    /// any failure so the scheduler keeps firing subsequent triggers.
    /// </summary>
    /// <param name="context">Quartz-supplied execution context (unused).</param>
    public void Execute(JobExecutionContext context)
    {
        try
        {
            logger.Debug("开始进行索引");
            DoIndex();
            logger.Debug("进行索引结束");
        }
        catch (Exception ex)
        {
            logger.Error("启动索引任务异常", ex);
        }
    }
    #endregion

    #endregion

    #region 启动索引  建立索引库
    /// <summary>
    /// Builds (or incrementally updates) the index at c:/index. The writer and
    /// directory are closed in finally blocks so the Lucene write lock is always
    /// released, even when a page download or parse throws mid-pass.
    /// </summary>
    private void DoIndex()
    {
        string indexPath = "c:/index";
        FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NativeFSLockFactory());
        try
        {
            bool isUpdate = IndexReader.IndexExists(directory);
            if (isUpdate && IndexWriter.IsLocked(directory))
            {
                // A previous run died while holding the write lock; clear it first.
                IndexWriter.Unlock(directory);
            }

            IndexWriter writer = new IndexWriter(directory, new PanGuAnalyzer(), !isUpdate, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);
            try
            {
                // WebClient is IDisposable — dispose it deterministically.
                using (WebClient wc = new WebClient())
                {
                    wc.Encoding = Encoding.UTF8; // pages are UTF-8; without this the download is mojibake

                    int maxId = GetMaxId();
                    for (int i = 1; i <= maxId; i++)
                    {
                        try
                        {
                            IndexOne(writer, wc, i);
                            logger.Debug("索引" + i + "完毕");
                        }
                        catch (Exception ex)
                        {
                            // One bad page (404, parse failure, missing body) must not
                            // abort the whole pass — log it and keep indexing.
                            logger.Error("索引" + i + "异常", ex);
                        }
                    }
                }
            }
            finally
            {
                writer.Close();
            }
        }
        finally
        {
            directory.Close(); // flush & release the lock; otherwise new docs are not searchable
        }
        logger.Debug("全部索引完毕");
    }

    /// <summary>
    /// Downloads detail page <paramref name="i"/>, extracts title/body text via the
    /// mshtml DOM, and (re-)indexes it, deleting any stale copy first.
    /// </summary>
    /// <param name="writer">Open index writer the document is added to.</param>
    /// <param name="wc">Shared, UTF-8-configured web client.</param>
    /// <param name="i">Page id; also stored as the "number" key field.</param>
    private void IndexOne(IndexWriter writer, WebClient wc, int i)
    {
        string url = "http://localhost:32768/detail.aspx?id=" + i;
        string html = wc.DownloadString(url);

        // HTMLDocumentClass is the IE parsing engine; it turns raw HTML into a readable DOM.
        HTMLDocumentClass doc = new HTMLDocumentClass();
        doc.designMode = "on"; // design mode stops the engine from executing any javascript
        doc.IHTMLDocument2_write(html);
        doc.close();

        // Guard against pages with no <title>/<body> — mshtml returns null there.
        string title = doc.title ?? string.Empty;
        string body = doc.body != null ? (doc.body.innerText ?? string.Empty) : string.Empty;

        // Delete any existing document with the same number so re-runs do not duplicate.
        writer.DeleteDocuments(new Term("number", i.ToString()));

        Document document = new Document();
        // Only the full-text-searched field (body) is ANALYZED (tokenized);
        // number/title are stored verbatim for exact lookup/display.
        document.Add(new Field("number", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        document.Add(new Field("title", title, Field.Store.YES, Field.Index.NOT_ANALYZED));
        document.Add(new Field("body", body, Field.Store.YES, Field.Index.ANALYZED, Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
        writer.AddDocument(document);
    }
    #endregion

    #region 读取rss的xml文件  获得最大id号
    /// <summary>
    /// Reads the RSS feed and parses the id out of the first (newest) item's link,
    /// which is the highest detail-page id to index.
    /// </summary>
    /// <returns>The max detail-page id.</returns>
    /// <exception cref="InvalidOperationException">
    /// The feed has no channel/item/link, or the link does not match detail.aspx?id=N.
    /// </exception>
    public int GetMaxId()
    {
        XDocument xdoc = XDocument.Load("http://localhost:32768/rss.aspx");
        XElement channel = xdoc.Root.Element("channel");
        XElement firstItem = channel.Elements("item").First();
        XElement link = firstItem.Element("link");
        Match m = Regex.Match(link.Value, @"detail\.aspx\?id=(\d+)");
        if (!m.Success)
        {
            // Fail with a descriptive error instead of Convert.ToInt32("") throwing FormatException.
            throw new InvalidOperationException("RSS link does not match detail.aspx?id=N: " + link.Value);
        }
        return Convert.ToInt32(m.Groups[1].Value); // group 0 is the whole match; captures start at index 1
    }
    #endregion
}