﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using Lucene.Net.Store;
using System.IO;
using Lucene.Net.Index;
using Lucene.Net.Analysis.PanGu;
using System.Net;
using System.Text;
using mshtml;
using Lucene.Net.Documents;
using System.Xml.Linq;
using System.Text.RegularExpressions;
using log4net;

public partial class test2 : System.Web.UI.Page
{
    // log4net logger for this page; static readonly so a single instance is shared across requests.
    private static readonly ILog logger = LogManager.GetLogger(typeof(test2));

    protected void Page_Load(object sender, EventArgs e)
    {

    }

    /// <summary>
    /// Crawls every detail page from id 1 up to the newest post id (obtained from the
    /// site's RSS feed via <see cref="GetMaxId"/>), strips the HTML to plain text with
    /// mshtml, and (re)indexes each page into the Lucene.Net index at c:/index using
    /// the PanGu Chinese analyzer.
    /// </summary>
    protected void Button1_Click(object sender, EventArgs e)
    {
        logger.Debug("开始");
        string indexPath = "c:/index";
        FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NativeFSLockFactory());
        bool isUpdate = IndexReader.IndexExists(directory);
        if (isUpdate)
        {
            // If the index directory is still locked (e.g. a previous run crashed
            // mid-indexing), unlock it first so the writer can be created.
            if (IndexWriter.IsLocked(directory))
            {
                IndexWriter.Unlock(directory);
            }
        }
        IndexWriter writer = new IndexWriter(directory, new PanGuAnalyzer(), !isUpdate, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);
        try
        {
            WebClient wc = new WebClient();
            wc.Encoding = Encoding.UTF8; // without this the downloaded HTML is mojibake

            int maxId = GetMaxId();
            for (int i = 1; i <= maxId; i++)
            {
                // BUGFIX: the original appended a stray ".aspx" after the id
                // ("...detail.aspx?id=1.aspx"); the RSS links parsed by GetMaxId use a
                // plain integer id, so request the same form here.
                string url = "http://localhost:32768/detail.aspx?id=" + i;
                string html = wc.DownloadString(url);

                // HTMLDocumentClass is the parsing engine IE itself uses for HTML.
                HTMLDocumentClass doc = new HTMLDocumentClass();
                doc.designMode = "on"; // design mode stops the engine from running javascript
                doc.IHTMLDocument2_write(html); // feed it the downloaded markup
                doc.close();

                string title = doc.title;
                string body = doc.body.innerText; // tags stripped, plain text only

                // Delete any existing document with number == i first, so re-running
                // the crawl never produces duplicate index entries.
                writer.DeleteDocuments(new Term("number", i.ToString()));

                Document document = new Document();
                // Only fields that need full-text search are ANALYZED (tokenized).
                document.Add(new Field("number", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
                document.Add(new Field("title", title, Field.Store.YES, Field.Index.NOT_ANALYZED));
                document.Add(new Field("body", body, Field.Store.YES, Field.Index.ANALYZED, Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
                writer.AddDocument(document);
                logger.Debug("索引" + i + "完毕");
            }
        }
        finally
        {
            // Always close, even when the crawl throws: an unclosed writer leaves the
            // index locked and the buffered documents unsearchable.
            writer.Close();
            directory.Close();
        }
        logger.Debug("全部索引完毕");
        logger.Debug("结束");
    }

    #region 读取rss的xml文件  获得最大id号
    /// <summary>
    /// Reads the site's RSS feed and returns the newest post id, i.e. the number
    /// embedded in the link of the first &lt;item&gt; element.
    /// </summary>
    /// <returns>The largest (newest) post id.</returns>
    /// <exception cref="InvalidOperationException">
    /// Thrown when the first item's link does not contain a "detail.aspx?id=N" id.
    /// </exception>
    public int GetMaxId()
    {
        XDocument xdoc = XDocument.Load("http://localhost:32768/rss.aspx");
        XElement channel = xdoc.Root.Element("channel");
        XElement firstItem = channel.Elements("item").First();
        XElement link = firstItem.Element("link");
        // BUGFIX: the original pattern "detail\.aspx?id\=(\d+)" left the '?' unescaped
        // (making the 'x' optional instead of matching the literal query '?'), so it
        // never matched "detail.aspx?id=N"; it also read Groups[2] although the pattern
        // has only one capture group (group 0 is the whole match, captures start at 1).
        Match m = Regex.Match(link.Value, @"detail\.aspx\?id=(\d+)");
        if (!m.Success)
        {
            // Fail loudly with context instead of letting Convert.ToInt32("") throw
            // an opaque FormatException.
            throw new InvalidOperationException("RSS link did not contain a post id: " + link.Value);
        }
        int maxid = Convert.ToInt32(m.Groups[1].Value);
        return maxid;
    }
    #endregion
}