- UID
- 580
- 精华
- 积分
- 561
- 威望
- 点
- 宅币
- 个
- 贡献
- 次
- 宅之契约
- 份
- 最后登录
- 1970-1-1
- 在线时间
- 小时
|
基于Lucene.net3.03 加 ICTCLAS2014实现的站内搜索引擎
Lucene.net是一个搜索引擎的框架,它自身并不能实现搜索,需要我们自己在其中实现索引的建立,索引的查找。所有这些都是根据它自身提供的API来实现。Lucene.net本身是基于java的,但是经过翻译成.NET版本后,可以在ASP.NET中使用它来实现站内搜索。
要实现基于汉语的搜索引擎,首先要实现汉语的分词。目前网上大部分都是利用已经有的盘古分词来实现的分词系统,但是盘古分词效果不太好。在这里我把最新的ICTCLAS2014嵌入到Lucene.net中。Lucene.net中所有的分词系统都是基于Analyzer类来继承实现的。所以如果要使用ICTCLAS2014嵌入到Lucene.net中,就必须要继承Analyzer类实现自己的分词类。
1 ICTCLAS的引入
首先我们要把ICTCLAS的dll引入到C#文件中 ,因为这个dll不是在C#中建立的类库,所以无法直接将其加入到C#的引用中。我们考虑使用下面的方法来实现,为了方便,我们把引入的函数以及结构体放入一个类中。如下所示:- using System;
- using System.Collections.Generic;
- using System.Linq;
- using System.Text;
- using System.Runtime.InteropServices;
- using Lucene.Net.Analysis;
- namespace Lucene.Net.Analysis.DChinese
- {
// One word-segmentation result entry returned by the native NLPIR/ICTCLAS2014
// engine.  The explicit 64-byte layout mirrors the C `result_t` struct, so the
// field offsets must not be changed, reordered, or resized.
[StructLayout(LayoutKind.Explicit)]
public struct result_t
{
    // Start position of the word in the source text.
    [FieldOffset(0)]
    public int start;
    // Length of the word.
    [FieldOffset(4)]
    public int length;
    // sPos1..sPos10 mirror the native struct's position slots; their exact
    // semantics are defined by the NLPIR header — kept verbatim for layout.
    [FieldOffset(8)]
    public int sPos1;
    [FieldOffset(12)]
    public int sPos2;
    [FieldOffset(16)]
    public int sPos3;
    [FieldOffset(20)]
    public int sPos4;
    [FieldOffset(24)]
    public int sPos5;
    [FieldOffset(28)]
    public int sPos6;
    [FieldOffset(32)]
    public int sPos7;
    [FieldOffset(36)]
    public int sPos8;
    [FieldOffset(40)]
    public int sPos9;
    [FieldOffset(44)]
    public int sPos10;
    //[FieldOffset(12)] public int sPosLow;
    // Part-of-speech id assigned by the segmenter.
    [FieldOffset(48)]
    public int POS_id;
    // Dictionary word id (presumably 0 when the word is out-of-vocabulary —
    // TODO confirm against the NLPIR documentation).
    [FieldOffset(52)]
    public int word_ID;
    // Word type flag as defined by the native engine.
    [FieldOffset(56)]
    public int word_type;
    // Weight/score of this segmentation result.
    [FieldOffset(60)]
    public double weight;
}
// P/Invoke wrapper for the native NLPIR/ICTCLAS2014 word-segmentation engine.
// All members are static declarations of the C API; call NLPIR_Init once
// before any other function and NLPIR_Exit when finished.
// NOTE(review): several natives are declared here as returning `bool`; the C
// header declares C++ `bool` (1 byte) while default P/Invoke marshaling
// assumes a 4-byte BOOL — confirm against the NLPIR header that this is safe.
public class SplitWord
{
    const string path = @"NLPIR.dll"; // Path of the native DLL (resolved relative to the process).
    // Function declarations for the native API follow.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_Init", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_Init(String sInitDirPath, int encoding = 0, String sLicenceCode = null);
    // IMPORTANT: the C function
    //   NLPIR_API const char * NLPIR_ParagraphProcess(const char *sParagraph, int bPOStagged = 1);
    // must correspond exactly to the declaration below (returns a native
    // string pointer; read it with Marshal.PtrToStringAnsi).
    [DllImport(path, CharSet = CharSet.Ansi, CallingConvention = CallingConvention.Cdecl, EntryPoint = "NLPIR_ParagraphProcess")]
    public static extern IntPtr NLPIR_ParagraphProcess(String sParagraph, int bPOStagged = 1);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_Exit", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_Exit();
    // Imports a user dictionary file into the segmenter.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_ImportUserDict", CallingConvention = CallingConvention.Cdecl)]
    public static extern int NLPIR_ImportUserDict(String sFilename);
    // Segments a whole file, writing the result to sDestFilename.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_FileProcess", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_FileProcess(String sSrcFilename, String sDestFilename, int bPOStagged = 1);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_FileProcessEx", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_FileProcessEx(String sSrcFilename, String sDestFilename);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_GetParagraphProcessAWordCount", CallingConvention = CallingConvention.Cdecl)]
    public static extern int NLPIR_GetParagraphProcessAWordCount(String sParagraph);
    // Fills `result` with the entries counted by NLPIR_GetParagraphProcessAWordCount.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_ParagraphProcessAW", CallingConvention = CallingConvention.Cdecl)]
    public static extern void NLPIR_ParagraphProcessAW(int nCount, [Out, MarshalAs(UnmanagedType.LPArray)] result_t[] result);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_AddUserWord", CallingConvention = CallingConvention.Cdecl)]
    public static extern int NLPIR_AddUserWord(String sWord);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_SaveTheUsrDic", CallingConvention = CallingConvention.Cdecl)]
    public static extern int NLPIR_SaveTheUsrDic();
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_DelUsrWord", CallingConvention = CallingConvention.Cdecl)]
    public static extern int NLPIR_DelUsrWord(String sWord);
    // NWI_* functions drive new-word identification: Start, feed text/files,
    // Complete, then fetch the result or merge it into the user dictionary.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_NWI_Start", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_NWI_Start();
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_NWI_Complete", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_NWI_Complete();
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_NWI_AddFile", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_NWI_AddFile(String sText);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_NWI_AddMem", CallingConvention = CallingConvention.Cdecl)]
    public static extern bool NLPIR_NWI_AddMem(String sText);
    [DllImport(path, CharSet = CharSet.Ansi, CallingConvention = CallingConvention.Cdecl, EntryPoint = "NLPIR_NWI_GetResult")]
    public static extern IntPtr NLPIR_NWI_GetResult(bool bWeightOut = false);
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_NWI_Result2UserDict", CallingConvention = CallingConvention.Cdecl)]
    public static extern uint NLPIR_NWI_Result2UserDict();
    // Keyword extraction; returns a native string pointer.
    [DllImport(path, CharSet = CharSet.Ansi, EntryPoint = "NLPIR_GetKeyWords", CallingConvention = CallingConvention.Cdecl)]
    public static extern IntPtr NLPIR_GetKeyWords(String sText, int nMaxKeyLimit = 50, bool bWeightOut = false);
    [DllImport(path, CharSet = CharSet.Ansi, CallingConvention = CallingConvention.Cdecl, EntryPoint = "NLPIR_GetFileKeyWords")]
    public static extern IntPtr NLPIR_GetFileKeyWords(String sFilename, int nMaxKeyLimit = 50, bool bWeightOut = false);
}
- }
复制代码 这个类里面包含了所有的ICTCLAS的API函数,包含初始化,添加词语,添加词典,词典保存,分词等各种API。并且都是STATIC函数。
2 分词类DChineseAnalyzer的建立
分词类的建立我们参考StandardAnalyzer分词的实现,在此基础上实现了DChineseAnalyzer类。在分词类中实现必要的构造函数,以及- public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
- public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
复制代码 这两个函数。它们的作用是在函数中调用分词器Tokenizer的派生类来实现分词。在现有的版本中一般是在使用分词类的时候,直接调用ReusableTokenStream函数,而不是调用TokenStream函数,这样可以做到一个分词类对象的建立可供多个分词文本使用。从而减少内存的浪费,提高效率。
以及一些字段,利用这些字段,我们可以加入一些停用词,用户自己的词典。
3 分词器DChineseTokenizer的建立
这个类是分词的核心关键所在。我们要在其中调用ICTCLAS中的分词。在这里面要注意的一个函数是- public override bool IncrementToken()
复制代码 它是我们获取下一个分词结果要用到的函数,如果想要遍历分词结果,就要建立一个循环,不断的调用IncrementToken函数。
整个分词系统代码如下所示:- using System;
- using System.Collections.Generic;
- using System.Linq;
- using System.Text;
- using System.IO;
- using System.Runtime.InteropServices;
- using Lucene.Net.Analysis;
- using Lucene.Net.Analysis.Standard;
- using Lucene.Net.Util;
- using Lucene.Net.Documents;
- using Lucene.Net.Analysis.Tokenattributes;
- using Version = Lucene.Net.Util.Version;
- namespace Lucene.Net.Analysis.DChinese
- {
/// <summary>
/// Analyzer that tokenizes Chinese text with the native ICTCLAS2014 (NLPIR)
/// segmenter (via <c>DChineseTokenizer</c>), then lower-cases, removes stop
/// words, and applies Porter stemming to any Latin terms.
/// </summary>
public class DChineseAnalyzer : Analyzer
{
    // Stop words removed from the token stream.
    private ISet<string> stopSet;
    // Default stop-word set (English stop words from StopAnalyzer).
    public static readonly ISet<string> STOP_WORDS_SET;
    private Version matchVersion;
    private bool replaceInvalidAcronym;
    private bool enableStopPositionIncrements;

    /// <summary>
    /// Builds an analyzer for the given Lucene compatibility version with an
    /// explicit stop-word set.
    /// </summary>
    public DChineseAnalyzer(Version version, ISet<string> stopWords)
    {
        // BUG FIX: the original computed replaceInvalidAcronym from the
        // `matchVersion` field *before* assigning it, so the flag was derived
        // from the field's default value instead of the requested version.
        // Assign the version first and derive all flags from the parameter.
        this.matchVersion = version;
        stopSet = stopWords;
        enableStopPositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(version);
        replaceInvalidAcronym = version.OnOrAfter(Version.LUCENE_24);
    }

    /// <summary>Builds an analyzer using the default stop-word set.</summary>
    public DChineseAnalyzer(Version version)
        : this(version, STOP_WORDS_SET)
    {
    }

    /// <summary>Builds an analyzer reading stop words from a file.</summary>
    public DChineseAnalyzer(Version version, System.IO.FileInfo stopWords)
        : this(version, WordlistLoader.GetWordSet(stopWords))
    {
    }

    static DChineseAnalyzer()
    {
        STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
    }

    /// <summary>
    /// Creates a fresh tokenizer/filter chain for one field.  Prefer
    /// <see cref="ReusableTokenStream"/> when analyzing many texts.
    /// </summary>
    public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
    {
        TokenStream result = new DChineseTokenizer(matchVersion, reader);
        result = new LowerCaseFilter(result);
        result = new StopFilter(enableStopPositionIncrements, result, stopSet);
        result = new PorterStemFilter(result);
        return result;
    }

    // Holds the tokenizer and the head of the filter chain so the same
    // objects can be reset and reused across documents.
    private class SavedStreams
    {
        protected internal DChineseTokenizer source;
        protected internal TokenStream result;
    };

    /// <summary>
    /// Returns a cached tokenizer/filter chain, resetting it onto the new
    /// reader, so repeated analysis avoids re-allocating the chain.
    /// </summary>
    public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
    {
        SavedStreams streams = (SavedStreams)PreviousTokenStream;
        if (streams == null)
        {
            // First use on this thread: build and cache the chain.
            streams = new SavedStreams();
            streams.source = new DChineseTokenizer(matchVersion, reader);
            streams.result = new LowerCaseFilter(streams.source);
            streams.result = new StopFilter(enableStopPositionIncrements, streams.result, stopSet);
            streams.result = new PorterStemFilter(streams.result);
            PreviousTokenStream = streams;
        }
        else
        {
            // Reuse: just point the cached tokenizer at the new input.
            streams.source.Reset(reader);
        }
        streams.source.SetReplaceInvalidAcronym(replaceInvalidAcronym);
        return streams.result;
    }
}
/// <summary>
/// Tokenizer that first runs the entire input through the native NLPIR
/// segmenter (which inserts spaces between Chinese words) and then splits the
/// segmented text on whitespace, emitting term, offset, and position
/// attributes.
/// </summary>
public sealed class DChineseTokenizer : Tokenizer
{
    // Acronym-handling flag mirroring StandardTokenizer's post-2.4 behavior.
    private bool m_replaceInvalidAcronym;
    private int offset = 0;       // absolute offset of ioBuffer's start within the (segmented) input
    private int bufferIndex = 0;  // next unread index inside ioBuffer
    private int dataLen = 0;      // number of valid chars currently in ioBuffer
    private const int MAX_WORD_LEN = 255;     // terms longer than this are split
    private const int IO_BUFFER_SIZE = 4096;  // read-ahead buffer size
    private readonly char[] ioBuffer = new char[IO_BUFFER_SIZE];
    private ITermAttribute termAtt;
    private IOffsetAttribute offsetAtt;
    private IPositionIncrementAttribute posIncrAtt;

    // Shared constructor body: replaces the raw reader with a reader over the
    // NLPIR-segmented text and registers the attributes this stream exposes.
    private void Init(System.IO.TextReader input, Version matchVersion)
    {
        if (matchVersion.OnOrAfter(Version.LUCENE_24))
        {
            m_replaceInvalidAcronym = true;
        }
        else
        {
            m_replaceInvalidAcronym = false;
        }
        //this.input = input;
        // NOTE(review): ChangeInput returns null when NLPIR_Init fails, which
        // would surface later as a NullReferenceException on read — confirm
        // the NLPIR data directory is always deployed with the application.
        this.input = ChangeInput(input);
        termAtt = AddAttribute<ITermAttribute>();
        offsetAtt = AddAttribute<IOffsetAttribute>();
        posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
    }

    public DChineseTokenizer(Version matchVersion, System.IO.TextReader input)
        : base()
    {
        Init(input, matchVersion);
    }

    public DChineseTokenizer(Version matchVersion, System.IO.TextReader input, AttributeSource source)
        : base(source)
    {
        Init(input, matchVersion);
    }

    public DChineseTokenizer(Version matchVersion, System.IO.TextReader input, AttributeFactory factory)
        : base(factory)
    {
        Init(input, matchVersion);
    }

    /// <summary>
    /// Advances to the next whitespace-delimited token of the segmented text.
    /// Returns false when the input is exhausted.
    /// </summary>
    public override bool IncrementToken()
    {
        ClearAttributes();
        int length = 0;           // chars accumulated into the current term
        int start = bufferIndex;  // absolute start offset of the current term
        char[] buffer = termAtt.TermBuffer();
        while (true)
        {
            // Refill ioBuffer when it has been fully consumed.
            if (bufferIndex >= dataLen)
            {
                offset += dataLen;
                dataLen = input.Read(ioBuffer, 0, ioBuffer.Length);
                if (dataLen <= 0)
                {
                    // End of input: emit any partial term, otherwise stop.
                    dataLen = 0;
                    if (length > 0)
                        break;
                    return false;
                }
                bufferIndex = 0;
            }
            char c = ioBuffer[bufferIndex++];
            if (!System.Char.IsWhiteSpace(c))
            {
                if (length == 0)
                {
                    // First char of a new term: record its absolute offset.
                    start = offset + bufferIndex - 1;
                }
                else if (length == buffer.Length)
                {
                    // Term outgrew the attribute buffer; grow it.
                    buffer = termAtt.ResizeTermBuffer(1 + length);
                }
                buffer[length++] = c;
                if (length == MAX_WORD_LEN)
                    break; // hard cap: emit and continue with the remainder
            }
            else if (length > 0)
                break; // whitespace terminates the current term
        }
        termAtt.SetTermLength(length);
        offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + length));
        posIncrAtt.PositionIncrement = 1;
        return true;
    }

    /// <summary>Resets buffer state for re-reading the current input.</summary>
    public override void Reset()
    {
        base.Reset(input);
        bufferIndex = 0;
        offset = 0;
        dataLen = 0;
    }

    /// <summary>
    /// Re-targets this tokenizer at a new reader: the new text is segmented by
    /// NLPIR and all buffer state is cleared.  Used by ReusableTokenStream.
    /// </summary>
    public override void Reset(TextReader input)
    {
        String inputString = input.ReadToEnd();
        // bPOStagged = 0: segment only, no part-of-speech tags in the output.
        IntPtr intPtr = SplitWord.NLPIR_ParagraphProcess(inputString, 0);
        string strResult = Marshal.PtrToStringAnsi(intPtr);
        this.input = new StringReader(strResult);
        bufferIndex = 0;
        offset = 0;
        dataLen = 0;
    }

    /// <summary>Sets the final offset once the stream is exhausted.</summary>
    public override void End()
    {
        int finalOffset = CorrectOffset(offset);
        offsetAtt.SetOffset(finalOffset, finalOffset);
    }

    public void SetReplaceInvalidAcronym(bool replaceInvalidAcronym)
    {
        this.m_replaceInvalidAcronym = replaceInvalidAcronym;
    }

    // Initializes the native segmenter (data files are looked up under the
    // application base directory) and returns a reader over the segmented
    // text.  Returns null when NLPIR_Init fails.
    private TextReader ChangeInput(TextReader input)
    {
        //string indexPath = System.Environment.CurrentDirectory;
        //string indexPath = GetType().Assembly.Location;
        //string indexPath = System.IO.Path.GetDirectoryName(Page.Request.PhysicalPath);
        //string dirParent = Directory.GetParent(indexPath).Parent.FullName;
        string dirParent = System.AppDomain.CurrentDomain.BaseDirectory;

        bool bInit = SplitWord.NLPIR_Init(dirParent, 0, null);
        if (!bInit)
        {
            return null;
        }
        String inputString = input.ReadToEnd();
        IntPtr intPtr = SplitWord.NLPIR_ParagraphProcess(inputString, 0);
        string strResult = Marshal.PtrToStringAnsi(intPtr);
        return new StringReader(strResult);
    }
}
- }
复制代码 (2) 索引,查找
分词系统建立完毕,这是基础也是核心,后面我们建立索引要用到分词系统。下面依次讲解索引的建立,索引的查找。
索引的建立采用的是倒排序,原理就是遍历所有的文本,对其进行分词,然后把分的词汇建立索引表。形式类似如下:
词汇 出现词汇的篇章1,篇章2,篇章3……
建立索引的时候要注意这样的Document,Field这俩术语。Document代表的是一个文档,它里面包含一个或者多个Filed,Field表示的就是一种域,你可以在一个Document里面添加各种各样的域,名字自己起,但是关于文档的内容一定要加进去,方式如下所示:- doc.Add(new Field("contents", str, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
复制代码 整个索引的建立如下所示:- using System;
- using System.Collections.Generic;
- using System.Linq;
- using System.Web;
- using System.IO;
- using Lucene.Net.Analysis;
- using Lucene.Net.Analysis.Standard;
- using Lucene.Net.Index;
- using Lucene.Net.Documents;
- using Lucene.Net.Search;
- using Lucene.Net.Analysis.DChinese;
- using Version = Lucene.Net.Util.Version;
- using FSDirectory = Lucene.Net.Store.FSDirectory;
- using NativeFSLockFactory = Lucene.Net.Store.NativeFSLockFactory;
- namespace WebApplication6
- {
/// <summary>
/// Builds a Lucene index over every file under a document directory, using
/// DChineseAnalyzer with a stop-word list loaded from
/// UserDictionary\StopWords.txt beneath the application base directory.
/// </summary>
public class IndexFiles
{
    /// <summary>
    /// Creates (overwriting) the index at <paramref name="IndexDir"/> from
    /// all files under <paramref name="docDir"/>.  Returns false when the
    /// document directory does not exist.
    /// </summary>
    public static bool CreateIndexFromFile(DirectoryInfo docDir, DirectoryInfo IndexDir)
    {
        string strUserDicPath = System.AppDomain.CurrentDomain.BaseDirectory;
        string strTestDic = strUserDicPath;
        HashSet<string> lstStopWords = new HashSet<string>();
        strUserDicPath = strUserDicPath + "UserDictionary\\StopWords.txt";

        // Load the stop words, echoing them to StopTest.txt for inspection.
        // FIX: the StreamWriter was previously not disposed when an exception
        // occurred while reading; both streams are now in using blocks.
        using (StreamWriter sw = new StreamWriter(strTestDic + "UserDictionary\\StopTest.txt"))
        using (StreamReader strReader = new StreamReader(strUserDicPath))
        {
            string strLine;
            while ((strLine = strReader.ReadLine()) != null)
            {
                foreach (string str in strLine.Trim().Split())
                {
                    lstStopWords.Add(str);
                    sw.WriteLine(str);
                }
            }
        }

        bool bExist = File.Exists(docDir.FullName) || Directory.Exists(docDir.FullName);
        if (!bExist)
        {
            return false;
        }

        FSDirectory fsDirrctory = FSDirectory.Open(IndexDir, new NativeFSLockFactory());
        Analyzer analyzer = new DChineseAnalyzer(Version.LUCENE_30, lstStopWords);
        // `true` recreates the index from scratch on every call.
        IndexWriter writer = new IndexWriter(fsDirrctory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
        try
        {
            IndexDirectory(writer, docDir);
            writer.Optimize();
            writer.Commit();
        }
        finally
        {
            writer.Dispose();
            fsDirrctory.Dispose();
        }

        return true;
    }

    // Recursively indexes every file under `directory`.
    internal static void IndexDirectory(IndexWriter writer, DirectoryInfo directory)
    {
        foreach (var subDirectory in directory.GetDirectories())
            IndexDirectory(writer, subDirectory);
        foreach (var file in directory.GetFiles())
            IndexDocs(writer, file);
    }

    // Adds a single file to the index, skipping files we cannot read.
    internal static void IndexDocs(IndexWriter writer, FileInfo file)
    {
        Console.Out.WriteLine("adding " + file);
        try
        {
            writer.AddDocument(Document(file));
        }
        catch (FileNotFoundException)
        {
            // At least on Windows, some temporary files raise this exception
            // with an "access denied" message; checking readability first
            // doesn't help, so the file is simply skipped.
        }
        catch (UnauthorizedAccessException)
        {
            // Skip files we are not permitted to read.
        }
        catch (IOException)
        {
            // Skip files with any other I/O problem.
        }
    }

    /// <summary>
    /// Builds the Lucene document for one file: stored path, last-modified
    /// stamp, and analyzed contents with positions/offsets (for highlighting).
    /// </summary>
    public static Document Document(FileInfo f)
    {
        // make a new, empty document
        Document doc = new Document();

        // Path field: indexed but not tokenized, so it is searchable verbatim.
        doc.Add(new Field("path", f.FullName, Field.Store.YES, Field.Index.NOT_ANALYZED));

        // FIX: the original passed f.LastWriteTime.Millisecond (only the
        // 0-999 millisecond component) to DateTools.TimeToString, so every
        // document was stamped at the 1970 epoch.  DateToString formats the
        // actual timestamp at minute resolution.
        doc.Add(new Field("modified",
            DateTools.DateToString(f.LastWriteTime, DateTools.Resolution.MINUTE),
            Field.Store.YES, Field.Index.NOT_ANALYZED));

        // Contents field: stored and analyzed, with term vectors carrying
        // positions and offsets so search results can be highlighted.
        // Note: File.ReadAllText assumes the system default/detected encoding;
        // files in other encodings may index incorrectly.
        string str = File.ReadAllText(f.FullName);
        doc.Add(new Field("contents", str, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

        // return the document
        return doc;
    }
}
- }
复制代码 查找的实现:
Lucene.net中有多种多样的查找类,但是如果要实现多条件查询就要使用PhraseQuery
类。通过搜索函数把搜索结果放到容器里面。
最后结果的呈现时候,我们把搜索结果放到列表里面,如果还要显示关键词加亮,那么就需要做一点额外的工作。在这里我是通过ColorWord这个类实现的。具体的搜索代码如下所示:- using System;
- using System.Collections.Generic;
- using System.Linq;
- using System.Web;
- using System.IO;
- using Lucene.Net.Analysis;
- using Lucene.Net.Analysis.DChinese;
- using Lucene.Net.Documents;
- using Lucene.Net.QueryParsers;
- using Lucene.Net.Index;
- using Lucene.Net.Search;
- using FSDirectory = Lucene.Net.Store.FSDirectory;
- using NoLockFactory = Lucene.Net.Store.NoLockFactory;
- using Version = Lucene.Net.Util.Version;
- namespace WebApplication6
- {
/// <summary>
/// Searches the index with a sloppy phrase query over the "contents" field
/// and returns the matching documents with the query terms highlighted.
/// </summary>
public static class SearchFiles
{
    /// <summary>
    /// Runs a PhraseQuery built from <paramref name="termList"/> against the
    /// index at <paramref name="dirIndex"/>; returns up to 1000 hits, each
    /// with its highlighted contents and stored path.
    /// </summary>
    public static List<ItemList> SearchIndex(DirectoryInfo dirIndex, List<string> termList)
    {
        // FIX: the directory, reader and searcher were never disposed,
        // leaking file handles on every search; they are now in using blocks.
        // (Also removed an unused DChineseAnalyzer local.)
        using (FSDirectory dirFS = FSDirectory.Open(dirIndex, new NoLockFactory()))
        using (IndexReader reader = IndexReader.Open(dirFS, true))
        using (IndexSearcher searcher = new IndexSearcher(reader))
        {
            PhraseQuery query = new PhraseQuery();
            foreach (string word in termList)
            {
                query.Add(new Term("contents", word));
            }
            // Allow up to 100 positions between the phrase terms, so the
            // query behaves like a loose multi-term AND with proximity.
            query.Slop = 100;

            TopScoreDocCollector collector = TopScoreDocCollector.Create(1000, true);
            searcher.Search(query, collector);
            ScoreDoc[] hits = collector.TopDocs().ScoreDocs;

            List<ItemList> lstResult = new List<ItemList>();
            for (int i = 0; i < hits.Length; i++)
            {
                // FIX: removed a pointless `new Document()` that was
                // immediately overwritten by searcher.Doc(...).
                Document doc = searcher.Doc(hits[i].Doc);
                ItemList item = new ItemList();
                // Highlight the search terms inside the stored contents.
                item.ItemContent = ColorWord.addColor(doc.Get("contents"), termList);
                item.ItemPath = doc.Get("path");
                lstResult.Add(item);
            }
            return lstResult;
        }
    }
}
- }
复制代码 最终结果展示:
|
|