Getting Started with Lucene in Java

1. Lucene version: 7.2.1

The pom.xml:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.example</groupId>
    <artifactId>demo-lucene</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>war</packaging>

    <name>demo-lucene</name>
    <description>Demo project for Spring Boot</description>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.0.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>

        <!-- Lucene core -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-core</artifactId>
            <version>7.2.1</version>
        </dependency>
        <!-- Lucene query parser -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-queryparser</artifactId>
            <version>7.2.1</version>
        </dependency>
        <!-- Lucene common analyzers -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-analyzers-common</artifactId>
            <version>7.2.1</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>


</project>

2. The code:

package com.example.demo;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

import java.io.IOException;
import java.nio.file.Paths;

/**
 * Created by 1 on 2018/11/20.
 */
public class DemoLucene {

    public static void main(String[] args) throws IOException {
        String path = "D:\\lucene\\index";

        // Open an on-disk index directory and create an IndexWriter that tokenizes
        // text with StandardAnalyzer; OpenMode.CREATE rebuilds the index on each run
        Directory directory = FSDirectory.open(Paths.get(path));
        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        IndexWriter indexWriter = new IndexWriter(directory, config);

        Document document1 = new Document();
        document1.add(new StringField("name", "张三", Field.Store.YES));
        document1.add(new StringField("no", "1001", Field.Store.YES));
        document1.add(new TextField("content", "中心小学的张三是个喜欢学习的学生", Field.Store.YES));
        indexWriter.addDocument(document1);

        Document document2 = new Document();
        document2.add(new StringField("name", "李四", Field.Store.YES));
        document2.add(new StringField("no", "1002", Field.Store.YES));
        document2.add(new TextField("content", "中心小学的李四是个期末考试成绩很好的学生", Field.Store.YES));
        indexWriter.addDocument(document2);

        Document document3 = new Document();
        document3.add(new StringField("name", "王五", Field.Store.YES));
        document3.add(new StringField("no", "1003", Field.Store.YES));
        document3.add(new TextField("content", "南宁市中心小学的王五在班级里是个班长", Field.Store.YES));
        indexWriter.addDocument(document3);

        indexWriter.close();

        // Search phase: open a reader over the same directory and query the
        // "content" field for the term "班长"
        DirectoryReader directoryReader = DirectoryReader.open(directory);
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);
        Query query = new TermQuery(new Term("content", "班长"));
        System.out.println("query = " + query);
        TopDocs topDocs = indexSearcher.search(query, 100);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length == 0) {
            System.out.println("No results found");
        } else {
            for (int i = 0; i < scoreDocs.length; i++) {
                Document docResult = indexSearcher.doc(scoreDocs[i].doc);
                System.out.println(String.format("======================  Result %d  ======================", i + 1));
                System.out.println("name:" + docResult.get("name"));
                System.out.println("no:" + docResult.get("no"));
                System.out.println("content:" + docResult.get("content"));
            }
        }

        directoryReader.close();
        directory.close();
    }

}

Directory is an abstract class, and its subclass BaseDirectory is abstract as well.

  Subclasses of BaseDirectory:

    RAMDirectory (a concrete class): builds the index in memory

    FSDirectory (an abstract class): builds the index on disk

This article uses FSDirectory. A directory is obtained through FSDirectory's static open method. Let's take a look at what open does:
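
Roughly, open picks a concrete implementation like this (paraphrased from the Lucene 7.x FSDirectory source; check your own version for the exact code):

  public static FSDirectory open(Path path) throws IOException {
    return open(path, FSLockFactory.getDefault());
  }

  public static FSDirectory open(Path path, LockFactory lockFactory) throws IOException {
    if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
      // 64-bit JVMs that can unmap buffers get a memory-mapped directory
      return new MMapDirectory(path, lockFactory);
    } else if (Constants.WINDOWS) {
      return new SimpleFSDirectory(path, lockFactory);
    } else {
      return new NIOFSDirectory(path, lockFactory);
    }
  }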

So FSDirectory.open returns one of FSDirectory's concrete subclasses; on a typical 64-bit JVM it returns an MMapDirectory here.

3. Commonly used Query types in Lucene

  

The abstract class Query has many subclasses; only the commonly used ones are covered here.

  1. PhraseQuery: matches a sequence of terms (a phrase), for example consecutive English words. Usage:

        PhraseQuery.Builder builder = new PhraseQuery.Builder();
        builder.add(new Term("content", "tom"));
        builder.add(new Term("content", "is"));
        builder.add(new Term("content", "student"));
        System.out.println("查询语句 = " + builder.build());
        TopDocs topDocs = indexSearcher.search(builder.build(), 100);

  2. WildcardQuery: wildcard queries. Usage:

        WildcardQuery wildcardQuery = new WildcardQuery(new Term("content", "中心小学*"));
        System.out.println("查询语句 = " + wildcardQuery);
        TopDocs topDocs = indexSearcher.search(wildcardQuery, 100);

  3. FuzzyQuery: fuzzy queries based on edit distance. Usage:

        FuzzyQuery fuzzyQuery = new FuzzyQuery(new Term("name", "王健林"), 2, 3);
        System.out.println("查询语句 = " + fuzzyQuery);
        TopDocs topDocs = indexSearcher.search(fuzzyQuery, 100);

    The FuzzyQuery above uses the three-argument constructor:

  public FuzzyQuery(Term term, int maxEdits, int prefixLength) {
    this(term, maxEdits, prefixLength, defaultMaxExpansions, defaultTranspositions);
  }

    maxEdits: the maximum edit distance (valid range 0-2, default 2; one edit means inserting one character, changing one character, or deleting one character).

      For example, with the fuzzy term "王健林" and maxEdits=2, the following would all match:

        1. 王健林首富 (two insertions after "王健林": "首" and "富")

        2. 王力宏 (two substitutions: "健" to "力" and "林" to "宏")

        3. 王 (two deletions: "健" and "林")

        4. 王五 (one substitution of "健" to "五" and one deletion of "林")

        And so on: with maxEdits=2, any combination of up to two insert/delete/substitute operations still matches.

    prefixLength: the prefix length (default 0; when non-zero, that many leading characters must match exactly).

      For example:
        new FuzzyQuery(new Term("name", "王健林"), 2, 2) only matches terms starting with "王健";
        new FuzzyQuery(new Term("name", "王健林"), 2, 3) only matches terms starting with "王健林", and yet it fails to match the record "王健林" itself; if you know why, please let me know.

  4. BooleanQuery: combines multiple query clauses. Usage:

        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(new TermQuery(new Term("content", "首富")), BooleanClause.Occur.MUST);
        builder.add(new TermQuery(new Term("content", "影院")), BooleanClause.Occur.MUST);
        System.out.println("查询语句 = " + builder.build());
        TopDocs topDocs = indexSearcher.search(builder.build(), 100);

    Occur is an enum with the following values:

    MUST (the clause must match), MUST_NOT (the clause must not match), FILTER (must match but does not contribute to the score), SHOULD (optional; matching it raises the score). A combined example follows below.
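
    A rough sketch, assuming an indexSearcher is open over an index whose content field is tokenized (for instance the 王健林 document shown in section 4 below):

        // content must contain "首富", must not contain "影院",
        // and documents that also contain "董事长" are ranked higher
        BooleanQuery.Builder mixed = new BooleanQuery.Builder();
        mixed.add(new TermQuery(new Term("content", "首富")), BooleanClause.Occur.MUST);
        mixed.add(new TermQuery(new Term("content", "影院")), BooleanClause.Occur.MUST_NOT);
        mixed.add(new TermQuery(new Term("content", "董事长")), BooleanClause.Occur.SHOULD);
        TopDocs mixedDocs = indexSearcher.search(mixed.build(), 100);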

  5. IntPoint, LongPoint, FloatPoint, DoublePoint: numeric exact-match and range queries. Usage:

      newRangeQuery: a range query over the numeric field

      newExactQuery: an exact-match query over the numeric field

        Query query = null;
        query = IntPoint.newRangeQuery("number", 1, 10); // range query
        query = IntPoint.newExactQuery("number", 5); // exact query (only this last assignment is actually searched)
        System.out.println("query = " + query);
        TopDocs topDocs = indexSearcher.search(query, 20);

  6. TermQuery: queries a single indexed term. Usage:

        Query query = null;
        query = new TermQuery(new Term("name", "张三"));
        System.out.println("查询语句 = " + query);
        TopDocs topDocs = indexSearcher.search(query, 20);

4. Commonly used Field types in Lucene

  TextField: indexes a Reader or String for full-text search (tokenized)
  StringField: indexes the String verbatim as a single token (not tokenized)
  IntPoint: indexes an int for exact-match/range queries
  LongPoint: indexes a long for exact-match/range queries
  FloatPoint: indexes a float for exact-match/range queries
  DoublePoint: indexes a double for exact-match/range queries
  SortedDocValuesField: a byte[] indexed column-wise per document, for sorting/faceting
  SortedSetDocValuesField: a SortedSet<byte[]> indexed column-wise per document, for sorting/faceting
  NumericDocValuesField: a long indexed column-wise per document, for sorting/faceting
  SortedNumericDocValuesField: a sorted set of longs indexed column-wise per document, for sorting/faceting
  StoredField: a stored-only value, retrievable in search results
  
  The most frequently used fields are:
    TextField: indexed and tokenized
    StringField: indexed, not tokenized
    StoredField: stored value only
  
        Document document = new Document();
        // StringField; Field.Store.YES means the value is also stored
        document.add(new StringField("name", "王健林", Field.Store.YES));
        // StringField; Field.Store.YES means the value is also stored
        document.add(new StringField("no", "1006", Field.Store.YES));
        // IntPoint for exact-match/range queries
        document.add(new IntPoint("number", 6));
        // To also store the IntPoint's value, add a StoredField with the same name
        document.add(new StoredField("number", 6));
        // To sort on this value, add a SortedNumericDocValuesField with the same name
        document.add(new SortedNumericDocValuesField("number", 6L));
        // TextField; Field.Store.YES means the value is also stored
        document.add(new TextField("content", "王健林是万达集团的董事长,下有万达影院、万达酒店、万达广场等产业", Field.Store.YES));
 

5. Commonly used Analyzers in Lucene

 1. StandardAnalyzer

  When you run the demo at the top of the article, the query searches the content field for the value "班长", and it should hit document3: content is a TextField, and as described above a TextField is tokenized and indexed, so after tokenization the term "班长" ought to be findable. Why, then, does the query return nothing? The answer lies in the analyzer.

  Document document3 = new Document();
  document3.add(new StringField("name", "王五", Field.Store.YES));
  document3.add(new StringField("no", "1003", Field.Store.YES));
  document3.add(new TextField("content", "南宁市中心小学的王五在班级里是个班长", Field.Store.YES));
  indexWriter.addDocument(document3);  
  
  
The demo at the top of the article uses StandardAnalyzer which, as the name suggests, is the standard analyzer.
        Analyzer analyzer = new StandardAnalyzer();
  Now let's tokenize the text "南宁市中心小学的王五在班级里是个班长" with this analyzer and look at the result.
    public static void main(String[] args) throws IOException {
        standardAnalyzerDemo();
    }

    // requires org.apache.lucene.analysis.TokenStream and
    // org.apache.lucene.analysis.tokenattributes.CharTermAttribute
    public static void standardAnalyzerDemo() throws IOException {
        String text = "南宁市中心小学的王五在班级里是个班长";
        Analyzer analyzer = new StandardAnalyzer();
        TokenStream tokenStream = analyzer.tokenStream("word", text);
        CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
        tokenStream.reset();
        System.out.println("==========StandardAnalyzer tokenization start==========");
        while (tokenStream.incrementToken()) {
            System.out.print(String.format("[%s] ", charTermAttribute.toString()));
        }
        tokenStream.end();
        tokenStream.close();
        System.out.println("");
        System.out.println("==========StandardAnalyzer tokenization end==========");
    }

  The output:

==========StandardAnalyzer tokenization start==========
[南] [宁] [市] [中] [心] [小] [学] [的] [王] [五] [在] [班] [级] [里] [是] [个] [班] [长] 
==========StandardAnalyzer tokenization end==========
  As you can see, StandardAnalyzer splits the text one character at a time, which is why searching for the two-character term "班长" finds nothing.
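
  As a quick check (a sketch reusing the indexSearcher from the demo at the top of the article), a single-character term does hit document3 in the index built with StandardAnalyzer:

        // single characters are exactly the tokens StandardAnalyzer produced
        Query singleChar = new TermQuery(new Term("content", "长"));
        TopDocs singleCharDocs = indexSearcher.search(singleChar, 10);
        System.out.println("hits for \"长\": " + singleCharDocs.totalHits); // expected: 1 (document3)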

  2. IKAnalyzer (open source)

  This is an open-source analyzer (GitHub: https://github.com/wks/ik-analyzer). Let's see how it tokenizes the same text.
    // IKAnalyzer comes from the ik-analyzer dependency, not from Lucene itself
    public static void ikAnalyzerDemo() throws IOException {
        String text = "南宁市中心小学的王五在班级里是个班长";
        Analyzer analyzer = new IKAnalyzer();
        TokenStream tokenStream = analyzer.tokenStream("word", text);
        CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
        tokenStream.reset();
        System.out.println("==========IKAnalyzer tokenization start==========");
        while (tokenStream.incrementToken()) {
            System.out.print(String.format("[%s] ", charTermAttribute.toString()));
        }
        tokenStream.end();
        tokenStream.close();
        System.out.println("");
        System.out.println("==========IKAnalyzer tokenization end==========");
    }

  The output:

==========IKAnalyzer tokenization start==========
[南宁市] [南宁] [市中心] [中心小学] [中心] [小学] [的] [王] [五] [在] [班级] [里] [是] [个] [班长] 
==========IKAnalyzer tokenization end==========

  As you can see, IKAnalyzer recognizes whole words when tokenizing. So all that is needed is to swap StandardAnalyzer for IKAnalyzer in the demo at the top of the article.

        Directory directory = FSDirectory.open(Paths.get(path));
        // previously StandardAnalyzer; swapped for IKAnalyzer
        Analyzer analyzer = new IKAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        IndexWriter indexWriter = new IndexWriter(directory, config);

  With that change, the query in the demo at the top of the article finds the expected document.
