2. Solr relies on ZooKeeper for distributed management, while Elasticsearch ships with its own distributed coordination.
3. Solr accepts data in more formats, such as JSON, XML, and CSV, while Elasticsearch only accepts JSON.
4. Solr officially offers more features out of the box, while Elasticsearch concentrates on core functionality and leaves most advanced features to third-party plugins.
5. Solr outperforms Elasticsearch in traditional search applications, but is noticeably less efficient for real-time search.
6. Solr is a solid solution for traditional search applications, whereas Elasticsearch is better suited to emerging real-time search scenarios.
7. Solr focuses on text search, while Elasticsearch is also commonly used for querying, filtering, and grouped/aggregation analysis, as the sketch below illustrates.
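To make that last point concrete, here is a minimal sketch of a grouped (terms) aggregation using the Java TransportClient that the rest of this post works with; the "articles" index and "tag" field are hypothetical names, and the field is assumed to be mapped as keyword (or otherwise aggregatable).

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;

public class TagAggregationExample {
    // client is assumed to be a connected TransportClient, as set up in the examples below
    public static void printTagCounts(TransportClient client) {
        SearchResponse response = client.prepareSearch("articles")                       // hypothetical index name
                .setQuery(QueryBuilders.matchAllQuery())
                .addAggregation(AggregationBuilders.terms("by_tag").field("tag"))        // group by the "tag" field
                .setSize(0)                                                              // only the aggregation, no hits
                .get();
        Terms byTag = response.getAggregations().get("by_tag");
        for (Terms.Bucket bucket : byTag.getBuckets()) {
            System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount()); // group key and document count
        }
    }
}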
2. Elastic is built on top of the open-source library Lucene; it requires a Java 8 environment and listens on port 9200 by default.
Search: pick a node from the replica set (load-balancing strategy) -> distribute the request -> merge the result sets.
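As a quick sanity check of that default port, the sketch below issues a plain HTTP GET against a node's REST endpoint using only the JDK; the localhost:9200 address is an assumption for a locally running single node.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class EsPing {
    public static void main(String[] args) throws Exception {
        // Assumed local node address; adjust host/port for your environment
        URL url = new URL("http://localhost:9200/");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line); // prints cluster name, version, Lucene version, etc.
            }
        }
    }
}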
1. Required packages
@Before
public void createConn() throws Exception {
    System.out.printf("---create----------");
    // Connect to the cluster by name and register the transport addresses of the nodes (transport port 9300)
    Settings settings = Settings.settingsBuilder().put("cluster.name", "lzxcluster").build();
    client = TransportClient.builder().settings(settings).build()
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("node01"), 9300))
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("node02"), 9300))
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("node03"), 9300));
}
@After
public void closeConn() {
    System.out.printf("---close----------");
    client.close();
}
@Test
public void createIndex() {
    System.out.printf("---create--index--------");
    // Check whether the index already exists; if so, delete it first
    IndicesExistsResponse indicesExistsResponse = client.admin().indices().prepareExists("lzxtest").execute().actionGet();
    if (indicesExistsResponse.isExists()) {
        client.admin().indices().prepareDelete("lzxtest").execute();
    }
    Map<String, Object> sets = new HashMap<>();
    // Set the number of replicas to 2
    sets.put("number_of_replicas", 2);
    client.admin().indices().prepareCreate("lzxtest").setSettings(sets).execute();
}
@Test
public void addData() {
    Map<String, Object> dataMap = new HashMap<>();
    dataMap.put("name", "aaa");
    dataMap.put("content", "wqlwx is a bad manz");
    dataMap.put("size", 28);
    // prepareIndex(index name, type)
    IndexResponse indexResponse = client.prepareIndex("lzxtest", "testfields")
            .setSource(dataMap)
            .execute().actionGet();
    System.out.println("id:" + indexResponse.getId());
}
@Test
public void queryData() {
    QueryBuilder queryBuilder = new MatchQueryBuilder("content", "lzx");
    SearchResponse searchResponse = client.prepareSearch("lzxtest")
            .setTypes("testfields")
            .setQuery(queryBuilder)
            .execute()
            .actionGet();
    SearchHits searchHits = searchResponse.getHits();
    System.out.println("Total hits: " + searchHits.getTotalHits());
    for (SearchHit searchHit : searchHits) {
        System.out.println("Full document: " + searchHit.getSourceAsString());
        System.out.println("content field: " + searchHit.getSource().get("content"));
    }
}
@Test
public void queryDataByPage() {
    QueryBuilder queryBuilder = new MatchQueryBuilder("content", "lzx");
    SearchResponse searchResponse = client.prepareSearch("lzxtest")
            .setTypes("testfields")
            .addHighlightedField("content")            // highlight this field
            .setHighlighterPreTags("<font color=red>")
            .setHighlighterPostTags("</font>")
            .setQuery(queryBuilder)
            .setFrom(0)                                // start offset
            .setSize(2)                                // fetch two documents
            .execute()
            .actionGet();
    SearchHits searchHits = searchResponse.getHits();
    System.out.println("Total hits: " + searchHits.getTotalHits());
    for (SearchHit searchHit : searchHits) {
        System.out.println("Full document: " + searchHit.getSourceAsString());
        System.out.println("content field: " + searchHit.getSource().get("content"));
        System.out.println("Highlighted content: " + searchHit.getHighlightFields().get("content").getFragments()[0]);
    }
}
1. Connecting to Elasticsearch from a Java client: the client version should match the server version.
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch</artifactId>
    <version>5.5.1</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>transport</artifactId>
    <version>5.5.1</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch-analysis-ik</artifactId>
    <version>5.5.1</version>
</dependency>
/**
 * cluster.name: the name of the ES cluster to join
 * client.transport.sniff: automatically sniff the cluster state and add the other ES nodes' IPs to the local client's node list
 * PreBuiltTransportClient: client initialization changed from older versions; this constructor has several overloads, e.g. for initializing plugins.
 */
Settings esSettings = Settings.builder()
        .put("cluster.name", clusterName)
        .put("client.transport.sniff", true)
        .build();
client = new PreBuiltTransportClient(esSettings);
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(ip), esPort));
/**
 * If the Elasticsearch server has the x-pack plugin installed, a PreBuiltXPackTransportClient instance is required to access it.
 */
Settings settings = Settings.builder().put("cluster.name", "xxx")
        .put("xpack.security.transport.ssl.enabled", false)
        .put("xpack.security.user", "xxx:xxx")
        .put("client.transport.sniff", true).build();
try {
    client = new PreBuiltXPackTransportClient(settings)
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("xxx.xxx.xxx.xxx"), 9300))
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("xxx.xxx.xxx.xxx"), 9300));
} catch (UnknownHostException e) {
    e.printStackTrace();
}
// Serialize a POJO to JSON with Jackson and index it
CsdnBlog csdn = new CsdnBlog();
csdn.setTag("C");
csdn.setView("100");
csdn.setTitle("Programming");
csdn.setDate(new Date().toString());
ObjectMapper mapper = new ObjectMapper();
byte[] json = mapper.writeValueAsBytes(csdn);
IndexResponse response = client.prepareIndex("fendo", "fendodate").setSource(json).get();
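The CsdnBlog POJO referenced above is not shown in the original; here is a minimal sketch, assuming plain String fields that match the setters used:

public class CsdnBlog {
    private String tag;
    private String view;
    private String title;
    private String date;

    public String getTag() { return tag; }
    public void setTag(String tag) { this.tag = tag; }
    public String getView() { return view; }
    public void setView(String view) { this.view = view; }
    public String getTitle() { return title; }
    public void setTitle(String title) { this.title = title; }
    public String getDate() { return date; }
    public void setDate(String date) { this.date = date; }
}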
// Build the document with XContentBuilder instead of a pre-serialized byte array
XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
        .field("user", "ccse")
        .field("postDate", new Date())
        .field("message", "this is Elasticsearch").endObject();
IndexResponse response = client.prepareIndex("fendo", "fendodata").setSource(builder).get();
BulkByScrollResponse response =
        DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
                .filter(QueryBuilders.matchQuery("gender", "male")) // query condition
                .source("persons")                                  // index name
                .get();                                             // execute
long deleted = response.getDeleted(); // number of documents deleted
Asynchronous version:
DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
        .filter(QueryBuilders.matchQuery("gender", "male"))    // query condition
        .source("persons")                                     // index name
        .execute(new ActionListener<BulkByScrollResponse>() {  // callback listener
            @Override
            public void onResponse(BulkByScrollResponse response) {
                long deleted = response.getDeleted(); // number of documents deleted
            }
            @Override
            public void onFailure(Exception e) {
            }
        });
Upsert: update the document if it exists, otherwise insert it.
IndexRequest indexRequest = new IndexRequest("index", "type", "1").source(jsonBuilder()
        .startObject()
        .field("name", "Joe Smith")
        .field("gender", "male")
        .endObject());
UpdateRequest updateRequest = new UpdateRequest("index", "type", "1").doc(jsonBuilder()
        .startObject()
        .field("gender", "male")
        .endObject())
        .upsert(indexRequest); // if the document does not exist, `indexRequest` is indexed instead
client.update(updateRequest).get();
Multi get: fetch several documents in a single request.
MultiGetResponse multiGetItemResponses = client.prepareMultiGet()
        .add("twitter", "tweet", "1")           // by a single id
        .add("twitter", "tweet", "2", "3", "4") // by multiple ids
        .add("another", "type", "foo")          // documents can also come from another index
        .get();
for (MultiGetItemResponse itemResponse : multiGetItemResponses) { // iterate over the responses
    GetResponse response = itemResponse.getResponse();
    if (response.isExists()) {                       // check that the document exists
        String json = response.getSourceAsString();  // the _source field
    }
}