Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1395,7 +1395,7 @@ private void resetState() {

private void setOpened() {
// The backend tx may be reused, here just set a flag
assert this.opened.get() == false;
assert !this.opened.get();
this.opened.set(true);
this.transactions.get().openedTime(DateUtil.now().getTime());
this.refs.incrementAndGet();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@

public class AnalyzerFactory {

private static Map<String, Class<? extends Analyzer>> analyzers;
private static final Map<String, Class<? extends Analyzer>> analyzers;

static {
analyzers = new ConcurrentHashMap<>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ public class AnsjAnalyzer implements Analyzer {
"NlpAnalysis"
);

private String analysis;
private final String analysis;

public AnsjAnalyzer(String mode) {
if (!SUPPORT_MODES.contains(mode)) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ public class HanLPAnalyzer implements Analyzer {
.enablePlaceRecognize(true)
.enableOrganizationRecognize(true);

private String tokenizer;
private final String tokenizer;

public HanLPAnalyzer(String mode) {
if (!SUPPORT_MODES.contains(mode)) {
Expand All @@ -71,7 +71,7 @@ public HanLPAnalyzer(String mode) {

@Override
public Set<String> segment(String text) {
List<Term> terms = null;
List<Term> terms;
switch (this.tokenizer) {
case "standard":
terms = StandardTokenizer.segment(text);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ public class IKAnalyzer implements Analyzer {
"max_word"
);

private boolean smartSegMode;
private final boolean smartSegMode;

public IKAnalyzer(String mode) {
if (!SUPPORT_MODES.contains(mode)) {
Expand All @@ -58,7 +58,7 @@ public Set<String> segment(String text) {
IKSegmenter ik = new IKSegmenter(new StringReader(text),
this.smartSegMode);
try {
Lexeme word = null;
Lexeme word;
while ((word = ik.next()) != null) {
result.add(word.getLexemeText());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ public class JiebaAnalyzer implements Analyzer {

private static final JiebaSegmenter JIEBA_SEGMENTER = new JiebaSegmenter();

private JiebaSegmenter.SegMode segMode;
private final JiebaSegmenter.SegMode segMode;

public JiebaAnalyzer(String mode) {
if (!SUPPORT_MODES.contains(mode)) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public class MMSeg4JAnalyzer implements Analyzer {

private static final Dictionary DIC = Dictionary.getInstance();

private Seg seg;
private final Seg seg;

public MMSeg4JAnalyzer(String mode) {
if (!SUPPORT_MODES.contains(mode)) {
Expand All @@ -69,7 +69,7 @@ public MMSeg4JAnalyzer(String mode) {
break;
default:
throw new AssertionError(String.format(
"Unsupported segment mode '%s'", this.seg));
"Unsupported segment mode '%s'", mode));
}
}

Expand All @@ -78,7 +78,7 @@ public Set<String> segment(String text) {
Set<String> result = InsertionOrderUtil.newSet();
MMSeg mmSeg = new MMSeg(new StringReader(text), this.seg);
try {
Word word = null;
Word word;
while ((word = mmSeg.next()) != null) {
result.add(word.getString());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@

import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.TokenStream;
Expand All @@ -30,15 +29,12 @@

import com.baidu.hugegraph.HugeException;
import com.baidu.hugegraph.util.InsertionOrderUtil;
import com.google.common.collect.ImmutableList;

/**
* Reference from https://my.oschina.net/apdplat/blog/412921
*/
public class SmartCNAnalyzer implements Analyzer {

public static final List<String> SUPPORT_MODES = ImmutableList.of();

private static final SmartChineseAnalyzer ANALYZER =
new SmartChineseAnalyzer();

Expand All @@ -52,7 +48,7 @@ public Set<String> segment(String text) {
Reader reader = new StringReader(text);
try (TokenStream tokenStream = ANALYZER.tokenStream("text", reader)) {
tokenStream.reset();
CharTermAttribute term = null;
CharTermAttribute term;
while (tokenStream.incrementToken()) {
term = tokenStream.getAttribute(CharTermAttribute.class);
result.add(term.toString());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ public class WordAnalyzer implements Analyzer {
.add("PureEnglish")
.build();

private SegmentationAlgorithm algorithm;
private final SegmentationAlgorithm algorithm;

public WordAnalyzer(String mode) {
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

package com.baidu.hugegraph.unit.core;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

Expand All @@ -32,9 +33,9 @@

public class AnalyzerTest {

private static String text1 = "England wins World Cup";
private static String text2 = "英格兰世界杯夺冠,中华人民共和国国歌," +
"百度科技园位于北京市海淀区西北旺东路10号院";
private static final String text1 = "England wins World Cup";
private static final String text2 = "英格兰世界杯夺冠,中华人民共和国国歌," +
"百度科技园位于北京市海淀区西北旺东路10号院";

@Before
public void setup() {
Expand Down Expand Up @@ -216,11 +217,7 @@ public void testIKAnalyzer() {
}

private static Set<String> setOf(String... elems) {
Set<String> result = new HashSet<>();
for (String elem : elems) {
result.add(elem);
}
return result;
return new HashSet<>(Arrays.asList(elems));
Copy link
Copy Markdown
Member

@imbajin imbajin Oct 26, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks — one question: which modules/packages did you check this time? (Or did you check all modules?)

If there are too many class files for one person to handle, a simple approach would be to divide them up by module, so that others could take on the remaining modules.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

升级core的maven依赖时顺手改的,这块有需要,可以几个人分下

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

升级core的maven依赖时顺手改的,这块有需要,可以几个人分下

👌🏻, 那就是说没有系统性的检查一下是吧. 可以按模块分给一下

}

}