Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 65ae7e7

Browse files
authored
AnalyzerTextOptions ctors changes and name suffix on properties (Azure#11810)
1 parent 388270f commit 65ae7e7

File tree

8 files changed

+61
-76
lines changed

8 files changed

+61
-76
lines changed

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/implementation/converters/AnalyzeRequestConverter.java

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -24,32 +24,32 @@ public static AnalyzeTextOptions map(com.azure.search.documents.indexes.implemen
2424
if (obj == null) {
2525
return null;
2626
}
27-
AnalyzeTextOptions analyzeTextOptions = new AnalyzeTextOptions();
27+
AnalyzeTextOptions analyzeTextOptions = null;
28+
29+
if (obj.getTokenizer() != null) {
30+
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
31+
analyzeTextOptions = new AnalyzeTextOptions(obj.getText(), tokenizer);
32+
analyzeTextOptions.setTokenizerName(tokenizer);
33+
} else {
34+
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
35+
analyzeTextOptions = new AnalyzeTextOptions(obj.getText(), analyzer);
36+
analyzeTextOptions.setAnalyzerName(analyzer);
37+
}
2838

2939
if (obj.getCharFilters() != null) {
3040
List<CharFilterName> charFilters =
3141
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
3242
analyzeTextOptions.setCharFilters(charFilters);
3343
}
3444

35-
if (obj.getAnalyzer() != null) {
36-
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
37-
analyzeTextOptions.setAnalyzer(analyzer);
38-
}
3945

4046
if (obj.getTokenFilters() != null) {
4147
List<TokenFilterName> tokenFilters =
4248
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
4349
analyzeTextOptions.setTokenFilters(tokenFilters);
4450
}
4551

46-
String text = obj.getText();
47-
analyzeTextOptions.setText(text);
4852

49-
if (obj.getTokenizer() != null) {
50-
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
51-
analyzeTextOptions.setTokenizer(tokenizer);
52-
}
5353
return analyzeTextOptions;
5454
}
5555

@@ -69,9 +69,9 @@ public static com.azure.search.documents.indexes.implementation.models.AnalyzeRe
6969
analyzeRequest.setCharFilters(charFilters);
7070
}
7171

72-
if (obj.getAnalyzer() != null) {
72+
if (obj.getAnalyzerName() != null) {
7373
com.azure.search.documents.indexes.implementation.models.LexicalAnalyzerName analyzer =
74-
LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
74+
LexicalAnalyzerNameConverter.map(obj.getAnalyzerName());
7575
analyzeRequest.setAnalyzer(analyzer);
7676
}
7777

@@ -84,9 +84,9 @@ public static com.azure.search.documents.indexes.implementation.models.AnalyzeRe
8484
String text = obj.getText();
8585
analyzeRequest.setText(text);
8686

87-
if (obj.getTokenizer() != null) {
87+
if (obj.getTokenizerName() != null) {
8888
com.azure.search.documents.indexes.implementation.models.LexicalTokenizerName tokenizer =
89-
LexicalTokenizerNameConverter.map(obj.getTokenizer());
89+
LexicalTokenizerNameConverter.map(obj.getTokenizerName());
9090
analyzeRequest.setTokenizer(tokenizer);
9191
}
9292
return analyzeRequest;

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/implementation/converters/SuggesterConverter.java

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
* A converter between {@link com.azure.search.documents.indexes.implementation.models.Suggester} and {@link SearchSuggester}.
1313
*/
1414
public final class SuggesterConverter {
15-
private static final String SEARCH_MODE = "analyzingInfixMatching";
1615
/**
1716
* Maps from {@link com.azure.search.documents.indexes.implementation.models.Suggester} to {@link SearchSuggester}.
1817
*/
@@ -51,7 +50,6 @@ public static com.azure.search.documents.indexes.implementation.models.Suggester
5150
String name = obj.getName();
5251
suggester.setName(name);
5352

54-
suggester.setSearchMode(SEARCH_MODE);
5553
return suggester;
5654
}
5755

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/implementation/converters/SynonymMapConverter.java

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
* A converter between {@link com.azure.search.documents.indexes.implementation.models.SynonymMap} and {@link SynonymMap}.
1111
*/
1212
public final class SynonymMapConverter {
13-
private static final String FORMAT = "solr";
13+
1414
/**
1515
* Maps from {@link com.azure.search.documents.indexes.implementation.models.SynonymMap} to {@link SynonymMap}.
1616
*/
@@ -53,8 +53,6 @@ public static com.azure.search.documents.indexes.implementation.models.SynonymMa
5353
String name = obj.getName();
5454
synonymMap.setName(name);
5555

56-
synonymMap.setFormat(FORMAT);
57-
5856
String eTag = obj.getETag();
5957
synonymMap.setETag(eTag);
6058

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/indexes/models/AnalyzeTextOptions.java

Lines changed: 35 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ public final class AnalyzeTextOptions {
1717
* The text to break into tokens.
1818
*/
1919
@JsonProperty(value = "text", required = true)
20-
private String text;
20+
private final String text;
2121

2222
/*
2323
* The name of the analyzer to use to break the given text. If this
@@ -45,7 +45,7 @@ public final class AnalyzeTextOptions {
4545
* 'Simple', 'Stop', 'Whitespace'
4646
*/
4747
@JsonProperty(value = "analyzer")
48-
private LexicalAnalyzerName analyzer;
48+
private LexicalAnalyzerName analyzerName;
4949

5050
/*
5151
* The name of the tokenizer to use to break the given text. If this
@@ -57,7 +57,7 @@ public final class AnalyzeTextOptions {
5757
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'
5858
*/
5959
@JsonProperty(value = "tokenizer")
60-
private LexicalTokenizerName tokenizer;
60+
private LexicalTokenizerName tokenizerName;
6161

6262
/*
6363
* An optional list of token filters to use when breaking the given text.
@@ -74,23 +74,34 @@ public final class AnalyzeTextOptions {
7474
private List<CharFilterName> charFilters;
7575

7676
/**
77-
* Get the text property: The text to break into tokens.
77+
* Constructor for {@link AnalyzeTextOptions} which takes an analyzer name.
7878
*
79-
* @return the text value.
79+
* @param text The text to break into tokens.
80+
* @param analyzerName The name of the analyzer to use to break the given text.
8081
*/
81-
public String getText() {
82-
return this.text;
82+
public AnalyzeTextOptions(String text, LexicalAnalyzerName analyzerName) {
83+
this.text = text;
84+
this.analyzerName = analyzerName;
8385
}
8486

8587
/**
86-
* Set the text property: The text to break into tokens.
88+
* Constructor for {@link AnalyzeTextOptions} which takes a tokenizer name.
8789
*
88-
* @param text the text value to set.
89-
* @return the AnalyzeRequest object itself.
90+
* @param text The text to break into tokens.
91+
* @param tokenizerName The name of the tokenizer to use to break the given text.
9092
*/
91-
public AnalyzeTextOptions setText(String text) {
93+
public AnalyzeTextOptions(String text, LexicalTokenizerName tokenizerName) {
9294
this.text = text;
93-
return this;
95+
this.tokenizerName = tokenizerName;
96+
}
97+
98+
/**
99+
* Get the text property: The text to break into tokens.
100+
*
101+
* @return the text value.
102+
*/
103+
public String getText() {
104+
return this.text;
94105
}
95106

96107
/**
@@ -121,8 +132,8 @@ public AnalyzeTextOptions setText(String text) {
121132
*
122133
* @return the analyzer value.
123134
*/
124-
public LexicalAnalyzerName getAnalyzer() {
125-
return this.analyzer;
135+
public LexicalAnalyzerName getAnalyzerName() {
136+
return this.analyzerName;
126137
}
127138

128139
/**
@@ -151,11 +162,12 @@ public LexicalAnalyzerName getAnalyzer() {
151162
* 'StandardAsciiFoldingLucene', 'Keyword', 'Pattern', 'Simple', 'Stop',
152163
* 'Whitespace'.
153164
*
154-
* @param analyzer the analyzer value to set.
165+
* @param analyzerName the analyzer value to set.
155166
* @return the AnalyzeRequest object itself.
156167
*/
157-
public AnalyzeTextOptions setAnalyzer(LexicalAnalyzerName analyzer) {
158-
this.analyzer = analyzer;
168+
public AnalyzeTextOptions setAnalyzerName(LexicalAnalyzerName analyzerName) {
169+
this.analyzerName = analyzerName;
170+
this.tokenizerName = null;
159171
return this;
160172
}
161173

@@ -170,8 +182,8 @@ public AnalyzeTextOptions setAnalyzer(LexicalAnalyzerName analyzer) {
170182
*
171183
* @return the tokenizer value.
172184
*/
173-
public LexicalTokenizerName getTokenizer() {
174-
return this.tokenizer;
185+
public LexicalTokenizerName getTokenizerName() {
186+
return this.tokenizerName;
175187
}
176188

177189
/**
@@ -183,11 +195,12 @@ public LexicalTokenizerName getTokenizer() {
183195
* 'MicrosoftLanguageStemmingTokenizer', 'NGram', 'PathHierarchy',
184196
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'.
185197
*
186-
* @param tokenizer the tokenizer value to set.
198+
* @param tokenizerName the tokenizer value to set.
187199
* @return the AnalyzeRequest object itself.
188200
*/
189-
public AnalyzeTextOptions setTokenizer(LexicalTokenizerName tokenizer) {
190-
this.tokenizer = tokenizer;
201+
public AnalyzeTextOptions setTokenizerName(LexicalTokenizerName tokenizerName) {
202+
this.tokenizerName = tokenizerName;
203+
this.analyzerName = null;
191204
return this;
192205
}
193206

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/indexes/models/CustomAnalyzer.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ public final class CustomAnalyzer extends LexicalAnalyzer {
2929
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'
3030
*/
3131
@JsonProperty(value = "tokenizer", required = true)
32-
private LexicalTokenizerName tokenizer;
32+
private LexicalTokenizerName tokenizerName;
3333

3434
/*
3535
* A list of token filters used to filter out or modify the tokens
@@ -60,7 +60,7 @@ public final class CustomAnalyzer extends LexicalAnalyzer {
6060
* @return the tokenizer value.
6161
*/
6262
public LexicalTokenizerName getTokenizer() {
63-
return this.tokenizer;
63+
return this.tokenizerName;
6464
}
6565

6666
/**
@@ -71,11 +71,11 @@ public LexicalTokenizerName getTokenizer() {
7171
* 'MicrosoftLanguageStemmingTokenizer', 'NGram', 'PathHierarchy',
7272
* 'Pattern', 'Standard', 'UaxUrlEmail', 'Whitespace'.
7373
*
74-
* @param tokenizer the tokenizer value to set.
74+
* @param tokenizerName the tokenizer value to set.
7575
* @return the CustomAnalyzer object itself.
7676
*/
77-
public CustomAnalyzer setTokenizer(LexicalTokenizerName tokenizer) {
78-
this.tokenizer = tokenizer;
77+
public CustomAnalyzer setTokenizer(LexicalTokenizerName tokenizerName) {
78+
this.tokenizerName = tokenizerName;
7979
return this;
8080
}
8181

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/indexes/models/SearchSuggester.java

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,6 @@
1212
*/
1313
@Fluent
1414
public final class SearchSuggester {
15-
/*
16-
* A value indicating the capabilities of the suggester.
17-
*/
18-
@JsonProperty(value = "searchMode", required = true)
19-
private static final String SEARCH_MODE = "analyzingInfixMatching";
20-
2115
/*
2216
* The name of the suggester.
2317
*/

sdk/search/azure-search-documents/src/main/java/com/azure/search/documents/indexes/models/SynonymMap.java

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -11,14 +11,6 @@
1111
*/
1212
@Fluent
1313
public final class SynonymMap {
14-
15-
/*
16-
* The format of the synonym map. Only the 'solr' format is currently
17-
* supported.
18-
*/
19-
@JsonProperty(value = "format", required = true)
20-
private static final String FORMAT = "solr";
21-
2214
/*
2315
* The name of the synonym map.
2416
*/

sdk/search/azure-search-documents/src/test/java/com/azure/search/documents/indexes/CustomAnalyzerSyncTests.java

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -204,18 +204,14 @@ public void canAnalyze() {
204204
searchIndexClient.createIndex(index);
205205
indexesToCleanup.add(index.getName());
206206

207-
AnalyzeTextOptions request = new AnalyzeTextOptions()
208-
.setText("One two")
209-
.setAnalyzer(LexicalAnalyzerName.WHITESPACE);
207+
AnalyzeTextOptions request = new AnalyzeTextOptions("One two", LexicalAnalyzerName.WHITESPACE);
210208
PagedIterable<AnalyzedTokenInfo> results = searchIndexClient.analyzeText(index.getName(), request);
211209
Iterator<AnalyzedTokenInfo> iterator = results.iterator();
212210
assertTokenInfoEqual("One", 0, 3, 0, iterator.next());
213211
assertTokenInfoEqual("two", 4, 7, 1, iterator.next());
214212
assertFalse(iterator.hasNext());
215213

216-
request = new AnalyzeTextOptions()
217-
.setText("One's <two/>")
218-
.setTokenizer(LexicalTokenizerName.WHITESPACE)
214+
request = new AnalyzeTextOptions("One's <two/>", LexicalTokenizerName.WHITESPACE)
219215
.setTokenFilters(Collections.singletonList(TokenFilterName.APOSTROPHE))
220216
.setCharFilters(Collections.singletonList(CharFilterName.HTML_STRIP));
221217
results = searchIndexClient.analyzeText(index.getName(), request);
@@ -239,21 +235,15 @@ public void canAnalyzeWithAllPossibleNames() {
239235

240236
LexicalAnalyzerName.values()
241237
.stream()
242-
.map(an -> new AnalyzeTextOptions()
243-
.setText("One two")
244-
.setAnalyzer(an))
238+
.map(an -> new AnalyzeTextOptions("One two", an))
245239
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));
246240

247241
LexicalTokenizerName.values()
248242
.stream()
249-
.map(tn -> new AnalyzeTextOptions()
250-
.setText("One two")
251-
.setTokenizer(tn))
243+
.map(tn -> new AnalyzeTextOptions("One two", tn))
252244
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));
253245

254-
AnalyzeTextOptions request = new AnalyzeTextOptions()
255-
.setText("One two")
256-
.setTokenizer(LexicalTokenizerName.WHITESPACE)
246+
AnalyzeTextOptions request = new AnalyzeTextOptions("One two", LexicalTokenizerName.WHITESPACE)
257247
.setTokenFilters(new ArrayList<>(TokenFilterName.values()))
258248
.setCharFilters(new ArrayList<>(CharFilterName.values()));
259249
searchIndexClient.analyzeText(index.getName(), request);

0 commit comments

Comments
 (0)