updated search_api modules, as indexing was not working locally

had to be tested in production
Bachir Soussi Chiadmi
2017-05-24 18:57:04 +02:00
parent b5d76ec5e6
commit 28c7a0965a
53 changed files with 4166 additions and 187 deletions

View File

@@ -1,5 +1,29 @@
Search API Solr Search 1.x, dev (xxxx-xx-xx):
---------------------------------------------
Search API Solr Search 1.12 (2017-02-23):
-----------------------------------------
- #2612770 by Temoor, drunken monkey: Fixed conjunction in complex filter
conditions.
- #2711685 by berliner: Added support for Solr 6.
- #2842661 by gabrielu, drunken monkey: Fixed duplicate code line in
clearCache().
- #2834159 by zniki.ru: Fixed left-over explicit
SearchApiSolrConnection::escapeFieldName() call.
- #2677912 by jts86, mian3010: Added option to disable committing for Solr.
- #2828473 by detroz: Fixed incorrect variable initialization.
- #2826565 by Sardis, drunken monkey: Fixed multi-valued field highlighting.
- #2807327 by drunken monkey: Fixed configuration form descriptions.
- #2772199 by drunken monkey: Added a warning to the description for the
highlighting server option.
- #2733625 by joelstein, drunken monkey: Fixed notice in
Connection::getFields().
Search API Solr Search 1.11 (2016-07-06):
-----------------------------------------
- #2710397 by drunken monkey: Fixed escaping of highlighting tags.
- #2636016 by drunken monkey: Fixed location search distance facets.
Search API Solr Search 1.10 (2016-03-14):
-----------------------------------------
- #2604322 by lex0r, drunken monkey: Added the option to log all Solr queries.
- #2645366 by tedfordgif, drunken monkey: Fixed browser incorrectly filling the
HTTP Auth form fields.
- #2611716 by aditya_anurag, drunken monkey: Improved the method documentation

View File

@@ -80,6 +80,9 @@ class SearchApiSolrService extends SearchApiAbstractService {
'retrieve_data' => FALSE,
'highlight_data' => FALSE,
'skip_schema_check' => FALSE,
'log_query' => FALSE,
'log_response' => FALSE,
'commits_disabled' => FALSE,
'solr_version' => '',
'http_method' => 'AUTO',
// Default to TRUE for new servers, but to FALSE for existing ones.
@@ -161,7 +164,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
$form['path'] = array(
'#type' => 'textfield',
'#title' => t('Solr path'),
'#description' => t('The path that identifies the Solr instance to use on the server. (For Solr 4.x servers, this should include the name of the core to use.)'),
'#description' => t('The path that identifies the Solr instance to use on the server. (For Solr versions 4 and above, this should include the name of the core to use.)'),
'#default_value' => $options['path'],
);
@@ -211,19 +214,24 @@ class SearchApiSolrService extends SearchApiAbstractService {
$form['advanced']['highlight_data'] = array(
'#type' => 'checkbox',
'#title' => t('Highlight retrieved data'),
'#description' => t('When retrieving result data from the Solr server, try to highlight the search terms in the returned fulltext fields.'),
'#description' => t('When retrieving result data from the Solr server, try to highlight the search terms in the returned fulltext fields. <strong>Note:</strong> Do not use the "Highlighting" processor for the index together with this option; use one or the other.'),
'#default_value' => $options['highlight_data'],
);
// Highlighting retrieved data only makes sense when we retrieve data.
// (Actually, internally it doesn't really matter. However, from a user's
// perspective, having to check both probably makes sense.)
$form['advanced']['highlight_data']['#states']['invisible']
[':input[name="options[form][advanced][retrieve_data]"]']['checked'] = FALSE;
$form['advanced']['skip_schema_check'] = array(
'#type' => 'checkbox',
'#title' => t('Skip schema verification'),
'#description' => t('Skip the automatic check for schema-compatibillity. Use this override if you are seeing an error-message about an incompatible schema.xml configuration file, and you are sure the configuration is compatible.'),
'#description' => t('Skip the automatic check for schema-compatibility. Use this override if you are seeing an error-message about an incompatible schema.xml configuration file, and you are sure the configuration is compatible.'),
'#default_value' => $options['skip_schema_check'],
);
$form['advanced']['solr_version'] = array(
'#type' => 'select',
'#title' => t('Solr version override'),
'#description' => t('Specify the Solr version manually in case it cannot be retrived automatically. The version can be found in the Solr admin interface under "Solr Specification Version" or "solr-spec"'),
'#description' => t('Specify the Solr version manually in case it cannot be retrieved automatically. The version can be found in the Solr admin interface under "Solr Specification Version" or "solr-spec".'),
'#options' => array(
'' => t('Determine automatically'),
'3' => '3.x',
@@ -232,12 +240,6 @@ class SearchApiSolrService extends SearchApiAbstractService {
),
'#default_value' => $options['solr_version'],
);
// Highlighting retrieved data only makes sense when we retrieve data.
// (Actually, internally it doesn't really matter. However, from a user's
// perspective, having to check both probably makes sense.)
$form['advanced']['highlight_data']['#states']['invisible']
[':input[name="options[form][advanced][retrieve_data]"]']['checked'] = FALSE;
$form['advanced']['http_method'] = array(
'#type' => 'select',
'#title' => t('HTTP method'),
@@ -249,6 +251,24 @@ class SearchApiSolrService extends SearchApiAbstractService {
'GET' => 'GET',
),
);
$form['advanced']['log_query'] = array(
'#type' => 'checkbox',
'#title' => t('Log search requests'),
'#description' => t('Log all outgoing Solr search requests.'),
'#default_value' => $options['log_query'],
);
$form['advanced']['log_response'] = array(
'#type' => 'checkbox',
'#title' => t('Log search results'),
'#description' => t('Log all search result responses received from Solr. NOTE: This may slow down your site since all response data (including possibly retrieved data) will be saved in the Drupal log.'),
'#default_value' => $options['log_response'],
);
$form['advanced']['commits_disabled'] = array(
'#type' => 'checkbox',
'#title' => t('Disable explicit committing'),
'#description' => t('Do not send any commit commands to the Solr server.'),
'#default_value' => $options['commits_disabled'],
);
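All three new settings are stored in the server's options array, so they can also be flipped without the admin UI. A minimal sketch (the server machine name "solr_server" and the update-hook context are assumptions, not part of this commit):

$server = search_api_server_load('solr_server');
$server->options['log_query'] = TRUE;         // Log every outgoing Solr query.
$server->options['log_response'] = FALSE;     // Keep full responses out of watchdog.
$server->options['commits_disabled'] = TRUE;  // Rely on Solr's autoCommit instead.
$server->save();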
if (module_exists('search_api_autocomplete')) {
$form['advanced']['autocomplete'] = array(
@@ -573,7 +593,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
// Now add all fields contained in the item, with dynamic fields. Also,
// gather the contents of all text fields to also add them to "content".
$text_content = '';
$text_content = array();
foreach ($item as $key => $field) {
// If the field is not known for the index, something weird has
// happened. We refuse to index the items and hope that the others are
@@ -936,7 +956,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
unset($radius);
$field = $fields[$spatial['field']];
$escaped_field = SearchApiSolrConnection::escapeFieldName($field);
$escaped_field = call_user_func(array($this->connection_class, 'escapeFieldName'), $field);
// If proper bbox coordinates were given use them to filter.
if (isset($spatial['bbox'])) {
@@ -970,7 +990,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
// If the fq consists only of a filter on this field, replace it with
// a range.
$preg_field = preg_quote($escaped_field, '/');
if (preg_match('/^' . $preg_field . ':\["?(\*|\d+(?:\.\d+)?)"? TO "?(\*|\d+(?:\.\d+)?)"?\]$/', $value, $m)) {
if (preg_match('/^(?:\{!tag[^}]+\})?' . $preg_field . ':\["?(\*|\d+(?:\.\d+)?)"? TO "?(\*|\d+(?:\.\d+)?)"?\]$/', $value, $m)) {
unset($fq[$key]);
if ($m[1] && is_numeric($m[1])) {
$min_radius = isset($min_radius) ? max($min_radius, $m[1]) : $m[1];
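The widened pattern now also recognizes range filters that carry a {!tag=...} local parameter (as added for facet exclusion), not just the bare field range. A quick sketch, with a made-up field name:

$preg_field = preg_quote('locs_geo', '/');
$pattern = '/^(?:\{!tag[^}]+\})?' . $preg_field . ':\["?(\*|\d+(?:\.\d+)?)"? TO "?(\*|\d+(?:\.\d+)?)"?\]$/';
var_dump((bool) preg_match($pattern, 'locs_geo:[* TO 5]'));                  // bool(true)
var_dump((bool) preg_match($pattern, '{!tag=facet:loc}locs_geo:[* TO 5]'));  // bool(true), only matched since this change
var_dump((bool) preg_match($pattern, 'other_field:[* TO 5]'));               // bool(false)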
@@ -1354,6 +1374,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
foreach ($response->highlighting->$id->content as $snippet) {
$snippet = strip_tags($snippet);
$snippet = preg_replace('/^.*>|<.*$/', '', $snippet);
$snippet = check_plain($snippet);
$snippet = $this->formatHighlighting($snippet);
// The created fragments sometimes have leading or trailing punctuation.
// We remove that here for all common cases, but take care not to remove
@@ -1367,9 +1388,27 @@ class SearchApiSolrService extends SearchApiAbstractService {
$prefix_length = strlen($prefix);
foreach ($field_mapping as $search_api_property => $solr_property) {
if (substr($solr_property, 0, $prefix_length) == $prefix && !empty($response->highlighting->$id->$solr_property)) {
// Contrary to above, we here want to preserve HTML, so we just
// replace the [HIGHLIGHT] tags with the appropriate format.
$fields[$search_api_property] = $this->formatHighlighting($response->highlighting->$id->$solr_property);
$value = $response->highlighting->$id->$solr_property;
$value = $this->sanitizeHighlightValue($value, $search_api_property);
// Remove highlight prefixes and suffixes so we can compare values
// in order to replace the corresponding items.
$orig_value = preg_replace('#\[(/?)HIGHLIGHT\]#', '', $value);
$field_values = $this->sanitizeHighlightValue($fields[$search_api_property]);
foreach ($field_values as $delta => $field_value) {
foreach ($orig_value as $num => $item) {
if ($item === $field_value) {
$field_values[$delta] = $this->formatHighlighting($value[$num]);
$change = TRUE;
continue 2;
}
}
}
if (!empty($change)) {
$fields[$search_api_property] = array(
'#value' => $field_values,
'#sanitize_callback' => FALSE,
);
}
}
}
}
@@ -1378,12 +1417,33 @@ class SearchApiSolrService extends SearchApiAbstractService {
}
/**
* Changes highlighting tags from our custom, HTML-safe ones to HTML.
* Sanitizes a highlighted field value.
*
* @param string|array $snippet
* The snippet(s) to format.
* @param string|array $value
* Either a highlighted field value, or an array of such values.
* @param string|null $field_id
* (optional) The ID of the field for which this sanitizing occurs, if any.
*
* @return string|array
* The sanitized input.
*/
protected function sanitizeHighlightValue($value, $field_id = NULL) {
if (is_array($value)) {
foreach ($value as $i => $nested_value) {
$value[$i] = $this->sanitizeHighlightValue($nested_value, $field_id);
}
return $value;
}
return check_plain(strip_tags($value));
}
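In short, the helper walks into nested arrays and returns plain-text, HTML-escaped copies of every value, while the square-bracket [HIGHLIGHT] markers pass through untouched for formatHighlighting(). A rough illustration with invented values (inside the service class, on a bootstrapped Drupal 7 site where check_plain() exists):

$raw = array('<em>Foo</em> & bar', array('<b>Baz</b> [HIGHLIGHT]qux[/HIGHLIGHT]'));
$clean = $this->sanitizeHighlightValue($raw);
// $clean is now: array('Foo &amp; bar', array('Baz [HIGHLIGHT]qux[/HIGHLIGHT]'))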
/**
* Changes highlighting tags from our custom, HTML-safe ones to HTML.
*
* @param string|string[] $snippet
* The snippet(s) to format.
*
* @return string|string[]
* The snippet(s), properly formatted as HTML.
*/
protected function formatHighlighting($snippet) {
@@ -1587,9 +1647,7 @@ class SearchApiSolrService extends SearchApiAbstractService {
elseif ($f instanceof SearchApiQueryFilterInterface) {
$q = $this->createFilterQueries($f, $solr_fields, $fields);
if ($filter->getConjunction() != $f->getConjunction() && count($q) > 1) {
// $or == TRUE means the nested filter has conjunction AND, and vice versa
$sep = $or ? ' ' : ' OR ';
$fq[] = '((' . implode(')' . $sep . '(', $q) . '))';
$fq[] = '((' . implode(') ' . $f->getConjunction() . ' (', $q) . '))';
}
else {
$fq = array_merge($fq, $q);
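The effect of the fix: the separator between the nested sub-queries now comes from that filter's own conjunction rather than being inverted from the parent's. A small sketch of the string appended to $fq (sub-queries and conjunction are illustrative only):

$q = array('field_a:1', 'field_b:2');
$conjunction = 'OR'; // What $f->getConjunction() would return for the nested filter.
$fq[] = '((' . implode(') ' . $conjunction . ' (', $q) . '))';
// $fq now contains the string: ((field_a:1) OR (field_b:2))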
@@ -2315,6 +2373,10 @@ class SearchApiSolrService extends SearchApiAbstractService {
* Sends a commit command to the Solr server.
*/
public function commit() {
// If committing has been disabled altogether, do nothing here.
if (!empty($this->options['commits_disabled'])) {
return;
}
try {
$this->connect();
return $this->solr->commit(FALSE);

View File

@@ -367,8 +367,11 @@ class SearchApiSolrConnection implements SearchApiSolrConnectionInterface {
*/
public function getFields($num_terms = 0) {
$fields = array();
foreach ($this->getLuke($num_terms)->fields as $name => $info) {
$fields[$name] = new SearchApiSolrField($info);
$luke_data = $this->getLuke($num_terms);
if (isset($luke_data->fields)) {
foreach ($luke_data->fields as $name => $info) {
$fields[$name] = new SearchApiSolrField($info);
}
}
return $fields;
}
@@ -514,7 +517,6 @@ class SearchApiSolrConnection implements SearchApiSolrConnectionInterface {
public function clearCache() {
if ($cid = $this->getCacheId()) {
cache_clear_all($cid, 'cache_search_api_solr', TRUE);
cache_clear_all($cid, 'cache_search_api_solr', TRUE);
}
$this->luke = array();
$this->stats = NULL;
@@ -944,10 +946,18 @@ class SearchApiSolrConnection implements SearchApiSolrConnectionInterface {
// built-in http_build_query() doesn't give us the format Solr wants.
$queryString = $this->httpBuildQuery($params);
if (!empty($this->options['log_query'])) {
watchdog('search_api_solr', 'Query: @query', array('@query' => $queryString), WATCHDOG_DEBUG);
}
if ($method == 'GET' || $method == 'AUTO') {
$searchUrl = $this->constructUrl(self::SEARCH_SERVLET, array(), $queryString);
if ($method == 'GET' || strlen($searchUrl) <= variable_get('search_api_solr_http_get_max_length', 4000)) {
return $this->sendRawGet($searchUrl);
$response = $this->sendRawGet($searchUrl);
if (!empty($this->options['log_response'])) {
$this->logResponse($response);
}
return $response;
}
}
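The GET/POST cut-over above is driven by an ordinary Drupal variable, so the 4000-character default can be raised per environment if the Solr front end accepts longer URLs. A sketch of the usual overrides (the value 65000 is only an example):

// In settings.php:
$conf['search_api_solr_http_get_max_length'] = 65000;
// Or at runtime:
variable_set('search_api_solr_http_get_max_length', 65000);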
@@ -955,7 +965,28 @@ class SearchApiSolrConnection implements SearchApiSolrConnectionInterface {
$searchUrl = $this->constructUrl(self::SEARCH_SERVLET);
$options['data'] = $queryString;
$options['headers']['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8';
return $this->sendRawPost($searchUrl, $options);
$response = $this->sendRawPost($searchUrl, $options);
if (!empty($this->options['log_response'])) {
$this->logResponse($response);
}
return $response;
}
/**
* Logs a Solr response object.
*
* @param object $response
* The response received from Solr.
*/
protected function logResponse($response) {
$data = $response->code . ' ' . $response->status_message . "\n" . print_r($response->response, TRUE);
watchdog('search_api_solr', 'Response: <div style="white-space: pre-wrap;">@response</div>', array('@response' => $data), WATCHDOG_DEBUG);
if (!empty($response->facet_counts)) {
watchdog('search_api_solr', 'Facets: <div style="white-space: pre-wrap;">@facets</div>', array('@facets' => print_r($response->facet_counts, TRUE)), WATCHDOG_DEBUG);
}
}
}

View File

@@ -11,9 +11,9 @@ files[] = includes/solr_connection.interface.inc
files[] = includes/solr_field.inc
files[] = includes/spellcheck.inc
; Information added by Drupal.org packaging script on 2016-02-23
version = "7.x-1.9+10-dev"
; Information added by Drupal.org packaging script on 2017-02-23
version = "7.x-1.12"
core = "7.x"
project = "search_api_solr"
datestamp = "1456233244"
datestamp = "1487844794"

View File

@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
This file allows you to boost certain search items to the top of search
results. You can find out an item's ID by searching directly on the Solr
server. The item IDs are in general constructed as follows:
Search API:
$document->id = $index_id . '-' . $item_id;
Apache Solr Search Integration:
$document->id = $site_hash . '/' . $entity_type . '/' . $entity->id;
If you want this file to be automatically re-loaded when a Solr commit takes
place (e.g., if you have an automatic script active which updates elevate.xml
according to newly-indexed data), place it into Solr's data/ directory.
Otherwise, place it with the other configuration files into the conf/
directory.
See http://wiki.apache.org/solr/QueryElevationComponent for more information.
-->
<elevate>
<!-- Example for ranking the node #1 first in searches for "example query": -->
<!--
<query text="example query">
<doc id="default_node_index-1" />
<doc id="7v3jsc/node/1" />
</query>
-->
<!-- Multiple <query> elements can be specified, contained in one <elevate>. -->
<!-- <query text="...">...</query> -->
</elevate>

View File

@@ -0,0 +1,14 @@
# This file contains character mappings for the default fulltext field type.
# The source characters (on the left) will be replaced by the respective target
# characters before any other processing takes place.
# Lines starting with a pound character # are ignored.
#
# For sensible defaults, use the mapping-ISOLatin1Accent.txt file distributed
# with the example application of your Solr version.
#
# Examples:
# "À" => "A"
# "\u00c4" => "A"
# "\u00c4" => "\u0041"
# "æ" => "ae"
# "\n" => " "

View File

@@ -0,0 +1,7 @@
#-----------------------------------------------------------------------
# This file blocks words from being operated on by the stemmer and word delimiter.
&amp;
&lt;
&gt;
&#039;
&quot;

View File

@@ -0,0 +1,693 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
This is the Solr schema file. This file should be named "schema.xml" and
should be in the conf directory under the solr home
(i.e. ./solr/conf/schema.xml by default)
or located where the classloader for the Solr webapp can find it.
For more information, on how to customize this file, please see
http://wiki.apache.org/solr/SchemaXml
-->
<schema name="drupal-4.4-solr-6.x" version="1.5">
<!-- attribute "name" is the name of this schema and is only used for
display purposes. Applications should change this to reflect the nature
of the search collection.
version="1.2" is Solr's version number for the schema syntax and
semantics. It should not normally be changed by applications.
1.0: multiValued attribute did not exist, all fields are multiValued by
nature
1.1: multiValued attribute introduced, false by default
1.2: omitTermFreqAndPositions attribute introduced, true by default
except for text fields.
1.3: removed optional field compress feature
1.4: autoGeneratePhraseQueries attribute introduced to drive
QueryParser behavior when a single string produces multiple
tokens. Defaults to off for version >= 1.4
1.5: omitNorms defaults to true for primitive field types
(int, float, boolean, string...)
-->
<types>
<!-- field type definitions. The "name" attribute is
just a label to be used by field definitions. The "class"
attribute and any other attributes determine the real
behavior of the fieldType.
Class names starting with "solr" refer to java classes in the
org.apache.solr.analysis package.
-->
<!-- The StrField type is not analyzed, but indexed/stored verbatim.
- StrField and TextField support an optional compressThreshold which
limits compression (if enabled in the derived fields) to values which
exceed a certain size (in characters).
-->
<fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- boolean type: "true" or "false" -->
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
<fieldtype name="binary" class="solr.BinaryField"/>
<!-- The optional sortMissingLast and sortMissingFirst attributes are
currently supported on types that are sorted internally as strings.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
- If sortMissingFirst="true", then a sort on this field will cause documents
without the field to come before documents with the field,
regardless of the requested sort order.
- If sortMissingLast="false" and sortMissingFirst="false" (the default),
then default lucene sorting will be used which places docs without the
field first in an ascending sort and last in a descending sort.
-->
<!-- numeric field types that can be sorted, but are not optimized for range queries -->
<fieldType name="integer" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
<!--
Note:
These should only be used for compatibility with existing indexes (created with older Solr versions)
or if "sortMissingFirst" or "sortMissingLast" functionality is needed. Use Trie based fields instead.
Numeric field types that manipulate the value into
a string value that isn't human-readable in its internal form,
but with a lexicographic ordering the same as the numeric ordering,
so that range queries work correctly.
-->
<fieldType name="sint" class="solr.TrieIntField" sortMissingLast="true"/>
<fieldType name="sfloat" class="solr.TrieFloatField" sortMissingLast="true"/>
<fieldType name="slong" class="solr.TrieLongField" sortMissingLast="true"/>
<fieldType name="sdouble" class="solr.TrieDoubleField" sortMissingLast="true"/>
<!--
Numeric field types that index each value at various levels of precision
to accelerate range queries when the number of values between the range
endpoints is large. See the javadoc for NumericRangeQuery for internal
implementation details.
Smaller precisionStep values (specified in bits) will lead to more tokens
indexed per value, slightly larger index size, and faster range queries.
A precisionStep of 0 disables indexing at different precision levels.
-->
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
<!--
The ExternalFileField type gets values from an external file instead of the
index. This is useful for data such as rankings that might change frequently
and require different update frequencies than the documents they are
associated with.
-->
<fieldType name="file" keyField="id" defVal="1" stored="false" indexed="false" class="solr.ExternalFileField" valType="float"/>
<!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
is a more restricted form of the canonical representation of dateTime
http://www.w3.org/TR/xmlschema-2/#dateTime
The trailing "Z" designates UTC time and is mandatory.
Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
All other components are mandatory.
Expressions can also be used to denote calculations that should be
performed relative to "NOW" to determine the value, ie...
NOW/HOUR
... Round to the start of the current hour
NOW-1DAY
... Exactly 1 day prior to now
NOW/DAY+6MONTHS+3DAYS
... 6 months and 3 days in the future from the start of
the current day
Consult the TrieDateField javadocs for more information.
Note: For faster range queries, consider the tdate type
-->
<fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0" sortMissingLast="true" omitNorms="true"/>
<!-- A Trie based date field for faster date range queries and date faceting. -->
<fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
<!-- solr.TextField allows the specification of custom text analyzers
specified as a tokenizer and a list of token filters. Different
analyzers may be specified for indexing and querying.
The optional positionIncrementGap puts space between multiple fields of
this type on the same document, with the purpose of preventing false phrase
matching across fields.
For more info on customizing your analyzer chain, please see
http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
-->
<!-- One can also specify an existing Analyzer class that has a
default constructor via the class attribute on the analyzer element
<fieldType name="text_greek" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
</fieldType>
-->
<!-- A text field that only splits on whitespace for exact matching of words -->
<fieldType name="text_ws" class="solr.TextField" omitNorms="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
Synonyms and stopwords are customized by external files, and stemming is enabled.
Duplicate tokens at the same position (which may result from Stemmed Synonyms or
WordDelim parts) are removed.
-->
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!-- in this example, we will only use synonyms at query time
<filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-->
<!-- Case insensitive stop word removal. -->
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
/>
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="1"
catenateNumbers="1"
catenateAll="0"
splitOnCaseChange="0"
preserveOriginal="1"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="query">
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
/>
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="0"
catenateNumbers="0"
catenateAll="0"
splitOnCaseChange="0"
preserveOriginal="1"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="multiterm">
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
/>
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="0"
catenateNumbers="0"
catenateAll="0"
splitOnCaseChange="1"
preserveOriginal="1"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<!-- An unstemmed text field - good if one does not know the language of the field -->
<fieldType name="text_und" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="1"
catenateNumbers="1"
catenateAll="0"
splitOnCaseChange="0"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
/>
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="0"
catenateNumbers="0"
catenateAll="0"
splitOnCaseChange="0"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="multiterm">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
/>
<filter class="solr.WordDelimiterFilterFactory"
protected="protwords.txt"
generateWordParts="1"
generateNumberParts="1"
catenateWords="0"
catenateNumbers="0"
catenateAll="0"
splitOnCaseChange="0"/>
<filter class="solr.LengthFilterFactory" min="2" max="100" />
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- Edge N gram type - for example for matching against queries with results.
KeywordTokenizer leaves input string intact as a single term.
see: http://www.lucidimagination.com/blog/2009/09/08/auto-suggest-from-popular-queries-using-edgengrams/
-->
<fieldType name="edge_n2_kw_text" class="solr.TextField" omitNorms="true" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EdgeNGramFilterFactory" minGramSize="2" maxGramSize="25" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- Setup simple analysis for spell checking -->
<fieldType name="textSpell" class="solr.TextField" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory" />
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.LengthFilterFactory" min="4" max="20" />
<filter class="solr.LowerCaseFilterFactory" />
<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
</analyzer>
</fieldType>
<!-- This is an example of using the KeywordTokenizer along
With various TokenFilterFactories to produce a sortable field
that does not include some properties of the source text
-->
<fieldType name="sortString" class="solr.TextField" sortMissingLast="true" omitNorms="true">
<analyzer>
<!-- KeywordTokenizer does no actual tokenizing, so the entire
input string is preserved as a single token
-->
<tokenizer class="solr.KeywordTokenizerFactory"/>
<!-- The LowerCase TokenFilter does what you expect, which can be useful
when you want your sorting to be case insensitive
-->
<filter class="solr.LowerCaseFilterFactory" />
<!-- The TrimFilter removes any leading or trailing whitespace -->
<filter class="solr.TrimFilterFactory" />
<!-- The PatternReplaceFilter gives you the flexibility to use
Java Regular expression to replace any sequence of characters
matching a pattern with an arbitrary replacement string,
which may include back references to portions of the original
string matched by the pattern.
See the Java Regular Expression documentation for more
information on pattern and replacement string syntax.
http://java.sun.com/j2se/1.5.0/docs/api/java/util/regex/package-summary.html
<filter class="solr.PatternReplaceFilterFactory"
pattern="(^\p{Punct}+)" replacement="" replace="all"
/>
-->
</analyzer>
</fieldType>
<!-- The "RandomSortField" is not used to store or search any
data. You can declare fields of this type in your schema
to generate pseudo-random orderings of your docs for sorting
or function purposes. The ordering is generated based on the field
name and the version of the index. As long as the index version
remains unchanged, and the same field name is reused,
the ordering of the docs will be consistent.
If you want different pseudo-random orderings of documents,
for the same version of the index, use a dynamicField and
change the field name in the request.
-->
<fieldType name="rand" class="solr.RandomSortField" indexed="true" />
<!-- Fulltext type for matching words based on how they sound i.e.,
"phonetic matching".
-->
<fieldType name="phonetic" class="solr.TextField" >
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
</analyzer>
</fieldType>
<!-- since fields of this type are by default not stored or indexed,
any data added to them will be ignored outright. -->
<fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
<!-- This point type indexes the coordinates as separate fields (subFields)
If subFieldType is defined, it references a type, and a dynamic field
definition is created matching *___<typename>. Alternately, if
subFieldSuffix is defined, that is used to create the subFields.
Example: if subFieldType="double", then the coordinates would be
indexed in fields myloc_0___double,myloc_1___double.
Example: if subFieldSuffix="_d" then the coordinates would be indexed
in fields myloc_0_d,myloc_1_d
The subFields are an implementation detail of the fieldType, and end
users normally should not need to know about them.
-->
<fieldType name="point" class="solr.PointType" dimension="2" subFieldType="tdouble"/>
<!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
<fieldType name="location" class="solr.LatLonType" subFieldType="tdouble"/>
<!-- A Geohash is a compact representation of a latitude longitude pair in a single field.
See http://wiki.apache.org/solr/SpatialSearch
-->
<fieldtype name="geohash" class="solr.GeoHashField"/>
<!-- Improved location type which supports advanced functionality like
filtering by polygons or other shapes, indexing shapes, multi-valued
fields, etc.
-->
<fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
geo="true" distErrPct="0.025" maxDistErr="0.001" distanceUnits="kilometers" />
<!-- Spatial rectangle (bounding box) field. It supports most spatial predicates, and has
special relevancy modes: score=overlapRatio|area|area2D (local-param to the query). DocValues is recommended for
relevancy. -->
<fieldType name="bbox" class="solr.BBoxField"
geo="true" distanceUnits="kilometers" numberType="_bbox_coord" />
<fieldType name="_bbox_coord" class="solr.TrieDoubleField" precisionStep="8" docValues="true" stored="false"/>
</types>
<!-- Following is a dynamic way to include other types, added by other contrib modules -->
<xi:include href="schema_extra_types.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</xi:include>
<!-- Valid attributes for fields:
name: mandatory - the name for the field
type: mandatory - the name of a field type from the <types> fieldType
section
indexed: true if this field should be indexed (searchable or sortable)
stored: true if this field should be retrievable
docValues: true if this field should have doc values. Doc values are
useful for faceting, grouping, sorting and function queries. Although not
required, doc values will make the index faster to load, more
NRT-friendly and more memory-efficient. They however come with some
limitations: they are currently only supported by StrField, UUIDField
and all Trie*Fields, and depending on the field type, they might
require the field to be single-valued, be required or have a default
value (check the documentation of the field type you're interested in
for more information)
multiValued: true if this field may contain multiple values per document
omitNorms: (expert) set to true to omit the norms associated with
this field (this disables length normalization and index-time
boosting for the field, and saves some memory). Only full-text
fields or fields that need an index-time boost need norms.
Norms are omitted for primitive (non-analyzed) types by default.
termVectors: [false] set to true to store the term vector for a
given field.
When using MoreLikeThis, fields used for similarity should be
stored for best performance.
termPositions: Store position information with the term vector.
This will increase storage costs.
termOffsets: Store offset information with the term vector. This
will increase storage costs.
required: The field is required. It will throw an error if the
value does not exist
default: a value that should be used if no value is specified
when adding a document.
-->
<fields>
<!-- The document id is usually derived from a site-specific key (hash) and the
entity type and ID like:
Search Api :
The format used is $document->id = $index_id . '-' . $item_id
Apache Solr Search Integration
The format used is $document->id = $site_hash . '/' . $entity_type . '/' . $entity->id;
-->
<field name="id" type="string" indexed="true" stored="true" required="true" />
<!-- Add Solr Cloud version field as mentioned in
http://wiki.apache.org/solr/SolrCloud#Required_Config
-->
<field name="_version_" type="long" indexed="true" stored="true" multiValued="false"/>
<!-- Search Api specific fields -->
<!-- item_id contains the entity ID, e.g. a node's nid. -->
<field name="item_id" type="string" indexed="true" stored="true" />
<!-- index_id is the machine name of the search index this entry belongs to. -->
<field name="index_id" type="string" indexed="true" stored="true" />
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field differently,
or to add multiple fields to the same field for easier/faster searching. -->
<!-- Since sorting by ID is explicitly allowed, store item_id also in a sortable way. -->
<copyField source="item_id" dest="sort_search_api_id" />
<!-- Apache Solr Search Integration specific fields -->
<!-- entity_id is the numeric object ID, e.g. Node ID, File ID -->
<field name="entity_id" type="long" indexed="true" stored="true" />
<!-- entity_type is 'node', 'file', 'user', or some other Drupal object type -->
<field name="entity_type" type="string" indexed="true" stored="true" />
<!-- bundle is a node type, or as appropriate for other entity types -->
<field name="bundle" type="string" indexed="true" stored="true"/>
<field name="bundle_name" type="string" indexed="true" stored="true"/>
<field name="site" type="string" indexed="true" stored="true"/>
<field name="hash" type="string" indexed="true" stored="true"/>
<field name="url" type="string" indexed="true" stored="true"/>
<!-- label is the default field for a human-readable string for this entity (e.g. the title of a node) -->
<field name="label" type="text" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
<!-- The string version of the title is used for sorting -->
<copyField source="label" dest="sort_label"/>
<!-- content is the default field for full text search - dump crap here -->
<field name="content" type="text" indexed="true" stored="true" termVectors="true"/>
<field name="teaser" type="text" indexed="false" stored="true"/>
<field name="path" type="string" indexed="true" stored="true"/>
<field name="path_alias" type="text" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
<!-- These are the fields that correspond to a Drupal node. The beauty of having
Lucene store title, body, type, etc., is that we retrieve them with the search
result set and don't need to go to the database with a node_load. -->
<field name="tid" type="long" indexed="true" stored="true" multiValued="true"/>
<field name="taxonomy_names" type="text" indexed="true" stored="false" termVectors="true" multiValued="true" omitNorms="true"/>
<!-- Copy terms to a single field that contains all taxonomy term names -->
<copyField source="tm_vid_*" dest="taxonomy_names"/>
<!-- Here, default is used to create a "timestamp" field indicating
when each document was indexed.-->
<field name="timestamp" type="tdate" indexed="true" stored="true" default="NOW" multiValued="false"/>
<!-- This field is used to build the spellchecker index -->
<field name="spell" type="textSpell" indexed="true" stored="true" multiValued="true"/>
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field differently,
or to add multiple fields to the same field for easier/faster searching. -->
<copyField source="label" dest="spell"/>
<copyField source="content" dest="spell"/>
<copyField source="ts_*" dest="spell"/>
<copyField source="tm_*" dest="spell"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
a "*" only at the start or the end.
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. If equal size patterns
both match, the first appearing in the schema will be used. -->
<!-- A set of fields to contain text extracted from HTML tag contents which we
can boost at query time. -->
<dynamicField name="tags_*" type="text" indexed="true" stored="false" omitNorms="true"/>
<!-- For 2 and 3 letter prefix dynamic fields, the 1st letter indicates the data type and
the last letter is 's' for single valued, 'm' for multi-valued -->
<!-- We use long for integer since 64 bit ints are now common in PHP. -->
<dynamicField name="is_*" type="long" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="im_*" type="long" indexed="true" stored="true" multiValued="true"/>
<!-- List of floats can be saved in a regular float field -->
<dynamicField name="fs_*" type="float" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="fm_*" type="float" indexed="true" stored="true" multiValued="true"/>
<!-- List of doubles can be saved in a regular double field -->
<dynamicField name="ps_*" type="double" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="pm_*" type="double" indexed="true" stored="true" multiValued="true"/>
<!-- List of booleans can be saved in a regular boolean field -->
<dynamicField name="bm_*" type="boolean" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="bs_*" type="boolean" indexed="true" stored="true" multiValued="false"/>
<!-- Regular text (without processing) can be stored in a string field-->
<dynamicField name="ss_*" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="sm_*" type="string" indexed="true" stored="true" multiValued="true"/>
<!-- Normal text fields are for full text - the relevance of a match depends on the length of the text -->
<dynamicField name="ts_*" type="text" indexed="true" stored="true" multiValued="false" termVectors="true"/>
<dynamicField name="tm_*" type="text" indexed="true" stored="true" multiValued="true" termVectors="true"/>
<!-- Unstemmed text fields for full text - the relevance of a match depends on the length of the text -->
<dynamicField name="tus_*" type="text_und" indexed="true" stored="true" multiValued="false" termVectors="true"/>
<dynamicField name="tum_*" type="text_und" indexed="true" stored="true" multiValued="true" termVectors="true"/>
<!-- These text fields omit norms - useful for extracted text like taxonomy_names -->
<dynamicField name="tos_*" type="text" indexed="true" stored="true" multiValued="false" termVectors="true" omitNorms="true"/>
<dynamicField name="tom_*" type="text" indexed="true" stored="true" multiValued="true" termVectors="true" omitNorms="true"/>
<!-- Special-purpose text fields -->
<dynamicField name="tes_*" type="edge_n2_kw_text" indexed="true" stored="true" multiValued="false" omitTermFreqAndPositions="true" />
<dynamicField name="tem_*" type="edge_n2_kw_text" indexed="true" stored="true" multiValued="true" omitTermFreqAndPositions="true" />
<dynamicField name="tws_*" type="text_ws" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="twm_*" type="text_ws" indexed="true" stored="true" multiValued="true"/>
<!-- trie dates are preferred, so give them the 2 letter prefix -->
<dynamicField name="ds_*" type="tdate" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="dm_*" type="tdate" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="its_*" type="tlong" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="itm_*" type="tlong" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="fts_*" type="tfloat" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="ftm_*" type="tfloat" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="pts_*" type="tdouble" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="ptm_*" type="tdouble" indexed="true" stored="true" multiValued="true"/>
<!-- Binary fields can be populated using base64 encoded data. Useful e.g. for embedding
a small image in a search result using the data URI scheme -->
<dynamicField name="xs_*" type="binary" indexed="false" stored="true" multiValued="false"/>
<dynamicField name="xm_*" type="binary" indexed="false" stored="true" multiValued="true"/>
<!-- In rare cases a date rather than tdate is needed for sortMissingLast -->
<dynamicField name="dds_*" type="date" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="ddm_*" type="date" indexed="true" stored="true" multiValued="true"/>
<!-- Sortable fields, good for sortMissingLast support &
We use long for integer since 64 bit ints are now common in PHP. -->
<dynamicField name="iss_*" type="slong" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="ism_*" type="slong" indexed="true" stored="true" multiValued="true"/>
<!-- In rare cases a sfloat rather than tfloat is needed for sortMissingLast -->
<dynamicField name="fss_*" type="sfloat" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="fsm_*" type="sfloat" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="pss_*" type="sdouble" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="psm_*" type="sdouble" indexed="true" stored="true" multiValued="true"/>
<!-- In case a 32 bit int is really needed, we provide these fields. 'h' is mnemonic for 'half word', i.e. 32 bit on 64 arch -->
<dynamicField name="hs_*" type="integer" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="hm_*" type="integer" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="hss_*" type="sint" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="hsm_*" type="sint" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="hts_*" type="tint" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="htm_*" type="tint" indexed="true" stored="true" multiValued="true"/>
<!-- Unindexed string fields that can be used to store values that won't be searchable -->
<dynamicField name="zs_*" type="string" indexed="false" stored="true" multiValued="false"/>
<dynamicField name="zm_*" type="string" indexed="false" stored="true" multiValued="true"/>
<!-- Fields for location searches.
http://wiki.apache.org/solr/SpatialSearch#geodist_-_The_distance_function -->
<dynamicField name="points_*" type="point" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="pointm_*" type="point" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="locs_*" type="location" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="locm_*" type="location" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="geos_*" type="geohash" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="geom_*" type="geohash" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="bboxs_*" type="bbox" indexed="true" stored="true" multiValued="false" />
<dynamicField name="bboxm_*" type="bbox" indexed="true" stored="true" multiValued="true" />
<dynamicField name="rpts_*" type="location_rpt" indexed="true" stored="true" multiValued="false" />
<dynamicField name="rptm_*" type="location_rpt" indexed="true" stored="true" multiValued="true" />
<!-- Special fields for Solr 5 functionality. -->
<dynamicField name="phons_*" type="phonetic" indexed="true" stored="true" multiValued="false" />
<dynamicField name="phonm_*" type="phonetic" indexed="true" stored="true" multiValued="true" />
<!-- External file fields -->
<dynamicField name="eff_*" type="file"/>
<!-- Sortable version of the dynamic string field -->
<dynamicField name="sort_*" type="sortString" indexed="true" stored="false"/>
<copyField source="ss_*" dest="sort_*"/>
<!-- A random sort field -->
<dynamicField name="random_*" type="rand" indexed="true" stored="true"/>
<!-- This field is used to store access information (e.g. node access grants), as opposed to field data -->
<dynamicField name="access_*" type="integer" indexed="true" stored="false" multiValued="true"/>
<!-- The following causes solr to ignore any fields that don't already match an existing
field name or dynamic field, rather than reporting them as an error.
Alternately, change the type="ignored" to some other type e.g. "text" if you want
unknown fields indexed and/or stored by default -->
<dynamicField name="*" type="ignored" multiValued="true" />
</fields>
<!-- Following is a dynamic way to include other fields, added by other contrib modules -->
<xi:include href="schema_extra_fields.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</xi:include>
<!-- Field to use to determine and enforce document uniqueness.
Unless this field is marked with required="false", it will be a required field
-->
<uniqueKey>id</uniqueKey>
<!-- Similarity is the scoring routine for each document vs. a query.
A custom Similarity or SimilarityFactory may be specified here, but
the default is fine for most applications.
For more info: http://wiki.apache.org/solr/SchemaXml#Similarity
-->
<!--
<similarity class="com.example.solr.CustomSimilarityFactory">
<str name="paramkey">param value</str>
</similarity>
-->
<!-- DEPRECATED: The defaultSearchField is consulted by various query parsers
when parsing a query string that isn't explicit about the field. Machine
(non-user) generated queries are best made explicit, or they can use the
"df" request parameter which takes precedence over this.
Note: Un-commenting defaultSearchField will be insufficient if your request
handler in solrconfig.xml defines "df", which takes precedence. That would
need to be removed.
<defaultSearchField>content</defaultSearchField> -->
<!-- DEPRECATED: The defaultOperator (AND|OR) is consulted by various query
parsers when parsing a query string to determine if a clause of the query
should be marked as required or optional, assuming the clause isn't already
marked by some operator. The default is OR, which is generally assumed so it
is not a good idea to change it globally here. The "q.op" request parameter
takes precedence over this.
<solrQueryParser defaultOperator="OR"/> -->
</schema>

View File

@@ -0,0 +1,23 @@
<fields>
<!--
Example: Adding German dynamic field types to our Solr Schema.
If you enable this, make sure you have a folder called lang containing
stopwords_de.txt and synonyms_de.txt.
This also requires enabling the content in schema_extra_types.xml.
-->
<!--
<field name="label_de" type="text_de" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
<field name="content_de" type="text_de" indexed="true" stored="true" termVectors="true"/>
<field name="teaser_de" type="text_de" indexed="false" stored="true"/>
<field name="path_alias_de" type="text_de" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
<field name="taxonomy_names_de" type="text_de" indexed="true" stored="false" termVectors="true" multiValued="true" omitNorms="true"/>
<field name="spell_de" type="text_de" indexed="true" stored="true" multiValued="true"/>
<copyField source="label_de" dest="spell_de"/>
<copyField source="content_de" dest="spell_de"/>
<dynamicField name="tags_de_*" type="text_de" indexed="true" stored="false" omitNorms="true"/>
<dynamicField name="ts_de_*" type="text_de" indexed="true" stored="true" multiValued="false" termVectors="true"/>
<dynamicField name="tm_de_*" type="text_de" indexed="true" stored="true" multiValued="true" termVectors="true"/>
<dynamicField name="tos_de_*" type="text_de" indexed="true" stored="true" multiValued="false" termVectors="true" omitNorms="true"/>
<dynamicField name="tom_de_*" type="text_de" indexed="true" stored="true" multiValued="true" termVectors="true" omitNorms="true"/>
-->
</fields>

View File

@@ -0,0 +1,34 @@
<types>
<!--
Example: Adding German language field types to our Solr Schema.
If you enable this, make sure you have a folder called lang containing
stopwords_de.txt and synonyms_de.txt.
For examples from other languages, see
./server/solr/configsets/sample_techproducts_configs/conf/schema.xml
from your Solr installation.
-->
<!--
<fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" words="lang/stopwords_de.txt" format="snowball" ignoreCase="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" splitOnCaseChange="1" splitOnNumerics="1" catenateWords="1" catenateNumbers="1" catenateAll="0" protected="protwords.txt" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.GermanLightStemFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="query">
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="lang/synonyms_de.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory" words="lang/stopwords_de.txt" format="snowball" ignoreCase="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" splitOnCaseChange="1" splitOnNumerics="1" catenateWords="0" catenateNumbers="0" catenateAll="0" protected="protwords.txt" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.GermanLightStemFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
-->
</types>

File diff suppressed because it is too large.

View File

@@ -0,0 +1,80 @@
<!-- Spell Check
The spell check component can return a list of alternative spelling
suggestions.
http://wiki.apache.org/solr/SpellCheckComponent
-->
<searchComponent name="spellcheck" class="solr.SpellCheckComponent">
<str name="queryAnalyzerFieldType">textSpell</str>
<!-- Multiple "Spell Checkers" can be declared and used by this
component
-->
<!-- a spellchecker built from a field of the main index, and
written to disk
-->
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">spell</str>
<str name="spellcheckIndexDir">spellchecker</str>
<str name="buildOnOptimize">true</str>
<!-- uncomment this to require terms to occur in 1% of the documents in order to be included in the dictionary
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
<!--
Adding a German spellchecker index to our Solr index.
This also requires enabling the content in schema_extra_types.xml and schema_extra_fields.xml.
-->
<!--
<lst name="spellchecker">
<str name="name">spellchecker_de</str>
<str name="field">spell_de</str>
<str name="spellcheckIndexDir">./spellchecker_de</str>
<str name="buildOnOptimize">true</str>
</lst>
-->
<!-- a spellchecker that uses a different distance measure -->
<!--
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">spell</str>
<str name="distanceMeasure">
org.apache.lucene.search.spell.JaroWinklerDistance
</str>
<str name="spellcheckIndexDir">spellcheckerJaro</str>
</lst>
-->
<!-- a spellchecker that uses an alternate comparator
comparatorClass can be one of:
1. score (default)
2. freq (Frequency first, then score)
3. A fully qualified class name
-->
<!--
<lst name="spellchecker">
<str name="name">freq</str>
<str name="field">lowerfilt</str>
<str name="spellcheckIndexDir">spellcheckerFreq</str>
<str name="comparatorClass">freq</str>
<str name="buildOnCommit">true</str>
-->
<!-- A spellchecker that reads the list of words from a file -->
<!--
<lst name="spellchecker">
<str name="classname">solr.FileBasedSpellChecker</str>
<str name="name">file</str>
<str name="sourceLocation">spellings.txt</str>
<str name="characterEncoding">UTF-8</str>
<str name="spellcheckIndexDir">spellcheckerFile</str>
</lst>
-->
</searchComponent>

View File

@@ -0,0 +1,20 @@
# Defines Solr properties for this specific core.
solr.replication.master=false
solr.replication.slave=false
solr.replication.pollInterval=00:00:60
solr.replication.masterUrl=http://localhost:8983/solr
solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
solr.mlt.timeAllowed=2000
# You should not set your luceneMatchVersion to anything lower than your Solr
# Version.
solr.luceneMatchVersion=6.0
solr.pinkPony.timeAllowed=-1
# autoCommit after 10000 docs
solr.autoCommit.MaxDocs=10000
# autoCommit after 2 minutes
solr.autoCommit.MaxTime=120000
# autoSoftCommit after 2000 docs
solr.autoSoftCommit.MaxDocs=2000
# autoSoftCommit after 10 seconds
solr.autoSoftCommit.MaxTime=10000
solr.contrib.dir=../../../contrib

View File

@@ -0,0 +1,4 @@
# Contains words which shouldn't be indexed for fulltext fields, e.g., because
# they're too common. For documentation of the format, see
# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
# (Lines starting with a pound character # are ignored.)

View File

@@ -0,0 +1,3 @@
# Contains synonyms to use for your index. For the format used, see
# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
# (Lines starting with a pound character # are ignored.)