remove elasticsearch binary

remotes/origin/HEAD
Max Yakovenko 8 years ago
parent f0c223ccae
commit a3fbcbfece
  1. 202
      elasticsearch-1.7.6/LICENSE.txt
  2. 5
      elasticsearch-1.7.6/NOTICE.txt
  3. 235
      elasticsearch-1.7.6/README.textile
  4. 251
      elasticsearch-1.7.6/bin/elasticsearch
  5. 0
      elasticsearch-1.7.6/bin/elasticsearch-service-mgr.exe
  6. 0
      elasticsearch-1.7.6/bin/elasticsearch-service-x64.exe
  7. 0
      elasticsearch-1.7.6/bin/elasticsearch-service-x86.exe
  8. 48
      elasticsearch-1.7.6/bin/elasticsearch.bat
  9. 83
      elasticsearch-1.7.6/bin/elasticsearch.in.bat
  10. 68
      elasticsearch-1.7.6/bin/elasticsearch.in.sh
  11. 108
      elasticsearch-1.7.6/bin/plugin
  12. 25
      elasticsearch-1.7.6/bin/plugin.bat
  13. 204
      elasticsearch-1.7.6/bin/service.bat
  14. 385
      elasticsearch-1.7.6/config/elasticsearch.yml
  15. 68
      elasticsearch-1.7.6/config/logging.yml
  16. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/_state/global-14.st
  17. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/_state/state-14.st
  18. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1.cfe
  19. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1.cfs
  20. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1.si
  21. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1z.cfe
  22. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1z.cfs
  23. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_1z.si
  24. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_20.cfe
  25. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_20.cfs
  26. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/_20.si
  27. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/segments.gen
  28. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/segments_20
  29. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/index/write.lock
  30. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/0/translog/translog-1495799468774
  31. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/_state/state-14.st
  32. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1.cfe
  33. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1.cfs
  34. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1.si
  35. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1s.cfe
  36. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1s.cfs
  37. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1s.si
  38. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1t.cfe
  39. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1t.cfs
  40. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_1t.si
  41. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_2.cfe
  42. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_2.cfs
  43. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/_2.si
  44. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/segments.gen
  45. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/segments_1n
  46. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/index/write.lock
  47. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/1/translog/translog-1495799468843
  48. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/_state/state-14.st
  49. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_1.cfe
  50. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_1.cfs
  51. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_1.si
  52. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_2.cfe
  53. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_2.cfs
  54. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_2.si
  55. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_q.cfe
  56. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_q.cfs
  57. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/_q.si
  58. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/segments.gen
  59. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/segments_19
  60. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/index/write.lock
  61. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/2/translog/translog-1495799468862
  62. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/_state/state-14.st
  63. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/_1.cfe
  64. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/_1.cfs
  65. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/_1.si
  66. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/segments.gen
  67. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/segments_j
  68. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/index/write.lock
  69. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/3/translog/translog-1495799468778
  70. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/_state/state-14.st
  71. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/_1.cfe
  72. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/_1.cfs
  73. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/_1.si
  74. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/segments.gen
  75. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/segments_n
  76. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/index/write.lock
  77. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/4/translog/translog-1495799468906
  78. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/indices/haystack/_state/state-2.st
  79. 0
      elasticsearch-1.7.6/data/elasticsearch/nodes/0/node.lock
  80. 19
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-05-26
  81. 4
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-06-17
  82. 12
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-07-07
  83. 1
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-07-19
  84. 4
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-07-28
  85. 32
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-15
  86. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-16
  87. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-17
  88. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-22
  89. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-23
  90. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-29
  91. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-30
  92. 12
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-08-31
  93. 4
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-09-01
  94. 12
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-09-02
  95. 4
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-09-06
  96. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-09-07
  97. 16
      elasticsearch-1.7.6/logs/elasticsearch.log.2017-09-14

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,5 +0,0 @@
Elasticsearch
Copyright 2009-2015 Elasticsearch
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).

@@ -1,235 +0,0 @@
h1. Elasticsearch
h2. A Distributed RESTful Search Engine
h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch
Elasticsearch is a distributed RESTful search engine built for the cloud. Features include:
* Distributed and Highly Available Search Engine.
** Each index is fully sharded with a configurable number of shards.
** Each shard can have one or more replicas.
** Read / Search operations performed on either one of the replica shard.
* Multi Tenant with Multi Types.
** Support for more than one index.
** Support for more than one type per index.
** Index level configuration (number of shards, index storage, ...).
* Various set of APIs
** HTTP RESTful API
** Native Java API.
** All APIs perform automatic node operation rerouting.
* Document oriented
** No need for upfront schema definition.
** Schema can be defined per type for customization of the indexing process.
* Reliable, Asynchronous Write Behind for long term persistency.
* (Near) Real Time Search.
* Built on top of Lucene
** Each shard is a fully functional Lucene index
** All the power of Lucene easily exposed through simple configuration / plugins.
* Per operation consistency
** Single document level operations are atomic, consistent, isolated and durable.
* Open Source under the Apache License, version 2 ("ALv2")
h2. Getting Started
First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
h3. Requirements
You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
h3. Installation
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
* Run @curl -X GET http://localhost:9200/@.
* Start more servers ...
h3. Indexing
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
<pre>
curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
{
"user": "kimchy",
"postDate": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
{
"user": "kimchy",
"postDate": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
</pre>
Now, let's see if the information was added by GETting it:
<pre>
curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
</pre>
h3. Searching
Mmm search..., shouldn't it be elastic?
Let's find all the tweets that @kimchy@ posted:
<pre>
curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
</pre>
We can also use the JSON query language Elasticsearch provides instead of a query string:
<pre>
curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
{
"query" : {
"match" : { "user": "kimchy" }
}
}'
</pre>
Just for kicks, let's get all the documents stored (we should see the user as well):
<pre>
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
{
"query" : {
"matchAll" : {}
}
}'
</pre>
We can also do range search (the @postDate@ was automatically identified as date)
<pre>
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
{
"query" : {
"range" : {
"postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
}
}
}'
</pre>
There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser.
h3. Multi Tenant - Indices and Types
Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data.
Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@.
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
<pre>
curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
{
"user": "kimchy",
"postDate": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
{
"user": "kimchy",
"postDate": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
</pre>
The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
<pre>
curl -XPUT http://localhost:9200/another_user/ -d '
{
"index" : {
"numberOfShards" : 1,
"numberOfReplicas" : 1
}
}'
</pre>
Search (and similar operations) are multi index aware. This means that we can easily search on more than one
index (twitter user), for example:
<pre>
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
{
"query" : {
"matchAll" : {}
}
}'
</pre>
Or on all the indices:
<pre>
curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
{
"query" : {
"matchAll" : {}
}
}'
</pre>
{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends).
h3. Distributed, Highly Available
Let's face it, things will fail....
Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
h3. Where to go from here?
We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
h3. Building from Source
Elasticsearch uses "Maven":http://maven.apache.org for its build system.
In order to create a distribution, simply run the @mvn clean package
-DskipTests@ command in the cloned directory.
The distribution will be created under @target/releases@.
See the "TESTING":TESTING.asciidoc file for more information about
running the Elasticsearch test suite.
h3. Upgrading to Elasticsearch 1.x?
In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process.
h1. License
<pre>
This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
Copyright 2009-2015 Elasticsearch <https://www.elastic.co>
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
</pre>

@@ -1,251 +0,0 @@
#!/bin/sh
# OPTIONS:
# -d daemonize (run in background)
# -p pidfile write PID to <pidfile>
# -h
# --help print command line options
# -v print elasticsearch version, then exit
# -D prop set JAVA system property
# -X prop set non-standard JAVA system property
# --prop=val
# --prop val set elasticsearch property (i.e. -Des.<prop>=<val>)
# CONTROLLING STARTUP:
#
# This script relies on few environment variables to determine startup
# behavior, those variables are:
#
# ES_CLASSPATH -- A Java classpath containing everything necessary to run.
# JAVA_OPTS -- Additional arguments to the JVM for heap size, etc
# ES_JAVA_OPTS -- External Java Opts on top of the defaults set
#
#
# Optionally, exact memory values can be set using the following values, note,
# they can still be set using the `ES_JAVA_OPTS`. Sample format include "512m", and "10g".
#
# ES_HEAP_SIZE -- Sets both the minimum and maximum memory to allocate (recommended)
#
# As a convenience, a fragment of shell is sourced in order to set one or
# more of these variables. This so-called `include' can be placed in a
# number of locations and will be searched for in order. The lowest
# priority search path is the same directory as the startup script, and
# since this is the location of the sample in the project tree, it should
# almost work Out Of The Box.
#
# Any serious use-case though will likely require customization of the
# include. For production installations, it is recommended that you copy
# the sample to one of /usr/share/elasticsearch/elasticsearch.in.sh,
# /usr/local/share/elasticsearch/elasticsearch.in.sh, or
# /opt/elasticsearch/elasticsearch.in.sh and make your modifications there.
#
# Another option is to specify the full path to the include file in the
# environment. For example:
#
# $ ES_INCLUDE=/path/to/in.sh elasticsearch -p /var/run/es.pid
#
# Note: This is particularly handy for running multiple instances on a
# single installation, or for quick tests.
#
# If you would rather configure startup entirely from the environment, you
# can disable the include by exporting an empty ES_INCLUDE, or by
# ensuring that no include files exist in the aforementioned search list.
# Be aware that you will be entirely responsible for populating the needed
# environment variables.
# Maven will replace the project.name with elasticsearch below. If that
# hasn't been done, we assume that this is not a packaged version and the
# user has forgotten to run Maven to create a package.
IS_PACKAGED_VERSION='elasticsearch'
if [ "$IS_PACKAGED_VERSION" != "elasticsearch" ]; then
cat >&2 << EOF
Error: You must build the project with Maven or download a pre-built package
before you can run Elasticsearch. See 'Building from Source' in README.textile
or visit http://www.elasticsearch.org/download to get a pre-built package.
EOF
exit 1
fi
# Clear CDPATH so the `cd` used below cannot echo a directory name into
# command substitutions and corrupt ES_HOME.
CDPATH=""
SCRIPT="$0"
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
# An absolute link target replaces SCRIPT outright; a relative one is
# resolved against the directory of the current SCRIPT.
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home (the script lives in $ES_HOME/bin)
ES_HOME=`dirname "$SCRIPT"`/..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
# The first readable file found is sourced and the search stops.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
$ES_HOME/bin/elasticsearch.in.sh \
"`dirname "$0"`"/elasticsearch.in.sh; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
# Prefer the JVM under JAVA_HOME; otherwise fall back to the first java on PATH.
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
# ES_CLASSPATH is expected to have been set by the sourced in.sh include above.
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
# Special-case path variables.
# On Cygwin the JVM needs Windows-style paths, so convert them with cygpath.
case `uname` in
CYGWIN*)
ES_CLASSPATH=`cygpath -p -w "$ES_CLASSPATH"`
ES_HOME=`cygpath -p -w "$ES_HOME"`
;;
esac
# Start the Elasticsearch JVM.
#   $1 - pidpath:    if non-empty, passed to the JVM as -Des.pidfile=<path>
#   $2 - daemonized: empty => run in the foreground (exec replaces this shell);
#                    non-empty => background the JVM with stdin closed
#   $3 - props:      extra -Des.<prop>=<val> properties collected from the CLI
# Relies on $JAVA, $ES_HOME and $ES_CLASSPATH having been set up above.
launch_service()
{
pidpath=$1
daemonized=$2
props=$3
es_parms="-Delasticsearch"
if [ "x$pidpath" != "x" ]; then
es_parms="$es_parms -Des.pidfile=$pidpath"
fi
# Make sure we don't use any predefined locale, as we check some exception message strings and rely on english language
# As those strings are created by the OS, they are dependent on the configured locale
LANG=en_US.UTF-8
LC_ALL=en_US.UTF-8
export HOSTNAME=`hostname -s`
# The es-foreground option will tell Elasticsearch not to close stdout/stderr, but it's up to us not to daemonize.
if [ "x$daemonized" = "x" ]; then
es_parms="$es_parms -Des.foreground=yes"
exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
org.elasticsearch.bootstrap.Elasticsearch
# exec without running it in the background, makes it replace this shell, we'll never get here...
# no need to return something
else
# Startup Elasticsearch, background it, and write the pid.
# <&- closes stdin for the backgrounded JVM; the pidfile (if any) is
# written by Elasticsearch itself via -Des.pidfile above.
exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
org.elasticsearch.bootstrap.Elasticsearch <&- &
return $?
fi
}
# Print command line usage / help
usage() {
# Emit the launcher's help text on stdout; $0 names the invoking script.
cat <<EOF
Usage: $0 [-vdh] [-p pidfile] [-D prop] [-X prop]
Start elasticsearch.
    -d            daemonize (run in background)
    -p pidfile    write PID to <pidfile>
    -h
    --help        print command line options
    -v            print elasticsearch version, then exit
    -D prop       set JAVA system property
    -X prop       set non-standard JAVA system property
    --prop=val
    --prop val    set elasticsearch property (i.e. -Des.<prop>=<val>)
EOF
}
# Parse any long getopt options and put them into properties before calling getopt below
# Be dash compatible to make sure running under ubuntu works
ARGV=""
while [ $# -gt 0 ]
do
case $1 in
# --help is translated to the short -h flag handled by getopt below.
--help) ARGV="$ARGV -h"; shift;;
# --name=value becomes -Des.name=value
--*=*) properties="$properties -Des.${1#--}"
shift 1
;;
# --name value becomes -Des.name=value; a trailing --name with no value
# prints a message and is skipped.
--*) [ $# -le 1 ] && {
echo "Option requires an argument: '$1'."
shift
continue
}
properties="$properties -Des.${1#--}=$2"
shift 2
;;
# Everything else is collected for getopt unchanged.
*) ARGV="$ARGV $1" ; shift
esac
done
# Parse any command line options.
# NOTE(review): $ARGV is expanded unquoted, so option values containing
# whitespace are word-split — a known limitation of plain getopt(1).
args=`getopt vdhp:D:X: $ARGV`
eval set -- "$args"
while true; do
case $1 in
# -v: print the Elasticsearch version and exit.
-v)
"$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
org.elasticsearch.Version
exit 0
;;
-p)
pidfile="$2"
shift 2
;;
-d)
daemonized="yes"
shift
;;
-h)
usage
exit 0
;;
# -Dkey=value: arbitrary JVM system property, passed through.
-D)
properties="$properties -D$2"
shift 2
;;
# -Xopt: non-standard JVM option, passed through.
-X)
properties="$properties -X$2"
shift 2
;;
--)
shift
break
;;
*)
echo "Error parsing argument $1!" >&2
usage
exit 1
;;
esac
done
# Start up the service
launch_service "$pidfile" "$daemonized" "$properties"
exit $?

@ -1,48 +0,0 @@
@echo off
REM Windows launcher for Elasticsearch 1.7.6: strips -s/--silent from the
REM arguments, sources elasticsearch.in.bat for JVM options, then runs the JVM.
SETLOCAL enabledelayedexpansion
TITLE Elasticsearch 1.7.6
SET params='%*'
REM Walk the argument list one token at a time (FOR splits the head token off).
:loop
FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO (
SET current=%%A
SET params='%%B'
SET silent=N
IF "!current!" == "-s" (
SET silent=Y
)
IF "!current!" == "--silent" (
SET silent=Y
)
REM -s/--silent is consumed here and only suppresses the PAUSE-on-error
REM below; every other token is appended to newparams for the JVM.
IF "!silent!" == "Y" (
SET nopauseonerror=Y
) ELSE (
IF "x!newparams!" NEQ "x" (
SET newparams=!newparams! !current!
) ELSE (
SET newparams=!current!
)
)
IF "x!params!" NEQ "x" (
GOTO loop
)
)
SET HOSTNAME=%COMPUTERNAME%
REM elasticsearch.in.bat populates JAVA_OPTS, ES_CLASSPATH and ES_PARAMS.
CALL "%~dp0elasticsearch.in.bat"
IF ERRORLEVEL 1 (
IF NOT DEFINED nopauseonerror (
PAUSE
)
EXIT /B %ERRORLEVEL%
)
"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% %ES_PARAMS% !newparams! -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch"
ENDLOCAL

@ -1,83 +0,0 @@
@echo off
REM Shared environment setup for the Windows launchers: validates JAVA_HOME,
REM derives ES_HOME, and assembles JAVA_OPTS / ES_CLASSPATH / ES_PARAMS.
if DEFINED JAVA_HOME goto cont
:err
ECHO JAVA_HOME environment variable must be set! 1>&2
EXIT /B 1
:cont
set SCRIPT_DIR=%~dp0
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
REM ***** JAVA options *****
if "%ES_MIN_MEM%" == "" (
set ES_MIN_MEM=256m
)
if "%ES_MAX_MEM%" == "" (
set ES_MAX_MEM=1g
)
REM ES_HEAP_SIZE, when set, pins min and max heap to the same value.
if NOT "%ES_HEAP_SIZE%" == "" (
set ES_MIN_MEM=%ES_HEAP_SIZE%
set ES_MAX_MEM=%ES_HEAP_SIZE%
)
REM min and max heap sizes should be set to the same value to avoid
REM stop-the-world GC pauses during resize, and so that we can lock the
REM heap in memory on startup to prevent any of it from being swapped
REM out.
set JAVA_OPTS=%JAVA_OPTS% -Xms%ES_MIN_MEM% -Xmx%ES_MAX_MEM%
REM new generation
if NOT "%ES_HEAP_NEWSIZE%" == "" (
set JAVA_OPTS=%JAVA_OPTS% -Xmn%ES_HEAP_NEWSIZE%
)
REM max direct memory
if NOT "%ES_DIRECT_SIZE%" == "" (
set JAVA_OPTS=%JAVA_OPTS% -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE%
)
REM set to headless, just in case
set JAVA_OPTS=%JAVA_OPTS% -Djava.awt.headless=true
REM Force the JVM to use IPv4 stack
if NOT "%ES_USE_IPV4%" == "" (
set JAVA_OPTS=%JAVA_OPTS% -Djava.net.preferIPv4Stack=true
)
REM CMS collector tuning (mirrors the Unix elasticsearch.in.sh defaults).
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseParNewGC
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseConcMarkSweepGC
set JAVA_OPTS=%JAVA_OPTS% -XX:CMSInitiatingOccupancyFraction=75
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseCMSInitiatingOccupancyOnly
REM When running under Java 7
REM JAVA_OPTS=%JAVA_OPTS% -XX:+UseCondCardMark
REM Verbose GC logging, enabled by setting ES_USE_GC_LOGGING to any value.
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDateStamps
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -Xloggc:%ES_HOME%/logs/gc.log
REM Causes the JVM to dump its heap on OutOfMemory.
set JAVA_OPTS=%JAVA_OPTS% -XX:+HeapDumpOnOutOfMemoryError
REM The path to the heap dump location, note directory must exists and have enough
REM space for a full heap dump.
REM JAVA_OPTS=%JAVA_OPTS% -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof
REM Disables explicit GC
set JAVA_OPTS=%JAVA_OPTS% -XX:+DisableExplicitGC
REM Ensure UTF-8 encoding by default (e.g. filenames)
set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding=UTF-8
set ES_CLASSPATH=%ES_CLASSPATH%;%ES_HOME%/lib/elasticsearch-1.7.6.jar;%ES_HOME%/lib/*;%ES_HOME%/lib/sigar/*
REM NOTE(review): "-Des-foreground" differs from the Unix launcher's
REM "-Des.foreground=yes" spelling - confirm which property Bootstrap reads.
set ES_PARAMS=-Delasticsearch -Des-foreground=yes -Des.path.home="%ES_HOME%"

@ -1,68 +0,0 @@
#!/bin/sh
# Environment include sourced by bin/elasticsearch. Assembles ES_CLASSPATH
# and JAVA_OPTS; every variable here may be pre-set by the caller.
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/lib/elasticsearch-1.7.6.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/*"
# Heap defaults; may be overridden as a pair by ES_HEAP_SIZE below.
: "${ES_MIN_MEM:=256m}"
: "${ES_MAX_MEM:=1g}"
# min and max heap sizes should be set to the same value to avoid
# stop-the-world GC pauses during resize, and so that we can lock the
# heap in memory on startup to prevent any of it from being swapped
# out. ES_HEAP_SIZE pins both bounds to one value.
if [ -n "$ES_HEAP_SIZE" ]; then
ES_MIN_MEM="$ES_HEAP_SIZE"
ES_MAX_MEM="$ES_HEAP_SIZE"
fi
JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM} -Xmx${ES_MAX_MEM}"
# Optional new-generation size.
if [ -n "$ES_HEAP_NEWSIZE" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
fi
# Optional cap on direct (off-heap) memory.
if [ -n "$ES_DIRECT_SIZE" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
fi
# Run headless, just in case.
JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
# Optionally force the JVM onto the IPv4 stack.
if [ -n "$ES_USE_IPV4" ]; then
JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
fi
# CMS collector tuning.
for gc_flag in \
-XX:+UseParNewGC \
-XX:+UseConcMarkSweepGC \
-XX:CMSInitiatingOccupancyFraction=75 \
-XX:+UseCMSInitiatingOccupancyOnly
do
JAVA_OPTS="$JAVA_OPTS $gc_flag"
done
# Verbose GC logging, enabled via ES_USE_GC_LOGGING.
if [ -n "$ES_USE_GC_LOGGING" ]; then
for gc_flag in \
-XX:+PrintGCDetails \
-XX:+PrintGCTimeStamps \
-XX:+PrintGCDateStamps \
-XX:+PrintClassHistogram \
-XX:+PrintTenuringDistribution \
-XX:+PrintGCApplicationStoppedTime \
-Xloggc:/var/log/elasticsearch/gc.log
do
JAVA_OPTS="$JAVA_OPTS $gc_flag"
done
fi
# Causes the JVM to dump its heap on OutOfMemory.
JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
# The path to the heap dump location, note directory must exists and have enough
# space for a full heap dump.
#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
# Disables explicit GC
JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC"
# Ensure UTF-8 encoding by default (e.g. filenames)
JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8"

@ -1,108 +0,0 @@
#!/bin/sh
# Launcher for the Elasticsearch plugin manager (bin/plugin). Resolves
# ES_HOME, sources an optional environment file, splits -D properties from
# plugin-manager arguments, then runs org.elasticsearch.plugins.PluginManager.
CDPATH=""
SCRIPT="$0"
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# Sets the default values for elasticsearch variables used in this script
if [ -z "$CONF_DIR" ]; then
CONF_DIR="$ES_HOME/config"
if [ -z "$CONF_FILE" ]; then
CONF_FILE="$CONF_DIR/elasticsearch.yml"
fi
fi
if [ -z "$CONF_FILE" ]; then
CONF_FILE="$ES_HOME/config/elasticsearch.yml"
fi
# The default env file is defined at building/packaging time.
# For a tar.gz package, the value is "".
ES_ENV_FILE=""
# If an include is specified with the ES_INCLUDE environment variable, use it
if [ -n "$ES_INCLUDE" ]; then
ES_ENV_FILE="$ES_INCLUDE"
fi
# Source the environment file
if [ -n "$ES_ENV_FILE" ]; then
# If the ES_ENV_FILE is not found, try to resolve the path
# against the ES_HOME directory
if [ ! -f "$ES_ENV_FILE" ]; then
# BUGFIX: this previously used the undefined variable ELASTIC_HOME, so a
# relative ES_INCLUDE could never resolve; ES_HOME is the variable set above.
ES_ENV_FILE="$ES_HOME/$ES_ENV_FILE"
fi
. "$ES_ENV_FILE"
if [ $? -ne 0 ]; then
echo "Unable to source environment file: $ES_ENV_FILE" >&2
exit 1
fi
fi
# Prefer the JVM under JAVA_HOME; otherwise fall back to PATH.
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA=$JAVA_HOME/bin/java
else
JAVA=`which java`
fi
# Fail fast with a clear message when no JVM is available
# (consistent with the bin/elasticsearch launcher).
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME" >&2
exit 1
fi
# real getopt cannot be used because we need to hand options over to the PluginManager
while [ $# -gt 0 ]; do
case $1 in
# -Dkey=value: forwarded to the JVM as-is.
-D*=*)
properties="$properties \"$1\""
;;
# -Dkey value: re-joined into -Dkey=value.
-D*)
var=$1
shift
properties="$properties \"$var\"=\"$1\""
;;
# Everything else is an argument for the PluginManager itself.
*)
args="$args \"$1\""
esac
shift
done
# check if properties already has a config file or config dir
if [ -e "$CONF_DIR" ]; then
case "$properties" in
*-Des.default.path.conf=*|*-Des.path.conf=*)
;;
*)
properties="$properties -Des.default.path.conf=\"$CONF_DIR\""
;;
esac
fi
if [ -e "$CONF_FILE" ]; then
case "$properties" in
*-Des.default.config=*|*-Des.config=*)
;;
*)
properties="$properties -Des.default.config=\"$CONF_FILE\""
;;
esac
fi
export HOSTNAME=`hostname -s`
eval "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home=\""$ES_HOME"\" $properties -cp \""$ES_HOME/lib/*"\" org.elasticsearch.plugins.PluginManager $args

@ -1,25 +0,0 @@
@echo off
REM Windows launcher for the Elasticsearch plugin manager.
SETLOCAL
if NOT DEFINED JAVA_HOME goto err
set SCRIPT_DIR=%~dp0
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
TITLE Elasticsearch Plugin Manager 1.7.6
SET HOSTNAME=%COMPUTERNAME%
"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% -Xmx64m -Xms16m -Des.path.home="%ES_HOME%" -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginManager" %*
goto finally
:err
echo JAVA_HOME environment variable must be set!
pause
REM BUGFIX: previously fell through to :finally and exited with status 0
REM even though JAVA_HOME was missing; return a failure code instead.
ENDLOCAL
EXIT /B 1
:finally
ENDLOCAL

@ -1,204 +0,0 @@
@echo off
REM Windows service wrapper for Elasticsearch 1.7.6 built on Apache Commons
REM Daemon (procrun). Supports install/remove/start/stop/manager commands,
REM with an optional second argument overriding the service id.
SETLOCAL
TITLE Elasticsearch Service 1.7.6
if NOT DEFINED JAVA_HOME goto err
set SCRIPT_DIR=%~dp0
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI
rem Detect JVM version to figure out appropriate executable to use
if not exist "%JAVA_HOME%\bin\java.exe" (
echo JAVA_HOME points to an invalid Java installation (no java.exe found in "%JAVA_HOME%"^). Exiting...
goto:eof
)
rem A 64-bit JVM needs the x64 procrun binary; otherwise use the x86 one.
"%JAVA_HOME%\bin\java" -version 2>&1 | "%windir%\System32\find" "64-Bit" >nul:
if errorlevel 1 goto x86
set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe
set SERVICE_ID=elasticsearch-service-x64
set ARCH=64-bit
goto checkExe
:x86
set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x86.exe
set SERVICE_ID=elasticsearch-service-x86
set ARCH=32-bit
:checkExe
if EXIST "%EXECUTABLE%" goto okExe
echo elasticsearch-service-(x86|x64).exe was not found...
:okExe
set ES_VERSION=1.7.6
if "%LOG_DIR%" == "" set LOG_DIR=%ES_HOME%\logs
if "x%1x" == "xx" goto displayUsage
set SERVICE_CMD=%1
shift
rem Optional second argument overrides the detected service id.
if "x%1x" == "xx" goto checkServiceCmd
set SERVICE_ID=%1
:checkServiceCmd
if "%LOG_OPTS%" == "" set LOG_OPTS=--LogPath "%LOG_DIR%" --LogPrefix "%SERVICE_ID%" --StdError auto --StdOutput auto
if /i %SERVICE_CMD% == install goto doInstall
if /i %SERVICE_CMD% == remove goto doRemove
if /i %SERVICE_CMD% == start goto doStart
if /i %SERVICE_CMD% == stop goto doStop
if /i %SERVICE_CMD% == manager goto doManagment
echo Unknown option "%SERVICE_CMD%"
:displayUsage
echo.
echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
goto:eof
:doStart
"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS%
if not errorlevel 1 goto started
echo Failed starting '%SERVICE_ID%' service
goto:eof
:started
echo The service '%SERVICE_ID%' has been started
goto:eof
:doStop
"%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS%
if not errorlevel 1 goto stopped
echo Failed stopping '%SERVICE_ID%' service
goto:eof
:stopped
echo The service '%SERVICE_ID%' has been stopped
goto:eof
:doManagment
set EXECUTABLE_MGR=%ES_HOME%\bin\elasticsearch-service-mgr.exe
"%EXECUTABLE_MGR%" //ES//%SERVICE_ID%
if not errorlevel 1 goto managed
echo Failed starting service manager for '%SERVICE_ID%'
goto:eof
:managed
rem BUGFIX: message previously read "Succesfully".
echo Successfully started service manager for '%SERVICE_ID%'.
goto:eof
:doRemove
rem Remove the service
"%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS%
if not errorlevel 1 goto removed
echo Failed removing '%SERVICE_ID%' service
goto:eof
:removed
echo The service '%SERVICE_ID%' has been removed
goto:eof
:doInstall
echo Installing service      :  "%SERVICE_ID%"
echo Using JAVA_HOME (%ARCH%):  "%JAVA_HOME%"
rem Check JVM server dll first
set JVM_DLL=%JAVA_HOME%\jre\bin\server\jvm.dll
if exist "%JVM_DLL%" goto foundJVM
rem Check 'server' JRE (JRE installed on Windows Server)
set JVM_DLL=%JAVA_HOME%\bin\server\jvm.dll
if exist "%JVM_DLL%" goto foundJVM
rem Fallback to 'client' JRE
set JVM_DLL=%JAVA_HOME%\bin\client\jvm.dll
if exist "%JVM_DLL%" (
echo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used...
) else (
rem BUGFIX: message previously read "Existing..." instead of "Exiting...".
echo JAVA_HOME points to an invalid Java installation (no jvm.dll found in "%JAVA_HOME%"^). Exiting...
goto:eof
)
:foundJVM
if "%ES_MIN_MEM%" == "" set ES_MIN_MEM=256m
if "%ES_MAX_MEM%" == "" set ES_MAX_MEM=1g
if NOT "%ES_HEAP_SIZE%" == "" set ES_MIN_MEM=%ES_HEAP_SIZE%
if NOT "%ES_HEAP_SIZE%" == "" set ES_MAX_MEM=%ES_HEAP_SIZE%
rem procrun expects heap sizes as plain MB values.
call:convertxm %ES_MIN_MEM% JVM_XMS
call:convertxm %ES_MAX_MEM% JVM_XMX
REM java_opts might be empty - init to avoid tripping commons daemon (if the command starts with ;)
if "%JAVA_OPTS%" == "" set JAVA_OPTS=-XX:+UseParNewGC
CALL "%ES_HOME%\bin\elasticsearch.in.bat"
rem thread stack size
set JVM_SS=256
if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data
if "%WORK_DIR%" == "" set WORK_DIR=%ES_HOME%
if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config
if "%CONF_FILE%" == "" set CONF_FILE=%ES_HOME%\config\elasticsearch.yml
set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.work="%WORK_DIR%";-Des.default.path.conf="%CONF_DIR%"
rem procrun separates JVM options with ';'; '#' stands in for spaces inside one option.
set JVM_OPTS=%JAVA_OPTS: =;%
if not "%ES_JAVA_OPTS%" == "" set JVM_ES_JAVA_OPTS=%ES_JAVA_OPTS: =#%
if not "%ES_JAVA_OPTS%" == "" set JVM_OPTS=%JVM_OPTS%;%JVM_ES_JAVA_OPTS%
if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual
if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0
"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%"
if not errorlevel 1 goto installed
echo Failed installing '%SERVICE_ID%' service
goto:eof
:installed
echo The service '%SERVICE_ID%' has been installed.
goto:eof
:err
echo JAVA_HOME environment variable must be set!
pause
goto:eof
rem ---
rem Function for converting Xm[s|x] values into MB which Commons Daemon accepts
rem ---
:convertxm
set value=%~1
rem extract last char (unit)
set unit=%value:~-1%
rem assume the unit is specified
set conv=%value:~0,-1%
if "%unit%" == "k" goto kilo
if "%unit%" == "K" goto kilo
if "%unit%" == "m" goto mega
if "%unit%" == "M" goto mega
if "%unit%" == "g" goto giga
if "%unit%" == "G" goto giga
rem no unit found, must be bytes; consider the whole value
set conv=%value%
rem convert to KB
set /a conv=%conv% / 1024
:kilo
rem convert to MB
set /a conv=%conv% / 1024
goto mega
:giga
rem convert to MB
set /a conv=%conv% * 1024
:mega
set "%~2=%conv%"
goto:eof
ENDLOCAL

@ -1,385 +0,0 @@
##################### Elasticsearch Configuration Example #####################
# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
#
# Elasticsearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of a certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].
# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
#node.rack: ${RACK_ENV_VAR}
# For information on supported formats and syntax for the config file, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
################################### Cluster ###################################
# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
#cluster.name: elasticsearch
#################################### Node #####################################
# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#
#node.name: "Franz Kafka"
# Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny to store the data.
#
# Allow this node to be eligible as a master node (enabled by default):
#
#node.master: true
#
# Allow this node to store data (enabled by default):
#
#node.data: true
# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
# This will be the "workhorse" of your cluster.
#
#node.master: false
#node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
# to have free resources. This will be the "coordinator" of your cluster.
#
#node.master: true
#node.data: false
#
# 3. You want this node to be neither master nor data node, but
# to act as a "search load balancer" (fetching data from nodes,
# aggregating results, etc.)
#
#node.master: false
#node.data: false
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_nodes] or GUI tools
# such as <http://www.elasticsearch.org/overview/marvel/>,
# <http://github.com/karmi/elasticsearch-paramedic>,
# <http://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key value pair, similar to node.key: value, here is an example:
#
#node.rack: rack314
# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
#node.max_local_storage_nodes: 1
#################################### Index ####################################
# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note, that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
# for more information.
# Set the number of shards (splits) of an index (5 by default):
#
#index.number_of_shards: 5
# Set the number of replicas (additional copies) of an index (1 by default):
#
#index.number_of_replicas: 1
# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
#index.number_of_shards: 1
#index.number_of_replicas: 0
# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
# _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
# cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# Elasticsearch takes care about load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
#
#path.conf: /path/to/conf
# Path to directory where to store index data allocated for this node.
#
#path.data: /path/to/data
#
# Can optionally include more than one location, causing data to be striped across
# the locations (a la RAID 0) on a file level, favouring locations with most free
# space on creation. For example:
#
#path.data: /path/to/data1,/path/to/data2
# Path to temporary files:
#
#path.work: /path/to/work
# Path to log files:
#
#path.logs: /path/to/logs
# Path to where plugins are installed:
#
#path.plugins: /path/to/plugins
#################################### Plugin ###################################
# If a plugin listed here is not installed for current node, the node will not start.
#
#plugin.mandatory: mapper-attachments,lang-groovy
################################### Memory ####################################
# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
#bootstrap.mlockall: true
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for Elasticsearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the Elasticsearch process is allowed to lock
# the memory, eg. by using `ulimit -l unlimited`.
############################## Network And HTTP ###############################
# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (the range means that if the port is busy, it will automatically
# try the next port).
# Set the bind address specifically (IPv4 or IPv6):
#
#network.bind_host: 192.168.0.1
# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
#
#network.publish_host: 192.168.0.1
# Set both 'bind_host' and 'publish_host':
#
#network.host: 192.168.0.1
# Set a custom port for the node to node communication (9300 by default):
#
#transport.tcp.port: 9300
# Enable compression for all communication between nodes (disabled by default):
#
#transport.tcp.compress: true
# Set a custom port to listen for HTTP traffic:
#
#http.port: 9200
# Set a custom allowed content length:
#
#http.max_content_length: 100mb
# Disable HTTP completely:
#
#http.enabled: false
################################### Gateway ###################################
# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.
# There are several types of gateway implementations. For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
# The default gateway type is the "local" gateway (recommended):
#
#gateway.type: local
# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using shared
# gateway).
# Allow recovery process after N nodes in a cluster are up:
#
#gateway.recover_after_nodes: 1
# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
#gateway.recover_after_time: 5m
# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
#gateway.expected_nodes: 2
############################# Recovery Throttling #############################
# These settings allow to control the process of shards allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.
# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc
#
#cluster.routing.allocation.node_concurrent_recoveries: 2
# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
#
#indices.recovery.max_bytes_per_sec: 20mb
# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
#indices.recovery.concurrent_streams: 5
################################## Discovery ##################################
# Discovery infrastructure ensures nodes can be found within a cluster
# and master node is elected. Multicast discovery is the default.
# Set to ensure a node sees N other master eligible nodes to be considered
# operational within the cluster. This should be set to a quorum/majority of
# the master-eligible nodes in the cluster.
#
#discovery.zen.minimum_master_nodes: 1
# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
#discovery.zen.ping.timeout: 3s
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
#discovery.zen.ping.multicast.enabled: false
#
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
#
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
#
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
# for a step-by-step tutorial.
# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
#
# You have to install the cloud-gce plugin for enabling the GCE discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
# Azure discovery allows to use Azure API in order to perform discovery.
#
# You have to install the cloud-azure plugin for enabling the Azure discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
################################## Slow Log ##################################
# Shard level query and fetch threshold logging.
#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms
#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms
#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms
################################## GC Logging ################################
#monitor.jvm.gc.young.warn: 1000ms
#monitor.jvm.gc.young.info: 700ms
#monitor.jvm.gc.young.debug: 400ms
#monitor.jvm.gc.old.warn: 10s
#monitor.jvm.gc.old.info: 5s
#monitor.jvm.gc.old.debug: 2s
################################## Security ################################
# Uncomment if you want to enable JSONP as a valid return transport on the
# http server. With this enabled, it may pose a security risk, so disabling
# it unless you need it is recommended (it is disabled by default).
#
#http.jsonp.enable: true

@ -1,68 +0,0 @@
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: DEBUG
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
org.apache.http: INFO
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
# peer shard recovery
#indices.recovery: DEBUG
# discovery
#discovery: TRACE
index.search.slowlog: TRACE, index_search_slow_log_file
index.indexing.slowlog: TRACE, index_indexing_slow_log_file
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
# Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
# For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
#file:
#type: extrasRollingFile
#file: ${path.logs}/${cluster.name}.log
#rollingPolicy: timeBased
#rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
#layout:
#type: pattern
#conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_search_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_indexing_slow_log_file:
type: dailyRollingFile
file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
datePattern: "'.'yyyy-MM-dd"
layout:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

@ -1,19 +0,0 @@
[2017-05-26 12:47:37,459][INFO ][node ] [Arclight] version[1.7.6], pid[20177], build[c730b59/2016-11-18T15:21:16Z]
[2017-05-26 12:47:37,460][INFO ][node ] [Arclight] initializing ...
[2017-05-26 12:47:37,727][INFO ][plugins ] [Arclight] loaded [], sites []
[2017-05-26 12:47:37,946][INFO ][env ] [Arclight] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.4gb], net total_space [15.6gb], types [ext4]
[2017-05-26 12:47:45,422][INFO ][node ] [Arclight] initialized
[2017-05-26 12:47:45,423][INFO ][node ] [Arclight] starting ...
[2017-05-26 12:47:45,908][INFO ][transport ] [Arclight] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-05-26 12:47:46,128][INFO ][discovery ] [Arclight] elasticsearch/8dGBD5QkRIixHI9QPyxgvA
[2017-05-26 12:47:50,020][INFO ][cluster.service ] [Arclight] new_master [Arclight][8dGBD5QkRIixHI9QPyxgvA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-05-26 12:47:50,098][INFO ][http ] [Arclight] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-05-26 12:47:50,099][INFO ][node ] [Arclight] started
[2017-05-26 12:47:50,255][INFO ][gateway ] [Arclight] recovered [0] indices into cluster_state
[2017-05-26 12:51:25,957][INFO ][cluster.metadata ] [Arclight] [haystack] creating index, cause [api], templates [], shards [5]/[1], mappings []
[2017-05-26 12:51:28,548][INFO ][cluster.metadata ] [Arclight] [haystack] create_mapping [modelresult]
[2017-05-26 12:51:29,477][INFO ][cluster.metadata ] [Arclight] [haystack] update_mapping [modelresult] (dynamic)
[2017-05-26 14:51:08,029][INFO ][cluster.metadata ] [Arclight] [haystack] deleting index
[2017-05-26 14:51:08,528][INFO ][cluster.metadata ] [Arclight] [haystack] creating index, cause [api], templates [], shards [5]/[1], mappings []
[2017-05-26 14:51:09,024][INFO ][cluster.metadata ] [Arclight] [haystack] create_mapping [modelresult]
[2017-05-26 14:51:09,223][INFO ][cluster.metadata ] [Arclight] [haystack] update_mapping [modelresult] (dynamic)

@ -1,4 +0,0 @@
[2017-06-17 20:50:55,204][INFO ][node ] [Arclight] stopping ...
[2017-06-17 20:50:55,614][INFO ][node ] [Arclight] stopped
[2017-06-17 20:50:55,615][INFO ][node ] [Arclight] closing ...
[2017-06-17 20:50:55,696][INFO ][node ] [Arclight] closed

@ -1,12 +0,0 @@
[2017-07-07 11:58:51,311][INFO ][node ] [Bentley Wittman] version[1.7.6], pid[20877], build[c730b59/2016-11-18T15:21:16Z]
[2017-07-07 11:58:51,315][INFO ][node ] [Bentley Wittman] initializing ...
[2017-07-07 11:58:51,868][INFO ][plugins ] [Bentley Wittman] loaded [], sites []
[2017-07-07 11:58:52,228][INFO ][env ] [Bentley Wittman] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.2gb], net total_space [15.6gb], types [ext4]
[2017-07-07 11:59:07,933][INFO ][node ] [Bentley Wittman] initialized
[2017-07-07 11:59:07,935][INFO ][node ] [Bentley Wittman] starting ...
[2017-07-07 11:59:09,450][INFO ][transport ] [Bentley Wittman] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-07-07 11:59:09,673][INFO ][discovery ] [Bentley Wittman] elasticsearch/6JSNOrSOQ5u-Tj8SGLxJSw
[2017-07-07 11:59:13,693][INFO ][cluster.service ] [Bentley Wittman] new_master [Bentley Wittman][6JSNOrSOQ5u-Tj8SGLxJSw][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-07-07 11:59:13,874][INFO ][http ] [Bentley Wittman] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-07-07 11:59:13,877][INFO ][node ] [Bentley Wittman] started
[2017-07-07 11:59:14,214][INFO ][gateway ] [Bentley Wittman] recovered [1] indices into cluster_state

@ -1 +0,0 @@
[2017-07-19 15:29:29,020][WARN ][monitor.jvm ] [Bentley Wittman] [gc][young][14006][5] duration [1s], collections [1]/[2.3s], total [1s]/[1.4s], memory [97.9mb]->[32.6mb]/[1015.6mb], all_pools {[young] [66.5mb]->[572.3kb]/[66.5mb]}{[survivor] [6.3mb]->[3.6mb]/[8.3mb]}{[old] [25mb]->[28.4mb]/[940.8mb]}

@ -1,4 +0,0 @@
[2017-07-28 15:03:10,357][INFO ][node ] [Bentley Wittman] stopping ...
[2017-07-28 15:03:11,735][INFO ][node ] [Bentley Wittman] stopped
[2017-07-28 15:03:11,736][INFO ][node ] [Bentley Wittman] closing ...
[2017-07-28 15:03:11,813][INFO ][node ] [Bentley Wittman] closed

@ -1,32 +0,0 @@
[2017-08-15 13:38:51,674][INFO ][node ] [Quentin Beck] version[1.7.6], pid[28446], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-15 13:38:51,678][INFO ][node ] [Quentin Beck] initializing ...
[2017-08-15 13:38:52,206][INFO ][plugins ] [Quentin Beck] loaded [], sites []
[2017-08-15 13:38:52,559][INFO ][env ] [Quentin Beck] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.2gb], net total_space [15.6gb], types [ext4]
[2017-08-15 13:39:14,039][INFO ][node ] [Quentin Beck] initialized
[2017-08-15 13:39:14,045][INFO ][node ] [Quentin Beck] starting ...
[2017-08-15 13:39:14,881][INFO ][transport ] [Quentin Beck] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-15 13:39:15,158][INFO ][discovery ] [Quentin Beck] elasticsearch/HhNRRNq4RQGxhQtyerTtJg
[2017-08-15 13:39:19,157][INFO ][cluster.service ] [Quentin Beck] new_master [Quentin Beck][HhNRRNq4RQGxhQtyerTtJg][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-15 13:39:19,336][INFO ][http ] [Quentin Beck] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-15 13:39:19,339][INFO ][node ] [Quentin Beck] started
[2017-08-15 13:39:19,771][INFO ][gateway ] [Quentin Beck] recovered [1] indices into cluster_state
[2017-08-15 13:40:43,547][INFO ][node ] [Quentin Beck] stopping ...
[2017-08-15 13:40:43,809][INFO ][node ] [Quentin Beck] stopped
[2017-08-15 13:40:43,810][INFO ][node ] [Quentin Beck] closing ...
[2017-08-15 13:40:43,849][INFO ][node ] [Quentin Beck] closed
[2017-08-15 14:56:57,495][INFO ][node ] [Quincy Harker] version[1.7.6], pid[29345], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-15 14:56:57,504][INFO ][node ] [Quincy Harker] initializing ...
[2017-08-15 14:56:58,011][INFO ][plugins ] [Quincy Harker] loaded [], sites []
[2017-08-15 14:56:58,312][INFO ][env ] [Quincy Harker] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.2gb], net total_space [15.6gb], types [ext4]
[2017-08-15 14:57:21,644][INFO ][node ] [Quincy Harker] initialized
[2017-08-15 14:57:21,645][INFO ][node ] [Quincy Harker] starting ...
[2017-08-15 14:57:22,543][INFO ][transport ] [Quincy Harker] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-15 14:57:22,768][INFO ][discovery ] [Quincy Harker] elasticsearch/6iS2w1EOSjKeYUdStM2r9A
[2017-08-15 14:57:26,751][INFO ][cluster.service ] [Quincy Harker] new_master [Quincy Harker][6iS2w1EOSjKeYUdStM2r9A][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-15 14:57:26,980][INFO ][http ] [Quincy Harker] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-15 14:57:26,989][INFO ][node ] [Quincy Harker] started
[2017-08-15 14:57:27,546][INFO ][gateway ] [Quincy Harker] recovered [1] indices into cluster_state
[2017-08-15 21:49:55,880][INFO ][node ] [Quincy Harker] stopping ...
[2017-08-15 21:49:56,205][INFO ][node ] [Quincy Harker] stopped
[2017-08-15 21:49:56,207][INFO ][node ] [Quincy Harker] closing ...
[2017-08-15 21:49:56,257][INFO ][node ] [Quincy Harker] closed

@ -1,16 +0,0 @@
[2017-08-16 17:32:58,036][INFO ][node ] [Mass Master] version[1.7.6], pid[4428], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-16 17:32:58,038][INFO ][node ] [Mass Master] initializing ...
[2017-08-16 17:32:58,489][INFO ][plugins ] [Mass Master] loaded [], sites []
[2017-08-16 17:32:58,724][INFO ][env ] [Mass Master] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.1gb], net total_space [15.6gb], types [ext4]
[2017-08-16 17:33:08,102][INFO ][node ] [Mass Master] initialized
[2017-08-16 17:33:08,107][INFO ][node ] [Mass Master] starting ...
[2017-08-16 17:33:08,665][INFO ][transport ] [Mass Master] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-16 17:33:08,925][INFO ][discovery ] [Mass Master] elasticsearch/RyELkcIZQWew44bd7iKbbg
[2017-08-16 17:33:13,026][INFO ][cluster.service ] [Mass Master] new_master [Mass Master][RyELkcIZQWew44bd7iKbbg][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-16 17:33:13,228][INFO ][http ] [Mass Master] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-16 17:33:13,230][INFO ][node ] [Mass Master] started
[2017-08-16 17:33:13,539][INFO ][gateway ] [Mass Master] recovered [1] indices into cluster_state
[2017-08-16 22:06:00,926][INFO ][node ] [Mass Master] stopping ...
[2017-08-16 22:06:01,233][INFO ][node ] [Mass Master] stopped
[2017-08-16 22:06:01,234][INFO ][node ] [Mass Master] closing ...
[2017-08-16 22:06:01,303][INFO ][node ] [Mass Master] closed

@ -1,16 +0,0 @@
[2017-08-17 13:50:21,513][INFO ][node ] [Wraith] version[1.7.6], pid[9679], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-17 13:50:21,522][INFO ][node ] [Wraith] initializing ...
[2017-08-17 13:50:22,320][INFO ][plugins ] [Wraith] loaded [], sites []
[2017-08-17 13:50:22,670][INFO ][env ] [Wraith] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.1gb], net total_space [15.6gb], types [ext4]
[2017-08-17 13:50:42,180][INFO ][node ] [Wraith] initialized
[2017-08-17 13:50:42,194][INFO ][node ] [Wraith] starting ...
[2017-08-17 13:50:43,313][INFO ][transport ] [Wraith] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-17 13:50:43,558][INFO ][discovery ] [Wraith] elasticsearch/LDs7nq_zQeeUYQnix3pQ3g
[2017-08-17 13:50:47,595][INFO ][cluster.service ] [Wraith] new_master [Wraith][LDs7nq_zQeeUYQnix3pQ3g][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-17 13:50:47,909][INFO ][http ] [Wraith] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-17 13:50:47,913][INFO ][node ] [Wraith] started
[2017-08-17 13:50:48,453][INFO ][gateway ] [Wraith] recovered [1] indices into cluster_state
[2017-08-17 21:56:52,733][INFO ][node ] [Wraith] stopping ...
[2017-08-17 21:56:53,608][INFO ][node ] [Wraith] stopped
[2017-08-17 21:56:53,609][INFO ][node ] [Wraith] closing ...
[2017-08-17 21:56:53,654][INFO ][node ] [Wraith] closed

@ -1,16 +0,0 @@
[2017-08-22 15:54:04,779][INFO ][node ] [Lord Chaos] version[1.7.6], pid[23013], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-22 15:54:04,830][INFO ][node ] [Lord Chaos] initializing ...
[2017-08-22 15:54:05,462][INFO ][plugins ] [Lord Chaos] loaded [], sites []
[2017-08-22 15:54:05,838][INFO ][env ] [Lord Chaos] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.1gb], net total_space [15.6gb], types [ext4]
[2017-08-22 15:54:25,606][INFO ][node ] [Lord Chaos] initialized
[2017-08-22 15:54:25,607][INFO ][node ] [Lord Chaos] starting ...
[2017-08-22 15:54:26,709][INFO ][transport ] [Lord Chaos] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-22 15:54:27,074][INFO ][discovery ] [Lord Chaos] elasticsearch/XWPyXk-qTQWalGlghc48xA
[2017-08-22 15:54:31,031][INFO ][cluster.service ] [Lord Chaos] new_master [Lord Chaos][XWPyXk-qTQWalGlghc48xA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-22 15:54:31,261][INFO ][http ] [Lord Chaos] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-22 15:54:31,262][INFO ][node ] [Lord Chaos] started
[2017-08-22 15:54:31,806][INFO ][gateway ] [Lord Chaos] recovered [1] indices into cluster_state
[2017-08-22 22:12:57,358][INFO ][node ] [Lord Chaos] stopping ...
[2017-08-22 22:12:57,742][INFO ][node ] [Lord Chaos] stopped
[2017-08-22 22:12:57,745][INFO ][node ] [Lord Chaos] closing ...
[2017-08-22 22:12:57,800][INFO ][node ] [Lord Chaos] closed

@ -1,16 +0,0 @@
[2017-08-23 08:49:52,348][INFO ][node ] [Al MacKenzie] version[1.7.6], pid[27703], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-23 08:49:52,355][INFO ][node ] [Al MacKenzie] initializing ...
[2017-08-23 08:49:53,361][INFO ][plugins ] [Al MacKenzie] loaded [], sites []
[2017-08-23 08:49:53,968][INFO ][env ] [Al MacKenzie] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9.1gb], net total_space [15.6gb], types [ext4]
[2017-08-23 08:50:34,920][INFO ][node ] [Al MacKenzie] initialized
[2017-08-23 08:50:34,921][INFO ][node ] [Al MacKenzie] starting ...
[2017-08-23 08:50:36,244][INFO ][transport ] [Al MacKenzie] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-23 08:50:36,682][INFO ][discovery ] [Al MacKenzie] elasticsearch/yZpEcrFeR8WdLtPOqnhytA
[2017-08-23 08:50:40,853][INFO ][cluster.service ] [Al MacKenzie] new_master [Al MacKenzie][yZpEcrFeR8WdLtPOqnhytA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-23 08:50:41,203][INFO ][http ] [Al MacKenzie] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-23 08:50:41,207][INFO ][node ] [Al MacKenzie] started
[2017-08-23 08:50:41,686][INFO ][gateway ] [Al MacKenzie] recovered [1] indices into cluster_state
[2017-08-23 20:41:57,691][INFO ][node ] [Al MacKenzie] stopping ...
[2017-08-23 20:41:58,153][INFO ][node ] [Al MacKenzie] stopped
[2017-08-23 20:41:58,154][INFO ][node ] [Al MacKenzie] closing ...
[2017-08-23 20:41:58,218][INFO ][node ] [Al MacKenzie] closed

@ -1,16 +0,0 @@
[2017-08-29 13:15:17,274][INFO ][node ] [Spyder] version[1.7.6], pid[13540], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-29 13:15:17,307][INFO ][node ] [Spyder] initializing ...
[2017-08-29 13:15:17,867][INFO ][plugins ] [Spyder] loaded [], sites []
[2017-08-29 13:15:18,165][INFO ][env ] [Spyder] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [9gb], net total_space [15.6gb], types [ext4]
[2017-08-29 13:15:32,716][INFO ][node ] [Spyder] initialized
[2017-08-29 13:15:32,723][INFO ][node ] [Spyder] starting ...
[2017-08-29 13:15:33,326][INFO ][transport ] [Spyder] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-29 13:15:33,617][INFO ][discovery ] [Spyder] elasticsearch/sRG0USqaQ3mkWahLBTfHPA
[2017-08-29 13:15:37,560][INFO ][cluster.service ] [Spyder] new_master [Spyder][sRG0USqaQ3mkWahLBTfHPA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-29 13:15:37,749][INFO ][http ] [Spyder] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-29 13:15:37,757][INFO ][node ] [Spyder] started
[2017-08-29 13:15:38,148][INFO ][gateway ] [Spyder] recovered [1] indices into cluster_state
[2017-08-29 21:54:24,432][INFO ][node ] [Spyder] stopping ...
[2017-08-29 21:54:25,124][INFO ][node ] [Spyder] stopped
[2017-08-29 21:54:25,125][INFO ][node ] [Spyder] closing ...
[2017-08-29 21:54:25,202][INFO ][node ] [Spyder] closed

@ -1,16 +0,0 @@
[2017-08-30 10:57:02,860][INFO ][node ] [Letha] version[1.7.6], pid[17344], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-30 10:57:02,897][INFO ][node ] [Letha] initializing ...
[2017-08-30 10:57:03,683][INFO ][plugins ] [Letha] loaded [], sites []
[2017-08-30 10:57:04,116][INFO ][env ] [Letha] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [8.9gb], net total_space [15.6gb], types [ext4]
[2017-08-30 10:57:21,785][INFO ][node ] [Letha] initialized
[2017-08-30 10:57:21,792][INFO ][node ] [Letha] starting ...
[2017-08-30 10:57:22,658][INFO ][transport ] [Letha] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-30 10:57:22,905][INFO ][discovery ] [Letha] elasticsearch/iBVu0AsHS5-1ZCU1c1io1w
[2017-08-30 10:57:26,871][INFO ][cluster.service ] [Letha] new_master [Letha][iBVu0AsHS5-1ZCU1c1io1w][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-30 10:57:27,060][INFO ][http ] [Letha] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-30 10:57:27,061][INFO ][node ] [Letha] started
[2017-08-30 10:57:27,700][INFO ][gateway ] [Letha] recovered [1] indices into cluster_state
[2017-08-30 17:33:36,181][INFO ][node ] [Letha] stopping ...
[2017-08-30 17:33:39,847][INFO ][node ] [Letha] stopped
[2017-08-30 17:33:39,848][INFO ][node ] [Letha] closing ...
[2017-08-30 17:33:40,057][INFO ][node ] [Letha] closed

@ -1,12 +0,0 @@
[2017-08-31 18:11:19,877][INFO ][node ] [Conrad Josten] version[1.7.6], pid[2484], build[c730b59/2016-11-18T15:21:16Z]
[2017-08-31 18:11:19,879][INFO ][node ] [Conrad Josten] initializing ...
[2017-08-31 18:11:20,629][INFO ][plugins ] [Conrad Josten] loaded [], sites []
[2017-08-31 18:11:20,959][INFO ][env ] [Conrad Josten] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [8.7gb], net total_space [15.6gb], types [ext4]
[2017-08-31 18:11:39,557][INFO ][node ] [Conrad Josten] initialized
[2017-08-31 18:11:39,570][INFO ][node ] [Conrad Josten] starting ...
[2017-08-31 18:11:40,276][INFO ][transport ] [Conrad Josten] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-08-31 18:11:40,466][INFO ][discovery ] [Conrad Josten] elasticsearch/YJkKc29eRMyPIbQUSuqj6g
[2017-08-31 18:11:44,385][INFO ][cluster.service ] [Conrad Josten] new_master [Conrad Josten][YJkKc29eRMyPIbQUSuqj6g][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-08-31 18:11:44,579][INFO ][http ] [Conrad Josten] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-08-31 18:11:44,598][INFO ][node ] [Conrad Josten] started
[2017-08-31 18:11:45,045][INFO ][gateway ] [Conrad Josten] recovered [1] indices into cluster_state

@ -1,4 +0,0 @@
[2017-09-01 22:46:46,882][INFO ][node ] [Conrad Josten] stopping ...
[2017-09-01 22:46:47,120][INFO ][node ] [Conrad Josten] stopped
[2017-09-01 22:46:47,121][INFO ][node ] [Conrad Josten] closing ...
[2017-09-01 22:46:47,159][INFO ][node ] [Conrad Josten] closed

@ -1,12 +0,0 @@
[2017-09-02 09:40:46,310][INFO ][node ] [Deborah Ritter] version[1.7.6], pid[20471], build[c730b59/2016-11-18T15:21:16Z]
[2017-09-02 09:40:46,317][INFO ][node ] [Deborah Ritter] initializing ...
[2017-09-02 09:40:46,778][INFO ][plugins ] [Deborah Ritter] loaded [], sites []
[2017-09-02 09:40:47,002][INFO ][env ] [Deborah Ritter] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [8.7gb], net total_space [15.6gb], types [ext4]
[2017-09-02 09:41:02,184][INFO ][node ] [Deborah Ritter] initialized
[2017-09-02 09:41:02,194][INFO ][node ] [Deborah Ritter] starting ...
[2017-09-02 09:41:03,023][INFO ][transport ] [Deborah Ritter] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-09-02 09:41:03,294][INFO ][discovery ] [Deborah Ritter] elasticsearch/BDgYqKwgQaiWDjCyPvQypA
[2017-09-02 09:41:07,327][INFO ][cluster.service ] [Deborah Ritter] new_master [Deborah Ritter][BDgYqKwgQaiWDjCyPvQypA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-09-02 09:41:07,561][INFO ][http ] [Deborah Ritter] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-09-02 09:41:07,574][INFO ][node ] [Deborah Ritter] started
[2017-09-02 09:41:08,085][INFO ][gateway ] [Deborah Ritter] recovered [1] indices into cluster_state

@ -1,4 +0,0 @@
[2017-09-06 10:28:30,431][INFO ][node ] [Deborah Ritter] stopping ...
[2017-09-06 10:28:30,821][INFO ][node ] [Deborah Ritter] stopped
[2017-09-06 10:28:30,822][INFO ][node ] [Deborah Ritter] closing ...
[2017-09-06 10:28:30,893][INFO ][node ] [Deborah Ritter] closed

@ -1,16 +0,0 @@
[2017-09-07 09:37:35,115][INFO ][node ] [Crystal] version[1.7.6], pid[18821], build[c730b59/2016-11-18T15:21:16Z]
[2017-09-07 09:37:35,206][INFO ][node ] [Crystal] initializing ...
[2017-09-07 09:37:36,131][INFO ][plugins ] [Crystal] loaded [], sites []
[2017-09-07 09:37:36,576][INFO ][env ] [Crystal] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [8.7gb], net total_space [15.6gb], types [ext4]
[2017-09-07 09:38:10,529][INFO ][node ] [Crystal] initialized
[2017-09-07 09:38:10,544][INFO ][node ] [Crystal] starting ...
[2017-09-07 09:38:11,801][INFO ][transport ] [Crystal] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-09-07 09:38:12,255][INFO ][discovery ] [Crystal] elasticsearch/ODWveRO0QriOYfQF_0rTWg
[2017-09-07 09:38:16,625][INFO ][cluster.service ] [Crystal] new_master [Crystal][ODWveRO0QriOYfQF_0rTWg][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-09-07 09:38:17,080][INFO ][http ] [Crystal] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-09-07 09:38:17,112][INFO ][node ] [Crystal] started
[2017-09-07 09:38:17,962][INFO ][gateway ] [Crystal] recovered [1] indices into cluster_state
[2017-09-07 18:40:50,244][INFO ][node ] [Crystal] stopping ...
[2017-09-07 18:40:50,546][INFO ][node ] [Crystal] stopped
[2017-09-07 18:40:50,547][INFO ][node ] [Crystal] closing ...
[2017-09-07 18:40:50,589][INFO ][node ] [Crystal] closed

@ -1,16 +0,0 @@
[2017-09-14 15:29:07,156][INFO ][node ] [Johnny Ohm] version[1.7.6], pid[8639], build[c730b59/2016-11-18T15:21:16Z]
[2017-09-14 15:29:07,161][INFO ][node ] [Johnny Ohm] initializing ...
[2017-09-14 15:29:07,804][INFO ][plugins ] [Johnny Ohm] loaded [], sites []
[2017-09-14 15:29:08,089][INFO ][env ] [Johnny Ohm] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [8.7gb], net total_space [15.6gb], types [ext4]
[2017-09-14 15:29:21,936][INFO ][node ] [Johnny Ohm] initialized
[2017-09-14 15:29:21,942][INFO ][node ] [Johnny Ohm] starting ...
[2017-09-14 15:29:22,444][INFO ][transport ] [Johnny Ohm] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.2.15:9300]}
[2017-09-14 15:29:22,689][INFO ][discovery ] [Johnny Ohm] elasticsearch/gZuvvXmBQ8CQGnZ3krxemA
[2017-09-14 15:29:26,807][INFO ][cluster.service ] [Johnny Ohm] new_master [Johnny Ohm][gZuvvXmBQ8CQGnZ3krxemA][denis-VirtualBox][inet[/10.0.2.15:9300]], reason: zen-disco-join (elected_as_master)
[2017-09-14 15:29:27,204][INFO ][http ] [Johnny Ohm] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.2.15:9200]}
[2017-09-14 15:29:27,211][INFO ][node ] [Johnny Ohm] started
[2017-09-14 15:29:27,977][INFO ][gateway ] [Johnny Ohm] recovered [1] indices into cluster_state
[2017-09-14 21:17:18,254][INFO ][node ] [Johnny Ohm] stopping ...
[2017-09-14 21:17:18,564][INFO ][node ] [Johnny Ohm] stopped
[2017-09-14 21:17:18,565][INFO ][node ] [Johnny Ohm] closing ...
[2017-09-14 21:17:18,611][INFO ][node ] [Johnny Ohm] closed
Loading…
Cancel
Save