parent
ed5803ba40
commit
f0c223ccae
720 changed files with 71754 additions and 0 deletions
@ -0,0 +1 @@ |
||||
from .celery import app as celery_app |
||||
@ -0,0 +1,10 @@ |
||||
import os |
||||
from celery import Celery |
||||
from django.conf import settings |
||||
|
||||
# Make sure the Django settings module is importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Eshop.settings')

# Single project-wide Celery application instance.
app = Celery('Eshop')

# Read CELERY_* options straight from Django settings (pre-Celery-4 style,
# without a namespace prefix).
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app (lazy so settings
# are only touched once Django is fully configured).
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
||||
@ -0,0 +1,215 @@ |
||||
""" |
||||
Django settings for Eshop project. |
||||
|
||||
Generated by 'django-admin startproject' using Django 1.10.6. |
||||
|
||||
For more information on this file, see |
||||
https://docs.djangoproject.com/en/1.10/topics/settings/ |
||||
|
||||
For the full list of settings and their values, see |
||||
https://docs.djangoproject.com/en/1.10/ref/settings/ |
||||
""" |
||||
|
||||
import os |
||||
|
||||
# Build paths inside the project like this: os.path.join(BASE_DIR, ...) |
||||
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
||||
|
||||
|
||||
# Quick-start development settings - unsuitable for production |
||||
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ |
||||
|
||||
# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded fallback below is already public (committed to the repo);
# rotate it and rely on the environment variable in production.
SECRET_KEY = os.environ.get('SOME_SECRET_KEY', '5bad1g&sjplz#xd@kz0d=ej%xw(n&_6ng#)()np9(vl)lw_h8u')

# SECURITY WARNING: don't run with debug turned on in production!
# Parse the env var explicitly instead of eval(): eval() executed arbitrary
# code taken straight from the process environment.
DEBUG = os.environ.get('DEBUG_MODE', 'True').strip().lower() in ('1', 'true', 'yes', 'on')

TEMPLATE_DEBUG = DEBUG

# In production only the known server IP may serve the site.
ALLOWED_HOSTS = [] if DEBUG else ['78.155.219.170']
||||
|
||||
|
||||
# Application definition |
||||
|
||||
INSTALLED_APPS = [ |
||||
'suit', |
||||
'django.contrib.admin', |
||||
'django.contrib.auth', |
||||
'django.contrib.contenttypes', |
||||
'django.contrib.sessions', |
||||
'django.contrib.messages', |
||||
'django.contrib.staticfiles', |
||||
'django.contrib.postgres', |
||||
|
||||
'import_export', |
||||
'djcelery_email', |
||||
'mptt', |
||||
'landing', |
||||
'orders', |
||||
'loginsys', |
||||
'userprofile', |
||||
# 'haystack', |
||||
'products', |
||||
'cart', |
||||
# 'paypal.standard.ipn', |
||||
# 'payment', |
||||
'discount', |
||||
|
||||
] |
||||
|
||||
MIDDLEWARE = [ |
||||
'django.middleware.security.SecurityMiddleware', |
||||
'django.contrib.sessions.middleware.SessionMiddleware', |
||||
'django.middleware.common.CommonMiddleware', |
||||
'django.middleware.csrf.CsrfViewMiddleware', |
||||
'django.contrib.auth.middleware.AuthenticationMiddleware', |
||||
'django.contrib.messages.middleware.MessageMiddleware', |
||||
'django.middleware.clickjacking.XFrameOptionsMiddleware', |
||||
] |
||||
|
||||
ROOT_URLCONF = 'Eshop.urls' |
||||
|
||||
TEMPLATES = [ |
||||
{ |
||||
'BACKEND': 'django.template.backends.django.DjangoTemplates', |
||||
'DIRS': [os.path.join(BASE_DIR, 'templates')], |
||||
'APP_DIRS': True, |
||||
'OPTIONS': { |
||||
'context_processors': [ |
||||
'django.template.context_processors.debug', |
||||
'django.template.context_processors.request', |
||||
'django.contrib.auth.context_processors.auth', |
||||
'django.contrib.messages.context_processors.messages', |
||||
'cart.context_processors.cart', |
||||
# 'orders.context_processors.getting_basket_info', |
||||
], |
||||
}, |
||||
}, |
||||
] |
||||
|
||||
WSGI_APPLICATION = 'Eshop.wsgi.application' |
||||
|
||||
MPTT_ADMIN_LEVEL_INDENT = 20 |
||||
# Database |
||||
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases |
||||
|
||||
|
||||
if DEBUG:
    # Local development database.
    # NOTE(review): credentials are hard-coded in source control — move them
    # to environment variables and rotate the passwords.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'eshop_db',
            'USER': 'denis',
            'PASSWORD': '12345678',
            'HOST': 'localhost',
            'PORT': '',  # empty string = default PostgreSQL port
        }
    }
else:
    # Production database; note it is overridden further down when
    # dj_database_url finds a DATABASE_URL in the environment.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'db1',
            'USER': 'django_shop',
            'PASSWORD': 'django_shop12345',
            'HOST': 'localhost',
            'PORT': '',
        }
    }
||||
|
||||
|
||||
# Password validation |
||||
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators |
||||
|
||||
AUTH_PASSWORD_VALIDATORS = [ |
||||
{ |
||||
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', |
||||
}, |
||||
{ |
||||
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', |
||||
}, |
||||
{ |
||||
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', |
||||
}, |
||||
{ |
||||
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', |
||||
}, |
||||
] |
||||
|
||||
|
||||
# Internationalization |
||||
# https://docs.djangoproject.com/en/1.10/topics/i18n/ |
||||
|
||||
LANGUAGE_CODE = 'ru-RU' |
||||
|
||||
DATE_FORMAT = 'd E Y' |
||||
|
||||
TIME_ZONE = 'UTC' |
||||
|
||||
USE_I18N = True |
||||
|
||||
USE_L10N = True |
||||
|
||||
USE_TZ = True |
||||
|
||||
|
||||
# Static files (CSS, JavaScript, Images) |
||||
# https://docs.djangoproject.com/en/1.10/howto/static-files/ |
||||
|
||||
STATIC_URL = '/static/' |
||||
|
||||
STATICFILES_DIRS = ( |
||||
os.path.join(BASE_DIR, "static", "static_dev"), |
||||
) |
||||
|
||||
STATIC_ROOT = os.path.join(BASE_DIR, "static/")#, "static_dev") |
||||
|
||||
MEDIA_URL = '/media/' |
||||
|
||||
MEDIA_ROOT = os.path.join(BASE_DIR, "static", "media") |
||||
|
||||
AUTH_PROFILE_MODULE = 'userprofile.UserProfile' |
||||
|
||||
CART_SESSION_ID = 'cart' |
||||
|
||||
# Email
# Site operators who receive error reports.
ADMINS = (
    ('Denis Balyasnikov', 'bda2291@mail.ru'),
)

MANAGERS = ADMINS

# SMTP transport: Gmail over STARTTLS.
# NOTE(review): a real account password is committed here — revoke this
# credential and load EMAIL_HOST_USER / EMAIL_HOST_PASSWORD from the
# environment instead.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'balyasnikovdenis22@gmail.com'
EMAIL_HOST_PASSWORD = 'ltybcbrhbcnbyf22'

FROM_EMAIL = 'notreply@russianprograms'
# Deliver mail asynchronously through Celery workers.
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
||||
|
||||
# for import-export excel data |
||||
IMPORT_EXPORT_USE_TRANSACTIONS = True |
||||
|
||||
# WHOOSH_INDEX = os.path.join(os.path.dirname(__file__), "whoosh/") |
||||
|
||||
# Uncomment for elasticsearch |
||||
|
||||
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor' |
||||
# HAYSTACK_SEARCH_RESULTS_PER_PAGE = 12 |
||||
# HAYSTACK_CONNECTIONS = { |
||||
# 'default': { |
||||
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine', |
||||
# 'URL': 'http://127.0.0.1:9200', |
||||
# 'INDEX_NAME': 'haystack', |
||||
# # 'INCLUDE_SPELLING': True, |
||||
# }, |
||||
# } |
||||
|
||||
import dj_database_url |
||||
db_from_env = dj_database_url.config(conn_max_age=500) |
||||
DATABASES['default'].update(db_from_env) |
||||
|
||||
#STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' |
||||
@ -0,0 +1,36 @@ |
||||
"""Eshop URL Configuration |
||||
|
||||
The `urlpatterns` list routes URLs to views. For more information please see: |
||||
https://docs.djangoproject.com/en/1.10/topics/http/urls/ |
||||
Examples: |
||||
Function views |
||||
1. Add an import: from my_app import views |
||||
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') |
||||
Class-based views |
||||
1. Add an import: from other_app.views import Home |
||||
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') |
||||
Including another URLconf |
||||
1. Import the include() function: from django.conf.urls import url, include |
||||
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) |
||||
""" |
||||
from django.conf.urls import url, include |
||||
from django.contrib import admin |
||||
from django.conf import settings |
||||
from django.conf.urls.static import static |
||||
|
||||
admin.autodiscover()

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^auth/', include('loginsys.urls', namespace='auth')),
    url(r'^accounts/', include('userprofile.urls', namespace='profile')),
    url(r'^cart/', include('cart.urls', namespace='cart')),
    url(r'^order/', include('orders.urls', namespace='orders')),
    url(r'^discount/', include('discount.urls', namespace='discount')),
    url(r'^search/', include('products.urls', namespace='products_search')),
    # NOTE(review): three catch-all r'^' includes — only the first matching
    # pattern wins, so 'landing' and the second 'orders' include are reachable
    # only for paths products.urls does not match. Confirm the order is intended.
    url(r'^', include('products.urls', namespace='products')),
    url(r'^', include('landing.urls')),
    url(r'^', include('orders.urls')),
]\
+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
||||
@ -0,0 +1,18 @@ |
||||
""" |
||||
WSGI config for Eshop project. |
||||
|
||||
It exposes the WSGI callable as a module-level variable named ``application``. |
||||
|
||||
For more information on this file, see |
||||
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ |
||||
""" |
||||
|
||||
import os |
||||
|
||||
from django.core.wsgi import get_wsgi_application |
||||
# from whitenoise.django import DjangoWhiteNoise |
||||
|
||||
# Default the settings module so direct wsgi.py invocations work too.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Eshop.settings")

# Module-level WSGI callable picked up by servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
# application = DjangoWhiteNoise(application)
||||
Binary file not shown.
Binary file not shown.
@ -0,0 +1,3 @@ |
||||
from django.contrib import admin |
||||
|
||||
# Register your models here. |
||||
@ -0,0 +1,5 @@ |
||||
from django.apps import AppConfig |
||||
|
||||
|
||||
class CartConfig(AppConfig): |
||||
name = 'cart' |
||||
@ -0,0 +1,86 @@ |
||||
from decimal import Decimal |
||||
from django.conf import settings |
||||
from django.contrib import auth |
||||
from products.models import Product, Offer |
||||
# from discount.models import Discount |
||||
|
||||
class Cart(object):
    """Session-backed shopping cart keyed by offer slug.

    Stored in ``request.session[settings.CART_SESSION_ID]`` as
    ``{offer_slug: {'quantity': int, 'price': str}}`` — the price is kept as
    a string so the session stays serializable.
    """

    def __init__(self, request):
        self.session = request.session
        self.discount_id = self.session.get('discount_id')
        # Defaults for anonymous visitors. The original left these attributes
        # unset when the user was not authenticated, so any later access
        # (e.g. get_total_deduct_points) raised AttributeError.
        self.points = None
        self.points_quant = 0
        if request.user.is_authenticated():
            self.points = self.session.get('points')
            self.points_quant = auth.get_user(request).profile.user_points
        cart = self.session.get(settings.CART_SESSION_ID)
        if not cart:
            request.session['points'] = False
            cart = self.session[settings.CART_SESSION_ID] = {}
        self.cart = cart

    def add(self, offer, price_per_itom, quantity=1, update_quantity=False):
        """Add *quantity* units of *offer*, or replace the stored quantity
        when *update_quantity* is true. ``price_per_itom`` is the unit price
        snapshot taken by the caller."""
        offer_slug = offer.slug
        if offer_slug not in self.cart:
            self.cart[offer_slug] = {'quantity': 0,
                                     'price': str(price_per_itom)}
        if update_quantity:
            self.cart[offer_slug]['quantity'] = int(quantity)
        else:
            self.cart[offer_slug]['quantity'] += int(quantity)
        self.save()

    def save(self):
        """Write the cart back to the session and mark the session dirty."""
        self.session[settings.CART_SESSION_ID] = self.cart
        self.session.modified = True

    def remove(self, offer_slug):
        """Delete one item by slug; unknown slugs are silently ignored."""
        if offer_slug in self.cart:
            del self.cart[offer_slug]
            self.save()

    def __iter__(self):
        """Yield item dicts enriched with the Offer object, Decimal price
        and a per-line total.

        NOTE(review): this mutates the session-stored dicts with
        non-serializable objects (Offer, Decimal) — confirm the session
        serializer tolerates that, or iterate over a copy instead.
        """
        offers_ids = self.cart.keys()
        offers = Offer.objects.filter(slug__in=offers_ids)

        for offer in offers:
            self.cart[str(offer.slug)]['offer'] = offer

        for item in self.cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item

    def __len__(self):
        """Total number of units (sum of quantities) in the cart."""
        return sum(item['quantity'] for item in self.cart.values())

    def get_total_price(self):
        """Cart grand total as a Decimal."""
        return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())

    def clear(self):
        """Drop the whole cart from the session."""
        del self.session[settings.CART_SESSION_ID]
        self.session.modified = True

    def get_total_deduct_points(self):
        """Amount still payable after spending the user's bonus points.

        Returns 1 (a symbolic minimum charge) when the points cover the
        whole total; leftover debug print() calls were removed.
        """
        total_price = self.get_total_price()
        if total_price <= self.points_quant:
            self.points_quant = self.points_quant - total_price + 1
            return 1
        return total_price - self.points_quant
||||
|
||||
@ -0,0 +1,6 @@ |
||||
from .cart import Cart |
||||
|
||||
|
||||
def cart(request):
    """Template context processor exposing the session cart as ``cart``."""
    current_cart = Cart(request)
    return {'cart': current_cart}
||||
|
||||
@ -0,0 +1,22 @@ |
||||
from django import forms |
||||
|
||||
#PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)] |
||||
|
||||
class CartAddProductForm(forms.Form):
    """Form used both to add an offer to the cart and to update its quantity.

    Rendered on the product page and on the cart detail page; the hidden
    fields carry the offer identity and unit price, while ``update`` tells
    the view whether to replace the stored quantity or add to it.
    """

    #quantity = forms.TypedChoiceField(choices=PRODUCT_QUANTITY_CHOICES, coerce=int)
    # Free-form quantity, rendered as an HTML number input that triggers the
    # page's calculate() JS handler on change.
    quantity = forms.CharField(required=True, widget=forms.TextInput(attrs={
        'id': 'quantity',
        'name': 'quantity',
        'type': 'number',
        'value': '0',
        'onchange': 'calculate()'}))
    # Hidden: slug of the Offer being added (resolved in the view).
    product_slug = forms.CharField(label="product_slug", widget=forms.TextInput(attrs={
        'id': 'product_slug',
        'name': 'product_slug',
        'type': 'hidden'}))
    # Hidden: unit-price snapshot taken when the form was rendered.
    # NOTE(review): "itom" looks like a typo for "item", but the field name is
    # part of the POST contract with the templates — renaming must be coordinated.
    price_per_itom = forms.IntegerField(label="price_per_itom", widget=forms.TextInput(attrs={
        'id': 'price_per_itom',
        'name': 'price_per_itom',
        'type': 'hidden'}))
    # True when the submission replaces the stored quantity instead of adding.
    update = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
||||
|
||||
@ -0,0 +1,3 @@ |
||||
from django.db import models |
||||
|
||||
# Create your models here. |
||||
@ -0,0 +1,3 @@ |
||||
from django.test import TestCase |
||||
|
||||
# Create your tests here. |
||||
@ -0,0 +1,8 @@ |
||||
from django.conf.urls import url |
||||
from . import views |
||||
|
||||
urlpatterns = [
    # Cart detail page (also the redirect target after add/remove).
    url(r'^$', views.CartDetail, name='CartDetail'),
    # Remove one offer, identified by slug, from the cart.
    url(r'^remove/(?P<offer_slug>[-\w]+)/$', views.CartRemove, name='CartRemove'),
    # POST-only endpoint adding/updating an offer via CartAddProductForm.
    url(r'^add/$', views.CartAdd, name='CartAdd'),
]
||||
@ -0,0 +1,41 @@ |
||||
from django.shortcuts import render, redirect, get_object_or_404 |
||||
from django.views.decorators.http import require_POST |
||||
from django.views.decorators.csrf import csrf_exempt |
||||
from django.contrib import auth |
||||
from products.models import Product, Offer |
||||
from .cart import Cart |
||||
from .forms import CartAddProductForm |
||||
# from discount.forms import DiscountApllyForm |
||||
|
||||
@csrf_exempt
@require_POST
def CartAdd(request):
    """Add an offer to the session cart (or update its quantity) from POST data.

    NOTE(review): @csrf_exempt disables CSRF protection on a state-changing
    endpoint — confirm this is intentional (e.g. an AJAX client that cannot
    send the token) before keeping it.
    """
    cart = Cart(request)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        offer = get_object_or_404(Offer, slug=cd['product_slug'])
        cart.add(offer=offer, price_per_itom=cd['price_per_itom'], quantity=cd['quantity'],
                 update_quantity=cd['update'])
    # An invalid form falls through silently and just shows the cart again.
    return redirect('cart:CartDetail')
||||
|
||||
def CartRemove(request, offer_slug):
    """Drop the offer identified by *offer_slug* from the session cart,
    then return to the cart page."""
    Cart(request).remove(offer_slug)
    return redirect('cart:CartDetail')
||||
|
||||
def CartDetail(request, points=False):
    """Render the cart page for the current user.

    Attaches a pre-filled CartAddProductForm to every cart item so the
    template can render per-item quantity editors.
    NOTE(review): the template context only receives ``username`` and
    ``points``; the items/forms must reach the template through the ``cart``
    context processor — confirm the processor's fresh Cart instance actually
    still carries these form objects.
    """
    user = auth.get_user(request)
    cart = Cart(request)
    for item in cart:
        item['update_quantity_form'] = CartAddProductForm(
            initial={
                'quantity': item['quantity'],
                'product_slug': item['offer'].slug,
                'price_per_itom': item['price'],
                'update': True
            })
    # discount_apply_form = DiscountApllyForm()
    return render(request, 'cart/detail.html', {'username': user.username, 'points': points})
    # 'discount_apply_form': discount_apply_form})
||||
@ -0,0 +1,10 @@ |
||||
from django.contrib import admin |
||||
from .models import Discount |
||||
|
||||
|
||||
class DiscountAdmin(admin.ModelAdmin):
    """Admin configuration for discount codes."""

    # Columns shown in the changelist.
    list_display = ['code', 'valid_from', 'valid_to', 'discount', 'active']
    # Sidebar filters.
    list_filter = ['valid_from', 'valid_to', 'active']
    # Bug fix: the option is `search_fields` (plural). The original
    # `search_field` is not a ModelAdmin attribute and was silently ignored,
    # so the admin search box never appeared.
    search_fields = ['code']


admin.site.register(Discount, DiscountAdmin)
||||
@ -0,0 +1,5 @@ |
||||
from django.apps import AppConfig |
||||
|
||||
|
||||
class DiscountConfig(AppConfig):
    """App configuration for the ``discount`` application."""

    name = 'discount'
||||
@ -0,0 +1,4 @@ |
||||
from django import forms |
||||
|
||||
class DiscountApllyForm(forms.Form):
    """Single-field form for entering a discount code.

    NOTE(review): the class name has a typo ("Aplly"), but it is imported
    by other modules — renaming must be coordinated with all importers.
    """

    code = forms.CharField()
||||
@ -0,0 +1,26 @@ |
||||
from django.db import models |
||||
import uuid |
||||
from django.db.models.signals import post_save |
||||
from datetime import datetime, timedelta |
||||
from django.core.validators import MinValueValidator, MaxValueValidator |
||||
from django.contrib.auth.models import User |
||||
|
||||
def _default_code():
    """Return a fresh unique voucher code.

    Must be a callable: the original ``default=str(uuid.uuid4())`` was
    evaluated ONCE at import time, so every new row shared the same
    "default" code and collided with the ``unique=True`` constraint
    after the first save.
    """
    return str(uuid.uuid4())


def _default_valid_to():
    """Expiry one week from creation (evaluated per row, not per import)."""
    return datetime.now() + timedelta(days=7)


class Discount(models.Model):
    """A personal discount code valid inside a limited time window."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    code = models.CharField(max_length=50, blank=True, unique=True, default=_default_code)
    valid_from = models.DateTimeField(default=datetime.now, blank=True)
    # NOTE(review): naive datetimes with USE_TZ=True in settings will produce
    # warnings / skewed comparisons; consider django.utils.timezone.now — confirm.
    valid_to = models.DateTimeField(default=_default_valid_to, blank=True)
    # Percentage discount, clamped to 0..100.
    discount = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)], default=10)
    active = models.BooleanField(default=True)

    def __str__(self):
        return self.code


def create_discount(sender, **kwargs):
    """post_save handler: create a Discount for each newly created User."""
    if kwargs['created']:
        Discount.objects.create(user=kwargs['instance'])


# post_save.connect(create_discount, sender=User)

# Lazy accessor: ``user.discount`` returns (creating on first access) the
# user's personal Discount row.
User.discount = property(lambda u: Discount.objects.get_or_create(user=u)[0])
||||
|
||||
@ -0,0 +1,3 @@ |
||||
from django.test import TestCase |
||||
|
||||
# Create your tests here. |
||||
@ -0,0 +1,9 @@ |
||||
from django.conf.urls import url |
||||
from . import views |
||||
|
||||
|
||||
urlpatterns = [
    # Apply a discount code from POSTed form data.
    url(r'^apply', views.DiscountApply, name='apply'),
    # (Re)generate the current user's personal discount code.
    url(r'^create', views.CreateDiscount, name='create'),
    # Pay (part of) the order with accumulated bonus points.
    url(r'^points', views.PointsApply, name='points')
]
||||
@ -0,0 +1,47 @@ |
||||
import uuid |
||||
from datetime import datetime |
||||
from django.shortcuts import render, redirect |
||||
from django.views.decorators.csrf import csrf_exempt |
||||
from datetime import datetime, timedelta |
||||
from django.contrib import auth |
||||
from django.views.decorators.http import require_POST |
||||
from django.contrib.auth.decorators import login_required |
||||
from .models import Discount |
||||
from .forms import DiscountApllyForm |
||||
|
||||
@login_required
@require_POST
@csrf_exempt
def PointsApply(request):
    """Mark the current checkout as paying with bonus points.

    NOTE(review): ``redirect('cart:CartDetail', points=True)`` passes a URL
    kwarg, but the ``cart:CartDetail`` pattern (r'^$') accepts none — this
    looks like it would raise NoReverseMatch; the commented-out session flag
    below may have been the intended mechanism. Confirm.
    """
    # request.session['points'] = True
    return redirect('cart:CartDetail', points=True)
||||
|
||||
@require_POST
def DiscountApply(request):
    """Validate a POSTed discount code and remember it in the session.

    Stores ``discount_id`` in the session (None when the code is unknown,
    expired, or inactive); the cart reads it back later.
    NOTE(review): uses naive datetime.now() while the project sets
    USE_TZ=True — comparisons against aware DB datetimes may misbehave;
    consider django.utils.timezone.now(). Confirm.
    """
    now = datetime.now()
    form = DiscountApllyForm(request.POST)
    if form.is_valid():
        code = form.cleaned_data['code']
        try:
            # Case-insensitive lookup, restricted to currently-valid codes.
            discount = Discount.objects.get(code__iexact=code,
                                            valid_from__lte=now,
                                            valid_to__gte=now,
                                            active=True)
            request.session['discount_id'] = discount.id
        except Discount.DoesNotExist:
            # Unknown/expired code: clear any previously applied discount.
            request.session['discount_id'] = None

    return redirect('cart:CartDetail')
||||
|
||||
@login_required
@require_POST
@csrf_exempt
def CreateDiscount(request):
    """(Re)issue the current user's personal discount code for one week."""
    fresh_values = {
        'code': str(uuid.uuid4()),
        'valid_from': datetime.now(),
        'valid_to': datetime.now() + timedelta(days=7),
        'active': True,
    }
    current_user = auth.get_user(request)
    Discount.objects.update_or_create(user=current_user, defaults=fresh_values)
    return redirect('profile:user_profile')
||||
|
||||
|
||||
|
||||
|
||||
@ -0,0 +1,202 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "[]" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright [yyyy] [name of copyright owner] |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
@ -0,0 +1,5 @@ |
||||
Elasticsearch |
||||
Copyright 2009-2015 Elasticsearch |
||||
|
||||
This product includes software developed by The Apache Software |
||||
Foundation (http://www.apache.org/). |
||||
@ -0,0 +1,235 @@ |
||||
h1. Elasticsearch |
||||
|
||||
h2. A Distributed RESTful Search Engine |
||||
|
||||
h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch |
||||
|
||||
Elasticsearch is a distributed RESTful search engine built for the cloud. Features include: |
||||
|
||||
* Distributed and Highly Available Search Engine. |
||||
** Each index is fully sharded with a configurable number of shards. |
||||
** Each shard can have one or more replicas. |
||||
** Read / Search operations performed on either one of the replica shard. |
||||
* Multi Tenant with Multi Types. |
||||
** Support for more than one index. |
||||
** Support for more than one type per index. |
||||
** Index level configuration (number of shards, index storage, ...). |
||||
* Various set of APIs |
||||
** HTTP RESTful API |
||||
** Native Java API. |
||||
** All APIs perform automatic node operation rerouting. |
||||
* Document oriented |
||||
** No need for upfront schema definition. |
||||
** Schema can be defined per type for customization of the indexing process. |
||||
* Reliable, Asynchronous Write Behind for long term persistency. |
||||
* (Near) Real Time Search. |
||||
* Built on top of Lucene |
||||
** Each shard is a fully functional Lucene index |
||||
** All the power of Lucene easily exposed through simple configuration / plugins. |
||||
* Per operation consistency |
||||
** Single document level operations are atomic, consistent, isolated and durable. |
||||
* Open Source under the Apache License, version 2 ("ALv2") |
||||
|
||||
h2. Getting Started |
||||
|
||||
First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about. |
||||
|
||||
h3. Requirements |
||||
|
||||
You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information. |
||||
|
||||
h3. Installation |
||||
|
||||
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution. |
||||
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows. |
||||
* Run @curl -X GET http://localhost:9200/@. |
||||
* Start more servers ... |
||||
|
||||
h3. Indexing |
||||
|
||||
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically): |
||||
|
||||
<pre> |
||||
curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }' |
||||
|
||||
curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d ' |
||||
{ |
||||
"user": "kimchy", |
||||
"postDate": "2009-11-15T13:12:00", |
||||
"message": "Trying out Elasticsearch, so far so good?" |
||||
}' |
||||
|
||||
curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d ' |
||||
{ |
||||
"user": "kimchy", |
||||
"postDate": "2009-11-15T14:12:12", |
||||
"message": "Another tweet, will it be indexed?" |
||||
}' |
||||
</pre> |
||||
|
||||
Now, let's see if the information was added by GETting it: |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true' |
||||
curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true' |
||||
curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true' |
||||
</pre> |
||||
|
||||
h3. Searching |
||||
|
||||
Mmm search..., shouldn't it be elastic? |
||||
Let's find all the tweets that @kimchy@ posted: |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true' |
||||
</pre> |
||||
|
||||
We can also use the JSON query language Elasticsearch provides instead of a query string: |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d ' |
||||
{ |
||||
"query" : { |
||||
"match" : { "user": "kimchy" } |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
Just for kicks, let's get all the documents stored (we should see the user as well): |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d ' |
||||
{ |
||||
"query" : { |
||||
"matchAll" : {} |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
We can also do range search (the @postDate@ was automatically identified as date) |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d ' |
||||
{ |
||||
"query" : { |
||||
"range" : { |
||||
"postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" } |
||||
} |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser. |
||||
|
||||
h3. Multi Tenant - Indices and Types |
||||
|
||||
Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data. |
||||
|
||||
Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@. |
||||
|
||||
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case: |
||||
|
||||
<pre> |
||||
curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }' |
||||
|
||||
curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d ' |
||||
{ |
||||
"user": "kimchy", |
||||
"postDate": "2009-11-15T13:12:00", |
||||
"message": "Trying out Elasticsearch, so far so good?" |
||||
}' |
||||
|
||||
curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d ' |
||||
{ |
||||
"user": "kimchy", |
||||
"postDate": "2009-11-15T14:12:12", |
||||
"message": "Another tweet, will it be indexed?" |
||||
}' |
||||
</pre> |
||||
|
||||
The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index. |
||||
|
||||
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well): |
||||
|
||||
<pre> |
||||
curl -XPUT http://localhost:9200/another_user/ -d ' |
||||
{ |
||||
"index" : { |
||||
"numberOfShards" : 1, |
||||
"numberOfReplicas" : 1 |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
Search (and similar operations) are multi index aware. This means that we can easily search on more than one |
||||
index (twitter user), for example: |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d ' |
||||
{ |
||||
"query" : { |
||||
"matchAll" : {} |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
Or on all the indices: |
||||
|
||||
<pre> |
||||
curl -XGET 'http://localhost:9200/_search?pretty=true' -d ' |
||||
{ |
||||
"query" : { |
||||
"matchAll" : {} |
||||
} |
||||
}' |
||||
</pre> |
||||
|
||||
{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends). |
||||
|
||||
h3. Distributed, Highly Available |
||||
|
||||
Let's face it, things will fail.... |
||||
|
||||
Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards). |
||||
|
||||
In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed. |
||||
|
||||
h3. Where to go from here? |
||||
|
||||
We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website. |
||||
|
||||
h3. Building from Source |
||||
|
||||
Elasticsearch uses "Maven":http://maven.apache.org for its build system. |
||||
|
||||
In order to create a distribution, simply run the @mvn clean package |
||||
-DskipTests@ command in the cloned directory. |
||||
|
||||
The distribution will be created under @target/releases@. |
||||
|
||||
See the "TESTING":TESTING.asciidoc file for more information about |
||||
running the Elasticsearch test suite. |
||||
|
||||
h3. Upgrading to Elasticsearch 1.x? |
||||
|
||||
In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process. |
||||
|
||||
h1. License |
||||
|
||||
<pre> |
||||
This software is licensed under the Apache License, version 2 ("ALv2"), quoted below. |
||||
|
||||
Copyright 2009-2015 Elasticsearch <https://www.elastic.co> |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not |
||||
use this file except in compliance with the License. You may obtain a copy of |
||||
the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
||||
License for the specific language governing permissions and limitations under |
||||
the License. |
||||
</pre> |
||||
@ -0,0 +1,251 @@ |
||||
#!/bin/sh |
||||
|
||||
# OPTIONS: |
||||
# -d daemonize (run in background) |
||||
# -p pidfile write PID to <pidfile> |
||||
# -h |
||||
# --help print command line options |
||||
# -v print elasticsearch version, then exit |
||||
# -D prop set JAVA system property |
||||
# -X prop set non-standard JAVA system property |
||||
# --prop=val |
||||
# --prop val set elasticsearch property (i.e. -Des.<prop>=<val>) |
||||
|
||||
# CONTROLLING STARTUP: |
||||
# |
||||
# This script relies on few environment variables to determine startup |
||||
# behavior, those variables are: |
||||
# |
||||
# ES_CLASSPATH -- A Java classpath containing everything necessary to run. |
||||
# JAVA_OPTS -- Additional arguments to the JVM for heap size, etc |
||||
# ES_JAVA_OPTS -- External Java Opts on top of the defaults set |
||||
# |
||||
# |
||||
# Optionally, exact memory values can be set using the following values, note, |
||||
# they can still be set using the `ES_JAVA_OPTS`. Sample format include "512m", and "10g". |
||||
# |
||||
# ES_HEAP_SIZE -- Sets both the minimum and maximum memory to allocate (recommended) |
||||
# |
||||
# As a convenience, a fragment of shell is sourced in order to set one or |
||||
# more of these variables. This so-called `include' can be placed in a |
||||
# number of locations and will be searched for in order. The lowest |
||||
# priority search path is the same directory as the startup script, and |
||||
# since this is the location of the sample in the project tree, it should |
||||
# almost work Out Of The Box. |
||||
# |
||||
# Any serious use-case though will likely require customization of the |
||||
# include. For production installations, it is recommended that you copy |
||||
# the sample to one of /usr/share/elasticsearch/elasticsearch.in.sh, |
||||
# /usr/local/share/elasticsearch/elasticsearch.in.sh, or |
||||
# /opt/elasticsearch/elasticsearch.in.sh and make your modifications there. |
||||
# |
||||
# Another option is to specify the full path to the include file in the |
||||
# environment. For example: |
||||
# |
||||
# $ ES_INCLUDE=/path/to/in.sh elasticsearch -p /var/run/es.pid |
||||
# |
||||
# Note: This is particularly handy for running multiple instances on a |
||||
# single installation, or for quick tests. |
||||
# |
||||
# If you would rather configure startup entirely from the environment, you |
||||
# can disable the include by exporting an empty ES_INCLUDE, or by |
||||
# ensuring that no include files exist in the aforementioned search list. |
||||
# Be aware that you will be entirely responsible for populating the needed |
||||
# environment variables. |
||||
|
||||
|
||||
# Maven will replace the project.name with elasticsearch below. If that |
||||
# hasn't been done, we assume that this is not a packaged version and the |
||||
# user has forgotten to run Maven to create a package. |
||||
IS_PACKAGED_VERSION='elasticsearch' |
||||
if [ "$IS_PACKAGED_VERSION" != "elasticsearch" ]; then |
||||
cat >&2 << EOF |
||||
Error: You must build the project with Maven or download a pre-built package |
||||
before you can run Elasticsearch. See 'Building from Source' in README.textile |
||||
or visit http://www.elasticsearch.org/download to get a pre-built package. |
||||
EOF |
||||
exit 1 |
||||
fi |
||||
|
||||
CDPATH="" |
||||
SCRIPT="$0" |
||||
|
||||
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. |
||||
while [ -h "$SCRIPT" ] ; do |
||||
ls=`ls -ld "$SCRIPT"` |
||||
# Drop everything prior to -> |
||||
link=`expr "$ls" : '.*-> \(.*\)$'` |
||||
if expr "$link" : '/.*' > /dev/null; then |
||||
SCRIPT="$link" |
||||
else |
||||
SCRIPT=`dirname "$SCRIPT"`/"$link" |
||||
fi |
||||
done |
||||
|
||||
# determine elasticsearch home |
||||
ES_HOME=`dirname "$SCRIPT"`/.. |
||||
|
||||
# make ELASTICSEARCH_HOME absolute |
||||
ES_HOME=`cd "$ES_HOME"; pwd` |
||||
|
||||
|
||||
# If an include wasn't specified in the environment, then search for one... |
||||
if [ "x$ES_INCLUDE" = "x" ]; then |
||||
# Locations (in order) to use when searching for an include file. |
||||
for include in /usr/share/elasticsearch/elasticsearch.in.sh \ |
||||
/usr/local/share/elasticsearch/elasticsearch.in.sh \ |
||||
/opt/elasticsearch/elasticsearch.in.sh \ |
||||
~/.elasticsearch.in.sh \ |
||||
$ES_HOME/bin/elasticsearch.in.sh \ |
||||
"`dirname "$0"`"/elasticsearch.in.sh; do |
||||
if [ -r "$include" ]; then |
||||
. "$include" |
||||
break |
||||
fi |
||||
done |
||||
# ...otherwise, source the specified include. |
||||
elif [ -r "$ES_INCLUDE" ]; then |
||||
. "$ES_INCLUDE" |
||||
fi |
||||
|
||||
if [ -x "$JAVA_HOME/bin/java" ]; then |
||||
JAVA="$JAVA_HOME/bin/java" |
||||
else |
||||
JAVA=`which java` |
||||
fi |
||||
|
||||
if [ ! -x "$JAVA" ]; then |
||||
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME" |
||||
exit 1 |
||||
fi |
||||
|
||||
if [ -z "$ES_CLASSPATH" ]; then |
||||
echo "You must set the ES_CLASSPATH var" >&2 |
||||
exit 1 |
||||
fi |
||||
|
||||
# Special-case path variables. |
||||
case `uname` in |
||||
CYGWIN*) |
||||
ES_CLASSPATH=`cygpath -p -w "$ES_CLASSPATH"` |
||||
ES_HOME=`cygpath -p -w "$ES_HOME"` |
||||
;; |
||||
esac |
||||
|
||||
launch_service() |
||||
{ |
||||
pidpath=$1 |
||||
daemonized=$2 |
||||
props=$3 |
||||
es_parms="-Delasticsearch" |
||||
|
||||
if [ "x$pidpath" != "x" ]; then |
||||
es_parms="$es_parms -Des.pidfile=$pidpath" |
||||
fi |
||||
|
||||
# Make sure we dont use any predefined locale, as we check some exception message strings and rely on english language |
||||
# As those strings are created by the OS, they are dependant on the configured locale |
||||
LANG=en_US.UTF-8 |
||||
LC_ALL=en_US.UTF-8 |
||||
|
||||
export HOSTNAME=`hostname -s` |
||||
|
||||
# The es-foreground option will tell Elasticsearch not to close stdout/stderr, but it's up to us not to daemonize. |
||||
if [ "x$daemonized" = "x" ]; then |
||||
es_parms="$es_parms -Des.foreground=yes" |
||||
exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \ |
||||
org.elasticsearch.bootstrap.Elasticsearch |
||||
# exec without running it in the background, makes it replace this shell, we'll never get here... |
||||
# no need to return something |
||||
else |
||||
# Startup Elasticsearch, background it, and write the pid. |
||||
exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \ |
||||
org.elasticsearch.bootstrap.Elasticsearch <&- & |
||||
return $? |
||||
fi |
||||
} |
||||
|
||||
# Print command line usage / help |
||||
usage() { |
||||
echo "Usage: $0 [-vdh] [-p pidfile] [-D prop] [-X prop]" |
||||
echo "Start elasticsearch." |
||||
echo " -d daemonize (run in background)" |
||||
echo " -p pidfile write PID to <pidfile>" |
||||
echo " -h" |
||||
echo " --help print command line options" |
||||
echo " -v print elasticsearch version, then exit" |
||||
echo " -D prop set JAVA system property" |
||||
echo " -X prop set non-standard JAVA system property" |
||||
echo " --prop=val" |
||||
echo " --prop val set elasticsearch property (i.e. -Des.<prop>=<val>)" |
||||
} |
||||
|
||||
# Parse any long getopt options and put them into properties before calling getopt below |
||||
# Be dash compatible to make sure running under ubuntu works |
||||
ARGV="" |
||||
while [ $# -gt 0 ] |
||||
do |
||||
case $1 in |
||||
--help) ARGV="$ARGV -h"; shift;; |
||||
--*=*) properties="$properties -Des.${1#--}" |
||||
shift 1 |
||||
;; |
||||
--*) [ $# -le 1 ] && { |
||||
echo "Option requires an argument: '$1'." |
||||
shift |
||||
continue |
||||
} |
||||
properties="$properties -Des.${1#--}=$2" |
||||
shift 2 |
||||
;; |
||||
*) ARGV="$ARGV $1" ; shift |
||||
esac |
||||
done |
||||
|
||||
# Parse any command line options. |
||||
args=`getopt vdhp:D:X: $ARGV` |
||||
eval set -- "$args" |
||||
|
||||
while true; do |
||||
case $1 in |
||||
-v) |
||||
"$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \ |
||||
org.elasticsearch.Version |
||||
exit 0 |
||||
;; |
||||
-p) |
||||
pidfile="$2" |
||||
shift 2 |
||||
;; |
||||
-d) |
||||
daemonized="yes" |
||||
shift |
||||
;; |
||||
-h) |
||||
usage |
||||
exit 0 |
||||
;; |
||||
-D) |
||||
properties="$properties -D$2" |
||||
shift 2 |
||||
;; |
||||
-X) |
||||
properties="$properties -X$2" |
||||
shift 2 |
||||
;; |
||||
--) |
||||
shift |
||||
break |
||||
;; |
||||
*) |
||||
echo "Error parsing argument $1!" >&2 |
||||
usage |
||||
exit 1 |
||||
;; |
||||
esac |
||||
done |
||||
|
||||
# Start up the service |
||||
launch_service "$pidfile" "$daemonized" "$properties" |
||||
|
||||
exit $? |
||||
@ -0,0 +1,48 @@ |
||||
@echo off |
||||
|
||||
SETLOCAL enabledelayedexpansion |
||||
TITLE Elasticsearch 1.7.6 |
||||
|
||||
SET params='%*' |
||||
|
||||
:loop |
||||
FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO ( |
||||
SET current=%%A |
||||
SET params='%%B' |
||||
SET silent=N |
||||
|
||||
IF "!current!" == "-s" ( |
||||
SET silent=Y |
||||
) |
||||
IF "!current!" == "--silent" ( |
||||
SET silent=Y |
||||
) |
||||
|
||||
IF "!silent!" == "Y" ( |
||||
SET nopauseonerror=Y |
||||
) ELSE ( |
||||
IF "x!newparams!" NEQ "x" ( |
||||
SET newparams=!newparams! !current! |
||||
) ELSE ( |
||||
SET newparams=!current! |
||||
) |
||||
) |
||||
|
||||
IF "x!params!" NEQ "x" ( |
||||
GOTO loop |
||||
) |
||||
) |
||||
|
||||
SET HOSTNAME=%COMPUTERNAME% |
||||
|
||||
CALL "%~dp0elasticsearch.in.bat" |
||||
IF ERRORLEVEL 1 ( |
||||
IF NOT DEFINED nopauseonerror ( |
||||
PAUSE |
||||
) |
||||
EXIT /B %ERRORLEVEL% |
||||
) |
||||
|
||||
"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% %ES_PARAMS% !newparams! -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" |
||||
|
||||
ENDLOCAL |
||||
@ -0,0 +1,83 @@ |
||||
@echo off |
||||
|
||||
if DEFINED JAVA_HOME goto cont |
||||
|
||||
:err |
||||
ECHO JAVA_HOME environment variable must be set! 1>&2 |
||||
EXIT /B 1 |
||||
|
||||
:cont |
||||
set SCRIPT_DIR=%~dp0 |
||||
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI |
||||
|
||||
|
||||
REM ***** JAVA options ***** |
||||
|
||||
if "%ES_MIN_MEM%" == "" ( |
||||
set ES_MIN_MEM=256m |
||||
) |
||||
|
||||
if "%ES_MAX_MEM%" == "" ( |
||||
set ES_MAX_MEM=1g |
||||
) |
||||
|
||||
if NOT "%ES_HEAP_SIZE%" == "" ( |
||||
set ES_MIN_MEM=%ES_HEAP_SIZE% |
||||
set ES_MAX_MEM=%ES_HEAP_SIZE% |
||||
) |
||||
|
||||
REM min and max heap sizes should be set to the same value to avoid |
||||
REM stop-the-world GC pauses during resize, and so that we can lock the |
||||
REM heap in memory on startup to prevent any of it from being swapped |
||||
REM out. |
||||
set JAVA_OPTS=%JAVA_OPTS% -Xms%ES_MIN_MEM% -Xmx%ES_MAX_MEM% |
||||
|
||||
REM new generation |
||||
if NOT "%ES_HEAP_NEWSIZE%" == "" ( |
||||
set JAVA_OPTS=%JAVA_OPTS% -Xmn%ES_HEAP_NEWSIZE% |
||||
) |
||||
|
||||
REM max direct memory |
||||
if NOT "%ES_DIRECT_SIZE%" == "" ( |
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:MaxDirectMemorySize=%ES_DIRECT_SIZE% |
||||
) |
||||
|
||||
REM set to headless, just in case |
||||
set JAVA_OPTS=%JAVA_OPTS% -Djava.awt.headless=true |
||||
|
||||
REM Force the JVM to use IPv4 stack |
||||
if NOT "%ES_USE_IPV4%" == "" ( |
||||
set JAVA_OPTS=%JAVA_OPTS% -Djava.net.preferIPv4Stack=true |
||||
) |
||||
|
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseParNewGC |
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseConcMarkSweepGC |
||||
|
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:CMSInitiatingOccupancyFraction=75 |
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:+UseCMSInitiatingOccupancyOnly |
||||
|
||||
REM When running under Java 7 |
||||
REM JAVA_OPTS=%JAVA_OPTS% -XX:+UseCondCardMark |
||||
|
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDateStamps |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime |
||||
if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -Xloggc:%ES_HOME%/logs/gc.log |
||||
|
||||
REM Causes the JVM to dump its heap on OutOfMemory. |
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:+HeapDumpOnOutOfMemoryError |
||||
REM The path to the heap dump location, note directory must exists and have enough |
||||
REM space for a full heap dump. |
||||
REM JAVA_OPTS=%JAVA_OPTS% -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof |
||||
|
||||
REM Disables explicit GC |
||||
set JAVA_OPTS=%JAVA_OPTS% -XX:+DisableExplicitGC |
||||
|
||||
REM Ensure UTF-8 encoding by default (e.g. filenames) |
||||
set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding=UTF-8 |
||||
|
||||
set ES_CLASSPATH=%ES_CLASSPATH%;%ES_HOME%/lib/elasticsearch-1.7.6.jar;%ES_HOME%/lib/*;%ES_HOME%/lib/sigar/* |
||||
set ES_PARAMS=-Delasticsearch -Des-foreground=yes -Des.path.home="%ES_HOME%" |
||||
@ -0,0 +1,68 @@ |
||||
#!/bin/sh |
||||
|
||||
ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/elasticsearch-1.7.6.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/* |
||||
|
||||
if [ "x$ES_MIN_MEM" = "x" ]; then |
||||
ES_MIN_MEM=256m |
||||
fi |
||||
if [ "x$ES_MAX_MEM" = "x" ]; then |
||||
ES_MAX_MEM=1g |
||||
fi |
||||
if [ "x$ES_HEAP_SIZE" != "x" ]; then |
||||
ES_MIN_MEM=$ES_HEAP_SIZE |
||||
ES_MAX_MEM=$ES_HEAP_SIZE |
||||
fi |
||||
|
||||
# min and max heap sizes should be set to the same value to avoid |
||||
# stop-the-world GC pauses during resize, and so that we can lock the |
||||
# heap in memory on startup to prevent any of it from being swapped |
||||
# out. |
||||
JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}" |
||||
JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}" |
||||
|
||||
# new generation |
||||
if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then |
||||
JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}" |
||||
fi |
||||
|
||||
# max direct memory |
||||
if [ "x$ES_DIRECT_SIZE" != "x" ]; then |
||||
JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}" |
||||
fi |
||||
|
||||
# set to headless, just in case |
||||
JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true" |
||||
|
||||
# Force the JVM to use IPv4 stack |
||||
if [ "x$ES_USE_IPV4" != "x" ]; then |
||||
JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true" |
||||
fi |
||||
|
||||
JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC" |
||||
|
||||
JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly" |
||||
|
||||
# GC logging options |
||||
if [ "x$ES_USE_GC_LOGGING" != "x" ]; then |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDateStamps" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution" |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime" |
||||
JAVA_OPTS="$JAVA_OPTS -Xloggc:/var/log/elasticsearch/gc.log" |
||||
fi |
||||
|
||||
# Causes the JVM to dump its heap on OutOfMemory. |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError" |
||||
# The path to the heap dump location, note directory must exists and have enough |
||||
# space for a full heap dump. |
||||
#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof" |
||||
|
||||
# Disables explicit GC |
||||
JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC" |
||||
|
||||
# Ensure UTF-8 encoding by default (e.g. filenames) |
||||
JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8" |
||||
@ -0,0 +1,108 @@ |
||||
#!/bin/sh |
||||
|
||||
CDPATH="" |
||||
SCRIPT="$0" |
||||
|
||||
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. |
||||
while [ -h "$SCRIPT" ] ; do |
||||
ls=`ls -ld "$SCRIPT"` |
||||
# Drop everything prior to -> |
||||
link=`expr "$ls" : '.*-> \(.*\)$'` |
||||
if expr "$link" : '/.*' > /dev/null; then |
||||
SCRIPT="$link" |
||||
else |
||||
SCRIPT=`dirname "$SCRIPT"`/"$link" |
||||
fi |
||||
done |
||||
|
||||
# determine elasticsearch home |
||||
ES_HOME=`dirname "$SCRIPT"`/.. |
||||
|
||||
# make ELASTICSEARCH_HOME absolute |
||||
ES_HOME=`cd "$ES_HOME"; pwd` |
||||
|
||||
# Sets the default values for elasticsearch variables used in this script |
||||
if [ -z "$CONF_DIR" ]; then |
||||
CONF_DIR="$ES_HOME/config" |
||||
|
||||
if [ -z "$CONF_FILE" ]; then |
||||
CONF_FILE="$CONF_DIR/elasticsearch.yml" |
||||
fi |
||||
fi |
||||
|
||||
if [ -z "$CONF_FILE" ]; then |
||||
CONF_FILE="$ES_HOME/config/elasticsearch.yml" |
||||
fi |
||||
|
||||
# The default env file is defined at building/packaging time. |
||||
# For a tar.gz package, the value is "". |
||||
ES_ENV_FILE="" |
||||
|
||||
# If an include is specified with the ES_INCLUDE environment variable, use it |
||||
if [ -n "$ES_INCLUDE" ]; then |
||||
ES_ENV_FILE="$ES_INCLUDE" |
||||
fi |
||||
|
||||
# Source the environment file |
||||
if [ -n "$ES_ENV_FILE" ]; then |
||||
|
||||
# If the ES_ENV_FILE is not found, try to resolve the path |
||||
# against the ES_HOME directory |
||||
if [ ! -f "$ES_ENV_FILE" ]; then |
||||
ES_ENV_FILE="$ELASTIC_HOME/$ES_ENV_FILE" |
||||
fi |
||||
|
||||
. "$ES_ENV_FILE" |
||||
if [ $? -ne 0 ]; then |
||||
echo "Unable to source environment file: $ES_ENV_FILE" >&2 |
||||
exit 1 |
||||
fi |
||||
fi |
||||
|
||||
if [ -x "$JAVA_HOME/bin/java" ]; then |
||||
JAVA=$JAVA_HOME/bin/java |
||||
else |
||||
JAVA=`which java` |
||||
fi |
||||
|
||||
# real getopt cannot be used because we need to hand options over to the PluginManager |
||||
while [ $# -gt 0 ]; do |
||||
case $1 in |
||||
-D*=*) |
||||
properties="$properties \"$1\"" |
||||
;; |
||||
-D*) |
||||
var=$1 |
||||
shift |
||||
properties="$properties \"$var\"=\"$1\"" |
||||
;; |
||||
*) |
||||
args="$args \"$1\"" |
||||
esac |
||||
shift |
||||
done |
||||
|
||||
# check if properties already has a config file or config dir |
||||
if [ -e "$CONF_DIR" ]; then |
||||
case "$properties" in |
||||
*-Des.default.path.conf=*|*-Des.path.conf=*) |
||||
;; |
||||
*) |
||||
properties="$properties -Des.default.path.conf=\"$CONF_DIR\"" |
||||
;; |
||||
esac |
||||
fi |
||||
|
||||
if [ -e "$CONF_FILE" ]; then |
||||
case "$properties" in |
||||
*-Des.default.config=*|*-Des.config=*) |
||||
;; |
||||
*) |
||||
properties="$properties -Des.default.config=\"$CONF_FILE\"" |
||||
;; |
||||
esac |
||||
fi |
||||
|
||||
export HOSTNAME=`hostname -s` |
||||
|
||||
eval "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home=\""$ES_HOME"\" $properties -cp \""$ES_HOME/lib/*"\" org.elasticsearch.plugins.PluginManager $args |
||||
@ -0,0 +1,25 @@ |
||||
@echo off |
||||
|
||||
SETLOCAL |
||||
|
||||
if NOT DEFINED JAVA_HOME goto err |
||||
|
||||
set SCRIPT_DIR=%~dp0 |
||||
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI |
||||
|
||||
TITLE Elasticsearch Plugin Manager 1.7.6 |
||||
|
||||
SET HOSTNAME=%COMPUTERNAME% |
||||
|
||||
"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% -Xmx64m -Xms16m -Des.path.home="%ES_HOME%" -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginManager" %* |
||||
goto finally |
||||
|
||||
|
||||
:err |
||||
echo JAVA_HOME environment variable must be set! |
||||
pause |
||||
|
||||
|
||||
:finally |
||||
|
||||
ENDLOCAL |
||||
@ -0,0 +1,204 @@ |
||||
@echo off |
||||
SETLOCAL |
||||
|
||||
TITLE Elasticsearch Service 1.7.6 |
||||
|
||||
if NOT DEFINED JAVA_HOME goto err |
||||
|
||||
set SCRIPT_DIR=%~dp0 |
||||
for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI |
||||
|
||||
rem Detect JVM version to figure out appropriate executable to use |
||||
if not exist "%JAVA_HOME%\bin\java.exe" ( |
||||
echo JAVA_HOME points to an invalid Java installation (no java.exe found in "%JAVA_HOME%"^). Exiting... |
||||
goto:eof |
||||
) |
||||
"%JAVA_HOME%\bin\java" -version 2>&1 | "%windir%\System32\find" "64-Bit" >nul: |
||||
|
||||
if errorlevel 1 goto x86 |
||||
set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe |
||||
set SERVICE_ID=elasticsearch-service-x64 |
||||
set ARCH=64-bit |
||||
goto checkExe |
||||
|
||||
:x86 |
||||
set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x86.exe |
||||
set SERVICE_ID=elasticsearch-service-x86 |
||||
set ARCH=32-bit |
||||
|
||||
:checkExe |
||||
if EXIST "%EXECUTABLE%" goto okExe |
||||
echo elasticsearch-service-(x86|x64).exe was not found... |
||||
|
||||
:okExe |
||||
set ES_VERSION=1.7.6 |
||||
|
||||
if "%LOG_DIR%" == "" set LOG_DIR=%ES_HOME%\logs |
||||
|
||||
if "x%1x" == "xx" goto displayUsage |
||||
set SERVICE_CMD=%1 |
||||
shift |
||||
if "x%1x" == "xx" goto checkServiceCmd |
||||
set SERVICE_ID=%1 |
||||
|
||||
:checkServiceCmd |
||||
|
||||
if "%LOG_OPTS%" == "" set LOG_OPTS=--LogPath "%LOG_DIR%" --LogPrefix "%SERVICE_ID%" --StdError auto --StdOutput auto |
||||
|
||||
if /i %SERVICE_CMD% == install goto doInstall |
||||
if /i %SERVICE_CMD% == remove goto doRemove |
||||
if /i %SERVICE_CMD% == start goto doStart |
||||
if /i %SERVICE_CMD% == stop goto doStop |
||||
if /i %SERVICE_CMD% == manager goto doManagment |
||||
echo Unknown option "%SERVICE_CMD%" |
||||
|
||||
:displayUsage |
||||
echo. |
||||
echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID] |
||||
goto:eof |
||||
|
||||
:doStart |
||||
"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS% |
||||
if not errorlevel 1 goto started |
||||
echo Failed starting '%SERVICE_ID%' service |
||||
goto:eof |
||||
:started |
||||
echo The service '%SERVICE_ID%' has been started |
||||
goto:eof |
||||
|
||||
:doStop |
||||
"%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS% |
||||
if not errorlevel 1 goto stopped |
||||
echo Failed stopping '%SERVICE_ID%' service |
||||
goto:eof |
||||
:stopped |
||||
echo The service '%SERVICE_ID%' has been stopped |
||||
goto:eof |
||||
|
||||
:doManagment |
||||
set EXECUTABLE_MGR=%ES_HOME%\bin\elasticsearch-service-mgr.exe |
||||
"%EXECUTABLE_MGR%" //ES//%SERVICE_ID% |
||||
if not errorlevel 1 goto managed |
||||
echo Failed starting service manager for '%SERVICE_ID%' |
||||
goto:eof |
||||
:managed |
||||
echo Succesfully started service manager for '%SERVICE_ID%'. |
||||
goto:eof |
||||
|
||||
:doRemove |
||||
rem Remove the service |
||||
"%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS% |
||||
if not errorlevel 1 goto removed |
||||
echo Failed removing '%SERVICE_ID%' service |
||||
goto:eof |
||||
:removed |
||||
echo The service '%SERVICE_ID%' has been removed |
||||
goto:eof |
||||
|
||||
:doInstall |
||||
echo Installing service : "%SERVICE_ID%" |
||||
echo Using JAVA_HOME (%ARCH%): "%JAVA_HOME%" |
||||
|
||||
rem Check JVM server dll first |
||||
set JVM_DLL=%JAVA_HOME%\jre\bin\server\jvm.dll |
||||
if exist "%JVM_DLL%" goto foundJVM |
||||
|
||||
rem Check 'server' JRE (JRE installed on Windows Server) |
||||
set JVM_DLL=%JAVA_HOME%\bin\server\jvm.dll |
||||
if exist "%JVM_DLL%" goto foundJVM |
||||
|
||||
rem Fallback to 'client' JRE |
||||
set JVM_DLL=%JAVA_HOME%\bin\client\jvm.dll |
||||
|
||||
if exist "%JVM_DLL%" ( |
||||
echo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used... |
||||
) else ( |
||||
echo JAVA_HOME points to an invalid Java installation (no jvm.dll found in "%JAVA_HOME%"^). Existing... |
||||
goto:eof |
||||
) |
||||
|
||||
:foundJVM |
||||
if "%ES_MIN_MEM%" == "" set ES_MIN_MEM=256m |
||||
if "%ES_MAX_MEM%" == "" set ES_MAX_MEM=1g |
||||
|
||||
if NOT "%ES_HEAP_SIZE%" == "" set ES_MIN_MEM=%ES_HEAP_SIZE% |
||||
if NOT "%ES_HEAP_SIZE%" == "" set ES_MAX_MEM=%ES_HEAP_SIZE% |
||||
|
||||
call:convertxm %ES_MIN_MEM% JVM_XMS |
||||
call:convertxm %ES_MAX_MEM% JVM_XMX |
||||
|
||||
REM java_opts might be empty - init to avoid tripping commons daemon (if the command starts with ;) |
||||
if "%JAVA_OPTS%" == "" set JAVA_OPTS=-XX:+UseParNewGC |
||||
|
||||
CALL "%ES_HOME%\bin\elasticsearch.in.bat" |
||||
|
||||
rem thread stack size |
||||
set JVM_SS=256 |
||||
|
||||
if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data |
||||
|
||||
if "%WORK_DIR%" == "" set WORK_DIR=%ES_HOME% |
||||
|
||||
if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config |
||||
|
||||
if "%CONF_FILE%" == "" set CONF_FILE=%ES_HOME%\config\elasticsearch.yml |
||||
|
||||
set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.work="%WORK_DIR%";-Des.default.path.conf="%CONF_DIR%" |
||||
|
||||
set JVM_OPTS=%JAVA_OPTS: =;% |
||||
|
||||
if not "%ES_JAVA_OPTS%" == "" set JVM_ES_JAVA_OPTS=%ES_JAVA_OPTS: =#% |
||||
if not "%ES_JAVA_OPTS%" == "" set JVM_OPTS=%JVM_OPTS%;%JVM_ES_JAVA_OPTS% |
||||
|
||||
if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual |
||||
if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0 |
||||
|
||||
"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" |
||||
|
||||
|
||||
if not errorlevel 1 goto installed |
||||
echo Failed installing '%SERVICE_ID%' service |
||||
goto:eof |
||||
|
||||
:installed |
||||
echo The service '%SERVICE_ID%' has been installed. |
||||
goto:eof |
||||
|
||||
:err |
||||
echo JAVA_HOME environment variable must be set! |
||||
pause |
||||
goto:eof |
||||
|
||||
rem --- |
||||
rem Function for converting Xm[s|x] values into MB which Commons Daemon accepts |
||||
rem --- |
||||
:convertxm |
||||
set value=%~1 |
||||
rem extract last char (unit) |
||||
set unit=%value:~-1% |
||||
rem assume the unit is specified |
||||
set conv=%value:~0,-1% |
||||
|
||||
if "%unit%" == "k" goto kilo |
||||
if "%unit%" == "K" goto kilo |
||||
if "%unit%" == "m" goto mega |
||||
if "%unit%" == "M" goto mega |
||||
if "%unit%" == "g" goto giga |
||||
if "%unit%" == "G" goto giga |
||||
|
||||
rem no unit found, must be bytes; consider the whole value |
||||
set conv=%value% |
||||
rem convert to KB |
||||
set /a conv=%conv% / 1024 |
||||
:kilo |
||||
rem convert to MB |
||||
set /a conv=%conv% / 1024 |
||||
goto mega |
||||
:giga |
||||
rem convert to MB |
||||
set /a conv=%conv% * 1024 |
||||
:mega |
||||
set "%~2=%conv%" |
||||
goto:eof |
||||
|
||||
ENDLOCAL |
||||
@ -0,0 +1,385 @@ |
||||
##################### Elasticsearch Configuration Example ##################### |
||||
|
||||
# This file contains an overview of various configuration settings, |
||||
# targeted at operations staff. Application developers should |
||||
# consult the guide at <http://elasticsearch.org/guide>. |
||||
# |
||||
# The installation procedure is covered at |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>. |
||||
# |
||||
# Elasticsearch comes with reasonable defaults for most settings, |
||||
# so you can try it out without bothering with configuration. |
||||
# |
||||
# Most of the time, these defaults are just fine for running a production |
||||
# cluster. If you're fine-tuning your cluster, or wondering about the |
||||
# effect of certain configuration option, please _do ask_ on the |
||||
# mailing list or IRC channel [http://elasticsearch.org/community]. |
||||
|
||||
# Any element in the configuration can be replaced with environment variables |
||||
# by placing them in ${...} notation. For example: |
||||
# |
||||
#node.rack: ${RACK_ENV_VAR} |
||||
|
||||
# For information on supported formats and syntax for the config file, see |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html> |
||||
|
||||
|
||||
################################### Cluster ################################### |
||||
|
||||
# Cluster name identifies your cluster for auto-discovery. If you're running |
||||
# multiple clusters on the same network, make sure you're using unique names. |
||||
# |
||||
#cluster.name: elasticsearch |
||||
|
||||
|
||||
#################################### Node ##################################### |
||||
|
||||
# Node names are generated dynamically on startup, so you're relieved |
||||
# from configuring them manually. You can tie this node to a specific name: |
||||
# |
||||
#node.name: "Franz Kafka" |
||||
|
||||
# Every node can be configured to allow or deny being eligible as the master, |
||||
# and to allow or deny to store the data. |
||||
# |
||||
# Allow this node to be eligible as a master node (enabled by default): |
||||
# |
||||
#node.master: true |
||||
# |
||||
# Allow this node to store data (enabled by default): |
||||
# |
||||
#node.data: true |
||||
|
||||
# You can exploit these settings to design advanced cluster topologies. |
||||
# |
||||
# 1. You want this node to never become a master node, only to hold data. |
||||
# This will be the "workhorse" of your cluster. |
||||
# |
||||
#node.master: false |
||||
#node.data: true |
||||
# |
||||
# 2. You want this node to only serve as a master: to not store any data and |
||||
# to have free resources. This will be the "coordinator" of your cluster. |
||||
# |
||||
#node.master: true |
||||
#node.data: false |
||||
# |
||||
# 3. You want this node to be neither master nor data node, but |
||||
# to act as a "search load balancer" (fetching data from nodes, |
||||
# aggregating results, etc.) |
||||
# |
||||
#node.master: false |
||||
#node.data: false |
||||
|
||||
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the |
||||
# Node Info API [http://localhost:9200/_nodes] or GUI tools |
||||
# such as <http://www.elasticsearch.org/overview/marvel/>, |
||||
# <http://github.com/karmi/elasticsearch-paramedic>, |
||||
# <http://github.com/lukas-vlcek/bigdesk> and |
||||
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state. |
||||
|
||||
# A node can have generic attributes associated with it, which can later be used |
||||
# for customized shard allocation filtering, or allocation awareness. An attribute |
||||
# is a simple key value pair, similar to node.key: value, here is an example: |
||||
# |
||||
#node.rack: rack314 |
||||
|
||||
# By default, multiple nodes are allowed to start from the same installation location |
||||
# to disable it, set the following: |
||||
#node.max_local_storage_nodes: 1 |
||||
|
||||
|
||||
#################################### Index #################################### |
||||
|
||||
# You can set a number of options (such as shard/replica options, mapping |
||||
# or analyzer definitions, translog settings, ...) for indices globally, |
||||
# in this file. |
||||
# |
||||
# Note, that it makes more sense to configure index settings specifically for |
||||
# a certain index, either when creating it or by using the index templates API. |
||||
# |
||||
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html> |
||||
# for more information. |
||||
|
||||
# Set the number of shards (splits) of an index (5 by default): |
||||
# |
||||
#index.number_of_shards: 5 |
||||
|
||||
# Set the number of replicas (additional copies) of an index (1 by default): |
||||
# |
||||
#index.number_of_replicas: 1 |
||||
|
||||
# Note, that for development on a local machine, with small indices, it usually |
||||
# makes sense to "disable" the distributed features: |
||||
# |
||||
#index.number_of_shards: 1 |
||||
#index.number_of_replicas: 0 |
||||
|
||||
# These settings directly affect the performance of index and search operations |
||||
# in your cluster. Assuming you have enough machines to hold shards and |
||||
# replicas, the rule of thumb is: |
||||
# |
||||
# 1. Having more *shards* enhances the _indexing_ performance and allows to |
||||
# _distribute_ a big index across machines. |
||||
# 2. Having more *replicas* enhances the _search_ performance and improves the |
||||
# cluster _availability_. |
||||
# |
||||
# The "number_of_shards" is a one-time setting for an index. |
||||
# |
||||
# The "number_of_replicas" can be increased or decreased anytime, |
||||
# by using the Index Update Settings API. |
||||
# |
||||
# Elasticsearch takes care about load balancing, relocating, gathering the |
||||
# results from nodes, etc. Experiment with different settings to fine-tune |
||||
# your setup. |
||||
|
||||
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect |
||||
# the index status. |
||||
|
||||
|
||||
#################################### Paths #################################### |
||||
|
||||
# Path to directory containing configuration (this file and logging.yml): |
||||
# |
||||
#path.conf: /path/to/conf |
||||
|
||||
# Path to directory where to store index data allocated for this node. |
||||
# |
||||
#path.data: /path/to/data |
||||
# |
||||
# Can optionally include more than one location, causing data to be striped across |
||||
# the locations (a la RAID 0) on a file level, favouring locations with most free |
||||
# space on creation. For example: |
||||
# |
||||
#path.data: /path/to/data1,/path/to/data2 |
||||
|
||||
# Path to temporary files: |
||||
# |
||||
#path.work: /path/to/work |
||||
|
||||
# Path to log files: |
||||
# |
||||
#path.logs: /path/to/logs |
||||
|
||||
# Path to where plugins are installed: |
||||
# |
||||
#path.plugins: /path/to/plugins |
||||
|
||||
|
||||
#################################### Plugin ################################### |
||||
|
||||
# If a plugin listed here is not installed for current node, the node will not start. |
||||
# |
||||
#plugin.mandatory: mapper-attachments,lang-groovy |
||||
|
||||
|
||||
################################### Memory #################################### |
||||
|
||||
# Elasticsearch performs poorly when JVM starts swapping: you should ensure that |
||||
# it _never_ swaps. |
||||
# |
||||
# Set this property to true to lock the memory: |
||||
# |
||||
#bootstrap.mlockall: true |
||||
|
||||
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set |
||||
# to the same value, and that the machine has enough memory to allocate |
||||
# for Elasticsearch, leaving enough memory for the operating system itself. |
||||
# |
||||
# You should also make sure that the Elasticsearch process is allowed to lock |
||||
# the memory, eg. by using `ulimit -l unlimited`. |
||||
|
||||
|
||||
############################## Network And HTTP ############################### |
||||
|
||||
# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens |
||||
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node |
||||
# communication. (the range means that if the port is busy, it will automatically |
||||
# try the next port). |
||||
|
||||
# Set the bind address specifically (IPv4 or IPv6): |
||||
# |
||||
#network.bind_host: 192.168.0.1 |
||||
|
||||
# Set the address other nodes will use to communicate with this node. If not |
||||
# set, it is automatically derived. It must point to an actual IP address. |
||||
# |
||||
#network.publish_host: 192.168.0.1 |
||||
|
||||
# Set both 'bind_host' and 'publish_host': |
||||
# |
||||
#network.host: 192.168.0.1 |
||||
|
||||
# Set a custom port for the node to node communication (9300 by default): |
||||
# |
||||
#transport.tcp.port: 9300 |
||||
|
||||
# Enable compression for all communication between nodes (disabled by default): |
||||
# |
||||
#transport.tcp.compress: true |
||||
|
||||
# Set a custom port to listen for HTTP traffic: |
||||
# |
||||
#http.port: 9200 |
||||
|
||||
# Set a custom allowed content length: |
||||
# |
||||
#http.max_content_length: 100mb |
||||
|
||||
# Disable HTTP completely: |
||||
# |
||||
#http.enabled: false |
||||
|
||||
|
||||
################################### Gateway ################################### |
||||
|
||||
# The gateway allows for persisting the cluster state between full cluster |
||||
# restarts. Every change to the state (such as adding an index) will be stored |
||||
# in the gateway, and when the cluster starts up for the first time, |
||||
# it will read its state from the gateway. |
||||
|
||||
# There are several types of gateway implementations. For more information, see |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>. |
||||
|
||||
# The default gateway type is the "local" gateway (recommended): |
||||
# |
||||
#gateway.type: local |
||||
|
||||
# Settings below control how and when to start the initial recovery process on |
||||
# a full cluster restart (to reuse as much local data as possible when using shared |
||||
# gateway). |
||||
|
||||
# Allow recovery process after N nodes in a cluster are up: |
||||
# |
||||
#gateway.recover_after_nodes: 1 |
||||
|
||||
# Set the timeout to initiate the recovery process, once the N nodes |
||||
# from previous setting are up (accepts time value): |
||||
# |
||||
#gateway.recover_after_time: 5m |
||||
|
||||
# Set how many nodes are expected in this cluster. Once these N nodes |
||||
# are up (and recover_after_nodes is met), begin recovery process immediately |
||||
# (without waiting for recover_after_time to expire): |
||||
# |
||||
#gateway.expected_nodes: 2 |
||||
|
||||
|
||||
############################# Recovery Throttling ############################# |
||||
|
||||
# These settings allow to control the process of shards allocation between |
||||
# nodes during initial recovery, replica allocation, rebalancing, |
||||
# or when adding and removing nodes. |
||||
|
||||
# Set the number of concurrent recoveries happening on a node: |
||||
# |
||||
# 1. During the initial recovery |
||||
# |
||||
#cluster.routing.allocation.node_initial_primaries_recoveries: 4 |
||||
# |
||||
# 2. During adding/removing nodes, rebalancing, etc |
||||
# |
||||
#cluster.routing.allocation.node_concurrent_recoveries: 2 |
||||
|
||||
# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): |
||||
# |
||||
#indices.recovery.max_bytes_per_sec: 20mb |
||||
|
||||
# Set to limit the number of open concurrent streams when |
||||
# recovering a shard from a peer: |
||||
# |
||||
#indices.recovery.concurrent_streams: 5 |
||||
|
||||
|
||||
################################## Discovery ################################## |
||||
|
||||
# Discovery infrastructure ensures nodes can be found within a cluster |
||||
# and master node is elected. Multicast discovery is the default. |
||||
|
||||
# Set to ensure a node sees N other master eligible nodes to be considered |
||||
# operational within the cluster. This should be set to a quorum/majority of |
||||
# the master-eligible nodes in the cluster. |
||||
# |
||||
#discovery.zen.minimum_master_nodes: 1 |
||||
|
||||
# Set the time to wait for ping responses from other nodes when discovering. |
||||
# Set this option to a higher value on a slow or congested network |
||||
# to minimize discovery failures: |
||||
# |
||||
#discovery.zen.ping.timeout: 3s |
||||
|
||||
# For more information, see |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html> |
||||
|
||||
# Unicast discovery allows to explicitly control which nodes will be used |
||||
# to discover the cluster. It can be used when multicast is not present, |
||||
# or to restrict the cluster communication-wise. |
||||
# |
||||
# 1. Disable multicast discovery (enabled by default): |
||||
# |
||||
#discovery.zen.ping.multicast.enabled: false |
||||
# |
||||
# 2. Configure an initial list of master nodes in the cluster |
||||
# to perform discovery when new nodes (master or data) are started: |
||||
# |
||||
#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] |
||||
|
||||
# EC2 discovery allows to use AWS EC2 API in order to perform discovery. |
||||
# |
||||
# You have to install the cloud-aws plugin for enabling the EC2 discovery. |
||||
# |
||||
# For more information, see |
||||
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html> |
||||
# |
||||
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/> |
||||
# for a step-by-step tutorial. |
||||
|
||||
# GCE discovery allows to use Google Compute Engine API in order to perform discovery. |
||||
# |
||||
# You have to install the cloud-gce plugin for enabling the GCE discovery. |
||||
# |
||||
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>. |
||||
|
||||
# Azure discovery allows to use Azure API in order to perform discovery. |
||||
# |
||||
# You have to install the cloud-azure plugin for enabling the Azure discovery. |
||||
# |
||||
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>. |
||||
|
||||
################################## Slow Log ################################## |
||||
|
||||
# Shard level query and fetch threshold logging. |
||||
|
||||
#index.search.slowlog.threshold.query.warn: 10s |
||||
#index.search.slowlog.threshold.query.info: 5s |
||||
#index.search.slowlog.threshold.query.debug: 2s |
||||
#index.search.slowlog.threshold.query.trace: 500ms |
||||
|
||||
#index.search.slowlog.threshold.fetch.warn: 1s |
||||
#index.search.slowlog.threshold.fetch.info: 800ms |
||||
#index.search.slowlog.threshold.fetch.debug: 500ms |
||||
#index.search.slowlog.threshold.fetch.trace: 200ms |
||||
|
||||
#index.indexing.slowlog.threshold.index.warn: 10s |
||||
#index.indexing.slowlog.threshold.index.info: 5s |
||||
#index.indexing.slowlog.threshold.index.debug: 2s |
||||
#index.indexing.slowlog.threshold.index.trace: 500ms |
||||
|
||||
################################## GC Logging ################################ |
||||
|
||||
#monitor.jvm.gc.young.warn: 1000ms |
||||
#monitor.jvm.gc.young.info: 700ms |
||||
#monitor.jvm.gc.young.debug: 400ms |
||||
|
||||
#monitor.jvm.gc.old.warn: 10s |
||||
#monitor.jvm.gc.old.info: 5s |
||||
#monitor.jvm.gc.old.debug: 2s |
||||
|
||||
################################## Security ################################ |
||||
|
||||
# Uncomment if you want to enable JSONP as a valid return transport on the |
||||
# http server. With this enabled, it may pose a security risk, so disabling |
||||
# it unless you need it is recommended (it is disabled by default). |
||||
# |
||||
#http.jsonp.enable: true |
||||
@ -0,0 +1,68 @@ |
||||
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG |
||||
es.logger.level: INFO |
||||
rootLogger: ${es.logger.level}, console, file |
||||
logger: |
||||
# log action execution errors for easier debugging |
||||
action: DEBUG |
||||
# reduce the logging for aws, too much is logged under the default INFO |
||||
com.amazonaws: WARN |
||||
org.apache.http: INFO |
||||
|
||||
# gateway |
||||
#gateway: DEBUG |
||||
#index.gateway: DEBUG |
||||
|
||||
# peer shard recovery |
||||
#indices.recovery: DEBUG |
||||
|
||||
# discovery |
||||
#discovery: TRACE |
||||
|
||||
index.search.slowlog: TRACE, index_search_slow_log_file |
||||
index.indexing.slowlog: TRACE, index_indexing_slow_log_file |
||||
|
||||
additivity: |
||||
index.search.slowlog: false |
||||
index.indexing.slowlog: false |
||||
|
||||
appender: |
||||
console: |
||||
type: console |
||||
layout: |
||||
type: consolePattern |
||||
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" |
||||
|
||||
file: |
||||
type: dailyRollingFile |
||||
file: ${path.logs}/${cluster.name}.log |
||||
datePattern: "'.'yyyy-MM-dd" |
||||
layout: |
||||
type: pattern |
||||
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" |
||||
|
||||
# Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. |
||||
# For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html |
||||
#file: |
||||
#type: extrasRollingFile |
||||
#file: ${path.logs}/${cluster.name}.log |
||||
#rollingPolicy: timeBased |
||||
#rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz |
||||
#layout: |
||||
#type: pattern |
||||
#conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" |
||||
|
||||
index_search_slow_log_file: |
||||
type: dailyRollingFile |
||||
file: ${path.logs}/${cluster.name}_index_search_slowlog.log |
||||
datePattern: "'.'yyyy-MM-dd" |
||||
layout: |
||||
type: pattern |
||||
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" |
||||
|
||||
index_indexing_slow_log_file: |
||||
type: dailyRollingFile |
||||
file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log |
||||
datePattern: "'.'yyyy-MM-dd" |
||||
layout: |
||||
type: pattern |
||||
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" |
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue