Initial commit
This commit is contained in:
commit
b16f2585c7
19 changed files with 690 additions and 0 deletions
239
templates/sentry.conf.j2
Normal file
239
templates/sentry.conf.j2
Normal file
|
|
@ -0,0 +1,239 @@
|
|||
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
|
||||
#
|
||||
# This file is just Python, with a touch of Django which means
|
||||
# you can inherit and tweak settings to your hearts content.
|
||||
|
||||
from sentry.conf.server import * # NOQA
|
||||
|
||||
# Primary Sentry database (PostgreSQL).  Connection credentials are rendered
# into this template by Ansible from inventory/group variables.
_default_database = {
    "ENGINE": "sentry.db.postgres",
    "NAME": "{{ postgres_db }}",
    "USER": "{{ postgres_user }}",
    "PASSWORD": "{{ postgres_password }}",
    "HOST": "{{ postgres_host }}",
    "PORT": "{{ postgres_port }}",
}

DATABASES = {"default": _default_database}

# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
|
||||
|
||||
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings

###########
# General #
###########

# This install serves a single organization, so enable the matching
# UI optimizations.
SENTRY_SINGLE_ORGANIZATION = True

# Days to keep raw events before pruning; overridable via the
# SENTRY_EVENT_RETENTION_DAYS environment variable.
# NOTE: SENTRY_OPTIONS and env() come from the sentry.conf.server star import.
_retention_days = env('SENTRY_EVENT_RETENTION_DAYS', '90')
SENTRY_OPTIONS["system.event-retention-days"] = int(_retention_days)
|
||||
|
||||
#########
# Redis #
#########

# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB

# Single-node cluster pointing at the "redis" host.
_redis_node = {"host": "redis", "password": "", "port": "6379", "db": "0"}
SENTRY_OPTIONS["redis.clusters"] = {"default": {"hosts": {0: _redis_node}}}
|
||||
|
||||
#########
# Queue #
#########

# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.

# Broker selection: use RabbitMQ when a host is configured, otherwise fall
# back to the Redis node defined above.
rabbitmq_host = None

if not rabbitmq_host:
    _redis_broker = SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
    BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(**_redis_broker)
else:
    BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
        username="guest", password="guest", host=rabbitmq_host, vhost="/"
    )
|
||||
|
||||
|
||||
#########
# Cache #
#########

# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.

# Django-level cache backed by memcached.
_memcached_cache = {
    "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
    "LOCATION": ["memcached:11211"],
    "TIMEOUT": 3600,
}
CACHES = {"default": _memcached_cache}

# A primary cache is required for things such as processing events
SENTRY_CACHE = "sentry.cache.redis.RedisCache"
|
||||
|
||||
# Kafka producer settings shared by the event stream and the default cluster.
DEFAULT_KAFKA_OPTIONS = {
    "bootstrap.servers": "kafka:9092",
    "message.max.bytes": 50000000,
    "socket.timeout.ms": 1000,
}

# Stream events through Kafka into Snuba.
SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
SENTRY_EVENTSTREAM_OPTIONS = dict(producer_configuration=DEFAULT_KAFKA_OPTIONS)

# KAFKA_CLUSTERS is provided by the sentry.conf.server star import.
KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS
|
||||
|
||||
###############
# Rate Limits #
###############

# Rate limits apply to notification handlers and are enforced per-project
# automatically.

SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"

##################
# Update Buffers #
##################

# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)

SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"

##########
# Quotas #
##########

# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.

SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"

########
# TSDB #
########

# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.

SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
|
||||
|
||||
#########
# SNUBA #
#########

# Search and tag storage are served by Snuba; both option dicts start empty.
SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
SENTRY_SEARCH_OPTIONS = {}
SENTRY_TAGSTORE_OPTIONS = {}

###########
# Digests #
###########

# The digest backend powers notification summaries.

SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"
|
||||
|
||||
##############
# Web Server #
##############

# uWSGI configuration for the built-in web frontend.
SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
    # These are for proper HTTP/1.1 support from uWSGI
    # Without these it doesn't do keep-alives causing
    # issues with Relay's direct requests.
    "http-keepalive": True,
    "http-chunked-input": True,
    # The number of web workers
    "workers": 3,
    # Turn off memory reporting
    "memory-report": False,
    # Some stuff so uwsgi will cycle workers sensibly
    "max-requests": 100000,
    "max-requests-delta": 500,
    "max-worker-lifetime": 86400,
    # Duplicate options from sentry default just so we don't get
    # bit by sentry changing a default value that we depend on.
    "thunder-lock": True,
    "log-x-forwarded-for": False,
    "buffer-size": 32768,
    "limit-post": 209715200,
    "disable-logging": True,
    "reload-on-rss": 600,
    "ignore-sigpipe": True,
    "ignore-write-errors": True,
    "disable-write-exception": True,
}
|
||||
|
||||
###########
# SSL/TLS #
###########

# This install sits behind a TLS-terminating reverse proxy: trust the
# X-Forwarded-Proto header and mark cookies/redirects as HTTPS-only.

SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True

# End of SSL/TLS settings
|
||||
|
||||
############
# Features #
############

# Feature flags: no sample events, plus the usual self-hosted feature set.
# SENTRY_FEATURES is provided by the sentry.conf.server star import.
SENTRY_FEATURES["projects:sample-events"] = False

_enabled_features = (
    "organizations:discover",
    "organizations:events",
    "organizations:global-views",
    "organizations:integrations-issue-basic",
    "organizations:integrations-issue-sync",
    "organizations:invite-members",
    "organizations:sso-basic",
    "organizations:sso-rippling",
    "organizations:sso-saml2",
    "projects:custom-inbound-filters",
    "projects:data-forwarding",
    "projects:discard-groups",
    "projects:plugins",
    "projects:rate-limits",
    "projects:servicehooks",
)
for _feature in _enabled_features:
    SENTRY_FEATURES[_feature] = True
|
||||
|
||||
######################
# GitHub Integration #
######################

# Allow the GitHub integration to request full repository access.
GITHUB_EXTENDED_PERMISSIONS = ['repo']

#########################
# Bitbucket Integration #
#########################

# Fill these in to enable the Bitbucket integration.
# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'
|
||||
Reference in a new issue