Commit bd539260 authored by Madjid Sadallah

Upload of the whole project

parent 678ed12c
Showing 617 additions and 0 deletions
.env 0 → 100644
ELASTIC_VERSION=8.11.3
## Passwords for stack users
#
# User 'elastic' (built-in)
#
# Superuser role, full access to cluster management and data indices.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
ELASTIC_PASSWORD='mobilespassword'
# User 'logstash_internal' (custom)
#
# The user Logstash uses to connect and send data to Elasticsearch.
# https://www.elastic.co/guide/en/logstash/current/ls-security.html
LOGSTASH_INTERNAL_PASSWORD='mobilespassword'
# User 'kibana_system' (built-in)
#
# The user Kibana uses to connect and communicate with Elasticsearch.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
KIBANA_SYSTEM_PASSWORD='mobilespassword'
# Users 'metricbeat_internal', 'filebeat_internal' and 'heartbeat_internal' (custom)
#
# The users Beats use to connect and send data to Elasticsearch.
# https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html
METRICBEAT_INTERNAL_PASSWORD=''
FILEBEAT_INTERNAL_PASSWORD=''
HEARTBEAT_INTERNAL_PASSWORD=''
# User 'monitoring_internal' (custom)
#
# The user Metricbeat uses to collect monitoring data from stack components.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/how-monitoring-works.html
MONITORING_INTERNAL_PASSWORD=''
# User 'beats_system' (built-in)
#
# The user the Beats use when storing monitoring information in Elasticsearch.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
BEATS_SYSTEM_PASSWORD=''
MYSQL_USER='mobiles'
MYSQL_PASSWORD='mobilespassword'
MYSQL_ROOT_PASSWORD='mobilespassword'
MYSQL_DB='mobiles'
KTBS_USER='mobiles'
KTBS_PASSWORD='mobilespassword'
KTBS_URL='https://KTBS_URL/tracesmobiles/@obsels'
\ No newline at end of file
# Python
**/__pycache__/
**/*.py[cod]
**/*.pyo
**/*.pyd
**/.Python
**/env/
**/venv/
**/ENV/
**/env.bak/
**/venv.bak/
**/*.egg-info/
**/*.eggs/
**/dist/
**/build/
**/*.egg
# Docker
# Ignore Docker-related files
.docker/
docker-compose.override.yml
*.dockerignore
# VSCode
.vscode/
.history/
.vsconfig
# Log files
**/*.log
# Secrets
# **/*.env
# **/*.local
# **/secrets/
# Jupyter Notebook Checkpoints
**/.ipynb_checkpoints/
# Coverage reports
**/.coverage
**/*.cover
**/*.py,cover
# Miscellaneous
**/*.DS_Store
**/Thumbs.db
**/*.tmp
**/*.bak
**/*.swp
**/*.swo
**/*.swn
**/*.orig
# Specific IDE files
**/.idea/
**/*.sublime-workspace
**/*.sublime-project
# Ignore files generated by mypy
**/.mypy_cache/
-- Create the database if it does not exist
CREATE DATABASE IF NOT EXISTS mobiles;
-- Switch to the mobiles database
USE mobiles;
-- Create the scheduled_task table
CREATE TABLE IF NOT EXISTS scheduled_task (
id INT PRIMARY KEY AUTO_INCREMENT,
name VARCHAR(50) NOT NULL,
action VARCHAR(50) NOT NULL,
scheduled_time DATETIME NOT NULL,
creation_time DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
status VARCHAR(20) NOT NULL DEFAULT 'Not Executed',
last_run DATETIME NULL,
recurrence VARCHAR(50) NULL
);
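-- Illustrative only (kept commented out so the init script inserts no data): a scheduled
-- entry could look like the following; 'export_obsels' is a made-up action name.
-- INSERT INTO scheduled_task (name, action, scheduled_time, recurrence)
--     VALUES ('nightly_export', 'export_obsels', '2024-01-15 02:00:00', 'daily');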
version: '3.7'
services:
setup:
profiles:
- setup
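# One-off setup service; its 'setup' profile keeps it out of a plain `docker compose up`,
# so start it explicitly when initialising the stack users/roles, e.g. `docker compose up setup`.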
build:
context: setup/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
init: true
volumes:
- ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
- ./setup/lib.sh:/lib.sh:ro,Z
- ./setup/roles:/roles:ro,Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
networks:
- mobnet
depends_on:
- elasticsearch
elasticsearch:
container_name: es
build:
context: elasticsearch/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
- elasticsearch:/usr/share/elasticsearch/data:Z
- ./elasticsearch/config/index-template.json:/usr/share/elasticsearch/config/index-template.json
ports:
- 9200:9200
- 9300:9300
environment:
node.name: elasticsearch
ES_JAVA_OPTS: -Xms2g -Xmx2g # Increase heap size to 2 GB
# Bootstrap password.
# Used to initialize the keystore during the initial startup of
# Elasticsearch. Ignored on subsequent runs.
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
networks:
- mobnet
restart: unless-stopped
logstash:
container_name: stash
build:
context: logstash/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:Z
- ./logstash/config/pipeline:/usr/share/logstash/pipeline:Z
- ./logstash/config/index-template.json:/usr/share/logstash/config/index-template.json
- ./logstash/config/mysql-connector-j-8.2.0.jar:/usr/share/logstash/config/mysql-connector-j-8.2.0.jar
- ./logstash/ruby/:/usr/share/logstash/ruby/
ports:
- 5044:5044
- 50000:50000/tcp
- 50000:50000/udp
- 9600:9600
environment:
LS_JAVA_OPTS: -Xms2g -Xmx2g # Increase heap size to 2 GB
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KTBS_URL: ${KTBS_URL:-}
KTBS_USER: ${KTBS_USER:-}
KTBS_PASSWORD: ${KTBS_PASSWORD:-}
MYSQL_DB: ${MYSQL_DB}
MYSQL_USER: ${MYSQL_USER:-}
MYSQL_PASSWORD: ${MYSQL_PASSWORD:-}
LOG_LEVEL: debug
networks:
- mobnet
depends_on:
- elasticsearch
# restart: unless-stopped
db:
container_name: db
image: mariadb:latest
environment:
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD:-}
MYSQL_DATABASE: ${MYSQL_DB}
MYSQL_USER: ${MYSQL_USER:-}
MYSQL_PASSWORD: ${MYSQL_PASSWORD:-}
volumes:
- db_data:/var/lib/mysql
- ./db:/docker-entrypoint-initdb.d
networks:
- mobnet
ports:
- "3306:3306"
rs:
container_name: rs
build: ./rs
volumes:
- ./rs/config:/app/config
networks:
- mobnet
ports:
- "8080:8080"
networks:
mobnet:
driver: bridge
volumes:
elasticsearch:
db_data:
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: docker-cluster
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html
#
xpack.license.self_generated.type: basic
xpack.security.enabled: true
{
"index_patterns": ["obsels"],
"settings": {
"number_of_shards": 1
},
"mappings": {
"properties": {
"m:coordinates": {
"type": "geo_point"
},
"m:position": {
"type": "geo_point"
},
"m:prev_coordinates": {
"type": "geo_point"
}
}
}
}
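For illustration only, a document indexed under this template could populate the mapped fields in any of the geo_point formats Elasticsearch accepts, for example as `lat`/`lon` objects (the coordinate values below are made up):

```json
{
  "m:coordinates":      { "lat": 45.7578, "lon": 4.8320 },
  "m:position":         { "lat": 45.7580, "lon": 4.8325 },
  "m:prev_coordinates": { "lat": 45.7576, "lon": 4.8318 }
}
```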
# Extensions
Third-party extensions that enable extra integrations with the Elastic stack.
FROM untergeek/curator:8.0.2
USER root
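# Append a crontab entry for the 'nobody' user: run the delete_log_files_curator.yml action file every minute.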
RUN >>/var/spool/cron/crontabs/nobody \
echo '* * * * * /curator/curator /.curator/delete_log_files_curator.yml'
ENTRYPOINT ["crond"]
CMD ["-f", "-d8"]
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
This sample setup demonstrates how to run `curator` every minute using `cron`.
All configuration files are available in the `config/` directory.
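For a one-off test without waiting for `cron`, the same action file can be run by hand. The sketch below is illustrative: it assumes the mount paths from `curator-compose.yml`, overrides the image's `crond` entrypoint, and uses `--dry-run` so nothing is actually deleted:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml \
    run --rm --entrypoint /curator/curator curator \
    --config /.curator/curator.yml --dry-run /.curator/delete_log_files_curator.yml
```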
## Documentation
[Curator Reference](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html)
# Curator configuration
# https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html
elasticsearch:
client:
hosts: [ http://elasticsearch:9200 ]
other_settings:
username: elastic
password: ${ELASTIC_PASSWORD}
logging:
loglevel: INFO
logformat: default
actions:
1:
action: delete_indices
description: >-
Delete indices. Find which to delete by first limiting the list to
logstash- prefixed indices. Then further filter those to prevent deletion
of anything less than the number of days specified by unit_count.
Ignore the error if the filter does not result in an actionable list of
indices (ignore_empty_list) and exit cleanly.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
- filtertype: age
source: creation_date
direction: older
unit: days
unit_count: 2
version: '3.7'
services:
curator:
build:
context: extensions/curator/
init: true
volumes:
- ./extensions/curator/config/curator.yml:/.curator/curator.yml:ro,Z
- ./extensions/curator/config/delete_log_files_curator.yml:/.curator/delete_log_files_curator.yml:ro,Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
networks:
- mobnet
depends_on:
- elasticsearch
ARG ELASTIC_VERSION
# https://www.docker.elastic.co/
FROM docker.elastic.co/enterprise-search/enterprise-search:${ELASTIC_VERSION}
# Enterprise Search extension
Elastic Enterprise Search is a suite of products for search applications backed by the Elastic Stack.
## Requirements
* 2 GB of free RAM, on top of the resources required by the other stack components and extensions.
The Enterprise Search web application is served on TCP port `3002`.
## Usage
### Generate an encryption key
Enterprise Search requires one or more [encryption keys][enterprisesearch-encryption] to be configured before the
initial startup. Failing to do so prevents the server from starting.
Encryption keys can contain any series of characters. Elastic recommends using 256-bit keys for optimal security.
Those encryption keys must be added manually to the [`config/enterprise-search.yml`][config-enterprisesearch] file. By
default, the list of encryption keys is empty and must be populated using one of the following formats:
```yaml
secret_management.encryption_keys:
- my_first_encryption_key
- my_second_encryption_key
- ...
```
```yaml
secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...]
```
> [!NOTE]
> To generate a strong random encryption key, you can use the OpenSSL utility or any other online/offline tool of your
> choice:
>
> ```console
> $ openssl rand -hex 32
> 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546
> ```
### Enable Elasticsearch's API key service
Enterprise Search requires Elasticsearch's built-in [API key service][es-security] to be enabled in order to start.
Elasticsearch enables this service by default only when TLS is enabled on its HTTP interface; since TLS is not enabled
in this stack, the API key service is disabled and must be turned on explicitly.
To enable it, modify the Elasticsearch configuration file in [`elasticsearch/config/elasticsearch.yml`][config-es] and
add the following setting:
```yaml
xpack.security.authc.api_key.enabled: true
```
### Configure the Enterprise Search host in Kibana
Kibana acts as the [management interface][enterprisesearch-kb] to Enterprise Search.
To enable the management experience for Enterprise Search, modify the Kibana configuration file in
[`kibana/config/kibana.yml`][config-kbn] and add the following setting:
```yaml
enterpriseSearch.host: http://enterprise-search:3002
```
### Start the server
To include Enterprise Search in the stack, run Docker Compose from the root of the repository with an additional command
line argument referencing the `enterprise-search-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up
```
Allow a few minutes for the stack to start, then open your web browser at the address <http://localhost:3002> to see the
Enterprise Search home page.
Enterprise Search is configured on first boot with the following default credentials:
* user: *enterprise_search*
* password: *mobilespassword*
## Security
The Enterprise Search password is defined inside the Compose file via the `ENT_SEARCH_DEFAULT_PASSWORD` environment
variable. We highly recommend choosing a more secure password than the default one.
To do so, change the value of the `ENT_SEARCH_DEFAULT_PASSWORD` environment variable inside the Compose file **before the first
boot**:
```yaml
enterprise-search:
environment:
ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}}
```
> [!WARNING]
> The default Enterprise Search password can only be set during the initial boot. Once the password is persisted in
> Elasticsearch, it can only be changed via the Elasticsearch API.
For more information, please refer to [User Management and Security][enterprisesearch-security].
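For reference, such a password change after the first boot could look like the sketch below (an assumption-laden example: it presumes the `enterprise_search` user is stored in Elasticsearch's native realm and that the stack's default port mapping is used):
```console
$ curl -u elastic:mobilespassword \
    -X POST 'http://localhost:9200/_security/user/enterprise_search/_password' \
    -H 'Content-Type: application/json' \
    -d '{"password": "<new strong password>"}'
```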
## Configuring Enterprise Search
The Enterprise Search configuration is stored in [`config/enterprise-search.yml`][config-enterprisesearch]. You can
modify this file using the [Default Enterprise Search configuration][enterprisesearch-config] as a reference.
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yaml
enterprise-search:
environment:
ent_search.auth.source: standard
worker.threads: '6'
```
Any change to the Enterprise Search configuration requires a restart of the Enterprise Search container:
```console
$ docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml restart enterprise-search
```
Please refer to the following documentation page for more details about how to configure Enterprise Search inside a
Docker container: [Running Enterprise Search Using Docker][enterprisesearch-docker].
## See also
[Enterprise Search documentation][enterprisesearch-docs]
[config-enterprisesearch]: ./config/enterprise-search.yml
[enterprisesearch-encryption]: https://www.elastic.co/guide/en/enterprise-search/current/encryption-keys.html
[enterprisesearch-security]: https://www.elastic.co/guide/en/workplace-search/current/workplace-search-security.html
[enterprisesearch-config]: https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
[enterprisesearch-docker]: https://www.elastic.co/guide/en/enterprise-search/current/docker.html
[enterprisesearch-docs]: https://www.elastic.co/guide/en/enterprise-search/current/index.html
[enterprisesearch-kb]: https://www.elastic.co/guide/en/kibana/current/enterprise-search-settings-kb.html
[es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings
[config-es]: ../../elasticsearch/config/elasticsearch.yml
[config-kbn]: ../../kibana/config/kibana.yml
---
## Enterprise Search core configuration
## https://www.elastic.co/guide/en/enterprise-search/current/configuration.html
#
## --------------------- REQUIRED ---------------------
# Encryption keys to protect application secrets.
secret_management.encryption_keys:
# example:
#- 680f94e568c90364bedf927b2f0f49609702d3eab9098688585a375b14274546
## ----------------------------------------------------
# IP address Enterprise Search listens on
ent_search.listen_host: 0.0.0.0
# URL at which users reach Enterprise Search / Kibana
ent_search.external_url: http://localhost:3002
kibana.host: http://localhost:5601
# Elasticsearch URL and credentials
elasticsearch.host: http://elasticsearch:9200
elasticsearch.username: elastic
elasticsearch.password: ${ELASTIC_PASSWORD}
# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes.
allow_es_settings_modification: true
version: '3.7'
services:
enterprise-search:
build:
context: extensions/enterprise-search/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./extensions/enterprise-search/config/enterprise-search.yml:/usr/share/enterprise-search/config/enterprise-search.yml:ro,Z
environment:
JAVA_OPTS: -Xms2g -Xmx2g
ENT_SEARCH_DEFAULT_PASSWORD: 'mobilespassword'
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
ports:
- 3002:3002
networks:
- mobnet
depends_on:
- elasticsearch
ARG ELASTIC_VERSION
FROM docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}
# Filebeat
Filebeat is a lightweight shipper for forwarding and centralizing log data. Installed as an agent on your servers,
Filebeat monitors the log files or locations that you specify, collects log events, and forwards them either to
Elasticsearch or Logstash for indexing.
## Usage
**This extension requires the `filebeat_internal` and `beats_system` users to be created and initialized with a
password.** In case you haven't done that during the initial startup of the stack, please refer to [How to re-execute
the setup][setup] to run the setup container again and initialize these users.
To include Filebeat in the stack, run Docker Compose from the root of the repository with an additional command line
argument referencing the `filebeat-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml up
```
## Configuring Filebeat
The Filebeat configuration is stored in [`config/filebeat.yml`](./config/filebeat.yml). You can modify this file with
the help of the [Configuration reference][filebeat-config].
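The project's actual `config/filebeat.yml` is not reproduced here. As a rough sketch of the kind of configuration the reference describes, a minimal setup that tails container logs and ships them to the stack's Elasticsearch with the `filebeat_internal` user could look like this (the paths and input choices are assumptions, not this project's settings):
```yaml
filebeat.inputs:
  # Tail Docker container log files and parse the JSON log wrapper.
  - type: filestream
    id: container-logs
    paths:
      - /var/lib/docker/containers/*/*.log
    parsers:
      - container: ~

output.elasticsearch:
  hosts: [ http://elasticsearch:9200 ]
  username: filebeat_internal
  password: ${FILEBEAT_INTERNAL_PASSWORD}
```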
Any change to the Filebeat configuration requires a restart of the Filebeat container:
```console
$ docker-compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml restart filebeat
```
Please refer to the following documentation page for more details about how to configure Filebeat inside a Docker
container: [Run Filebeat on Docker][filebeat-docker].
## See also
[Filebeat documentation][filebeat-doc]
[filebeat-config]: https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html
[filebeat-docker]: https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html
[filebeat-doc]: https://www.elastic.co/guide/en/beats/filebeat/current/index.html
[setup]: ../../README.md#how-to-re-execute-the-setup