Compare commits
8 Commits
a1679b69c3...develop

| Author | SHA1 | Date |
|---|---|---|
| | 405d7da754 | |
| | c316104d94 | |
| | eaeecc926a | |
| | 51950cb7d2 | |
| | 08380b2ca3 | |
| | b2bea677ef | |
| | 431541b3cb | |
| | a7a85b1816 | |
CHANGELOG.md (Normal file, 13 lines)
@@ -0,0 +1,13 @@
# Changelog

## [0.1.0] - 2023-09-16

### Added

- Web interface;
- Fully featured RESTful API v1;
- Monitoring of free space in storage;
- Deleting an archive or ticket also deletes the physical files;
- Flexible deployment configuration using environment variables;
- Dockerized app, the image size is less than 150 MB;
- Support for sqlite3 and PostgreSQL 15;
- Whitenoise static file management;
- Healthcheck endpoint for checking application availability;
@@ -60,8 +60,5 @@ EXPOSE ${WEB_PORT}

LABEL maintainer="s.zhukovskii@ispsystem.com"
LABEL me.zhukovsky.logs-collector.version=v${VERSION}

# call the health check endpoint of the app
HEALTHCHECK CMD curl --fail http://localhost:${WEB_PORT} || exit 1

# run app
ENTRYPOINT [ "sh", "entrypoint.sh" ]
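For a quick manual check of the same endpoint the HEALTHCHECK instruction polls, it can be curled from the host. A minimal sketch, assuming the port mapping used in the compose examples below (host port 80):

```sh
# Mirror the container HEALTHCHECK from the host; a non-success response makes curl exit non-zero
curl --fail http://localhost:80/ && echo "app is up"
```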
README-ru.md (Normal file, 324 lines)
@@ -0,0 +1,324 @@
# LOGS-COLLECTOR

```sh
█░░ █▀█ █▀▀ █▀ ▄▄ █▀▀ █▀█ █░░ █░░ █▀▀ █▀▀ ▀█▀ █▀█ █▀█
█▄▄ █▄█ █▄█ ▄█ ░░ █▄▄ █▄█ █▄▄ █▄▄ ██▄ █▄▄ ░█░ █▄█ █▀▄
```

### [English lang: README.md](README.md)

### [CHANGELOG.md](CHANGELOG.md)


## Purpose

If you develop software that your clients then run in their own infrastructure, you know how hard it can sometimes be to investigate a problem without access to the server the software runs on.

To address this, you can configure the software to send anonymized crash reports automatically, for example via Sentry. This is not always acceptable to the client; besides, the information may be incomplete, or the client may require stricter confidentiality.

In that case you can ask the client to send you the log files you need and study them afterwards. But this raises another problem: you need a way to transfer those files that is safe for both you and the client.
It could be FTP, SFTP, a cloud share, etc. But what if you do not want to give the client authentication and authorization credentials?

Perhaps you have access to the client's server and can read the log files in place, and the problem seems solved. But the client's server may lack convenient tools for studying log files.
Even if a support engineer can copy the files and study them locally, there is still the problem of sharing them with other engineers.

Logs-collector solves these problems.

Logs-collector is remote storage that can receive and serve files.
## Terms

- Platform: the software developed by your company
- Ticket: a number linked to a ticket in your help desk system
- Archive: an uploaded log file (any format is supported)

## How does it work?

- Create platforms
- Create a ticket linked to a platform and a number
- Give the client the ticket's unique token
- The client uploads an archive of log files
- Download the archive (and find a solution to the problem)
- Delete the archive or the ticket, or mark the ticket as resolved

## Features

- Centralized storage
- No auth credentials are needed to upload a file
- Each upload token is unique and linked to exactly one ticket
- A token has a limit on the number of attempts and a lifetime
- Files can be uploaded from the console or via the web
- Fully featured RESTful API v1
- Monitoring of free space in storage
- Deleting an archive or ticket also deletes the physical files
- The application follows the twelve-factor app architecture
- Flexible deployment configuration using environment variables
- The application is dockerized, the image size is less than 150 MB
- Works with both sqlite3 and PostgreSQL 15
- Static files are served without extra web server configuration
- Healthcheck endpoint for checking application availability

## Security

- The upload token is not tied to authorization
- The upload token has high entropy
- Two-factor authentication for users
- 2FA is enforced for downloading files
- The admin panel is patched to enforce 2FA
- The user inside the container is unprivileged
- Standard Django and DRF protection mechanisms
## Install

### From the docker image:

- Create a directory for the application wherever convenient
- Create a docker-compose.yml file in the application directory
- Create a .env file in the application directory
- Fill the .env file with the required environment variables, see below

>Example file using a docker volume and sqlite as the default database:

```yaml
version: "3"

# to set environment variables:
# create a .env file in the same directory as docker-compose.yaml

services:
  server:
    image: mois3y/logs_collector:0.1.0
    container_name: logs-collector
    restart: unless-stopped
    env_file:
      - ./.env
    ports:
      - "80:8000"
    volumes:
      - /etc/timezone:/etc/timezone:ro  # optional
      - /etc/localtime:/etc/localtime:ro  # optional
      - logs_collector_data:/data

volumes:
  logs_collector_data:
```

### From the source:

- Clone the repository
- docker-compose.yaml is already in the project directory
- create a .env file in the project root
- fill .env with the required environment variables, see below
- build the image and run the container in the background:

```sh
docker-compose up -d --build
```

- You can create your own file and make the necessary edits:

#### docker-compose-example-psql.yaml with PostgreSQL by default:

```yaml
services:
  logs_collector:
    container_name: logs-collector
    build:
      context: .
      args:
        - VERSION=${VERSION}
        - SRC_DIR=${SRC_DIR}
        - SCRIPTS_DIR=${SCRIPTS_DIR}
        - APP_DIR=${APP_DIR}
        - DATA_DIR=${DATA_DIR}
        - WEB_PORT=${WEB_PORT}
        - USER_NAME=${USER_NAME}
        - USER_GROUP=${USER_GROUP}
        - APP_UID=${APP_UID}
        - APP_GID=${APP_GID}
    ports:
      - "${WEB_HOST}:${WEB_PORT}:${WEB_PORT}"
    volumes:
      - type: volume
        source: logs_collector_data
        target: ${APP_DIR}/data
    env_file:
      - ./.env
    depends_on:
      - db

  db:
    image: postgres:15-alpine3.18
    container_name: psql-collector
    volumes:
      - logs_collector_psql_data:/var/lib/postgresql/data/
    env_file:
      - ./.env

volumes:
  logs_collector_data:
  logs_collector_psql_data:
```

#### docker-compose-example-psql.yaml with sqlite and bind-mount:

```yaml
version: "3"

# to set environment variables:
# create a .env file in the same directory as docker-compose.yaml

services:
  logs_collector:
    container_name: logs-collector
    build:
      context: .
      args:
        - VERSION=${VERSION}
        - SRC_DIR=${SRC_DIR}
        - SCRIPTS_DIR=${SCRIPTS_DIR}
        - APP_DIR=${APP_DIR}
        - DATA_DIR=${DATA_DIR}
        - WEB_PORT=${WEB_PORT}
        - USER_NAME=${USER_NAME}
        - USER_GROUP=${USER_GROUP}
        - APP_UID=${APP_UID}
        - APP_GID=${APP_GID}
    ports:
      - "${WEB_HOST}:${WEB_PORT}:${WEB_PORT}"
    volumes:
      - "/opt/collector/data:${DATA_DIR}"
      - "/opt/collector/data/db.sqlite3:${DATA_DIR}/db.sqlite3"
    env_file:
      - ./.env
```

🔴

❗IMPORTANT❗

If you use a bind-mount and mount it into the application's storage, remember that
the user inside the container is unprivileged (UID 1000). If the mounted file
or directory is owned by root, the application will not be able to read it and
therefore will not work.

In a production environment, run the application behind your favorite reverse proxy.

Just add it to the docker-compose.yaml stack.

>You don't have to do this, but Gunicorn recommends following this rule.
>
>I agree with them, so consider yourself warned)

🔴
## Environment variables:

>The application can be configured by passing the environment variables listed below.
>If a variable is not set, its default value is used.

```
█▀▄ ░░█ ▄▀█ █▄░█ █▀▀ █▀█ ▀
█▄▀ █▄█ █▀█ █░▀█ █▄█ █▄█ ▄
```

| ENV                  | DEFAULT         | INFO                         |
| -------------------- | --------------- | ---------------------------- |
| SECRET_KEY           | j9QGbvM9Z4otb47 | ❗change this immediately    |
| DEBUG                | False           | use only False in prod       |
| ALLOWED_HOSTS        | '*'             | list separated by commas     |
| CSRF_TRUSTED_ORIGINS |                 | list separated by commas     |
| DB_URL               |                 | URL for connecting to the DB |
| TZ                   | 'UTC'           | server timezone              |

[CSRF_TRUSTED_ORIGINS](https://docs.djangoproject.com/en/4.2/ref/settings/#csrf-trusted-origins)

Required in a dockerized production environment;
accepts a comma-separated list of URLs:
>http://localhost,http://*.domain.com,http://127.0.0.1,http://0.0.0.0

[DB_URL](https://django-environ.readthedocs.io/en/latest/quickstart.html)

Must be set if you want to use PostgreSQL.
These values must match the PostgreSQL container variables:

| ENV               | VALUE          |
| ----------------- | -------------- |
| POSTGRES_USER     | admin          |
| POSTGRES_PASSWORD | ddkwndkjdX7RrP |
| POSTGRES_DB       | collector      |

Example:

#### psql://admin:ddkwndkjdX7RrP@psql-collector:5432/collector

- Protocol: **psql://**
- User: **admin**
- Password: **ddkwndkjdX7RrP**
- Address: **psql-collector**
- Port: **5432**
- Database name: **collector**

```
█▀▀ █░█ █▄░█ █ █▀▀ █▀█ █▀█ █▄░█ ▀
█▄█ █▄█ █░▀█ █ █▄▄ █▄█ █▀▄ █░▀█ ▄
```

| ENV                         | DEFAULT        |
| --------------------------- | -------------- |
| GUNICORN_BIND               | '0.0.0.0:8000' |
| GUNICORN_BACKLOG            | 2048           |
| GUNICORN_WORKERS            | 2              |
| GUNICORN_WORKER_CLASS       | 'sync'         |
| GUNICORN_WORKER_CONNECTIONS | 1000           |
| GUNICORN_THREADS            | 1              |
| GUNICORN_TIMEOUT            | 3600           |
| GUNICORN_KEEPALIVE          | 2              |
| GUNICORN_LOGLEVEL           | 'info'         |

[GUNICORN_*](https://docs.gunicorn.org/en/stable/settings.html)

Detailed information about each environment variable is available in the official documentation.

**GUNICORN_BIND**: do not change this; it controls the listening address and port inside the container.

**GUNICORN_TIMEOUT** is set to 3600 by default. Such a large timeout is needed for uploading large files.
Since I tried to keep the application minimalistic and avoid a task manager, files are uploaded in a single thread.

If an upload takes longer than an hour, the connection is dropped; this is how synchronous Gunicorn workers behave. If you need more time for uploads, increase this value.

❗IMPORTANT❗

Gunicorn is configured to write logs in the following format:
```python
'%({X-Forwarded-For}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
```
This means the log shows the request IP address only from the **X-Forwarded-For** header.

In a production environment the application must be behind a reverse proxy.


## Helpers

The scripts directory at the root of the project repository contains the uploader.sh script, which lets you send files from the console using curl.

The syntax is simple:

```cmd
Usage: ./uploader.sh [options [parameters]]

Options:

-f | --file     full path to upload file    required
-t | --token    access token                required
-u | --url      target url                  required
-v | --version  print version
-h | --help     print help
```


## License

GNU GPL 3.0
README.md (330 lines)
@@ -1,3 +1,329 @@
# logs-collector
# LOGS-COLLECTOR

Server side for receiving and storing log files
```sh
█░░ █▀█ █▀▀ █▀ ▄▄ █▀▀ █▀█ █░░ █░░ █▀▀ █▀▀ ▀█▀ █▀█ █▀█
█▄▄ █▄█ █▄█ ▄█ ░░ █▄▄ █▄█ █▄▄ █▄▄ ██▄ █▄▄ ░█░ █▄█ █▀▄
```

### [CHANGELOG.md](CHANGELOG.md)

### [Russian lang: README.md](README-ru.md)


## Purpose

If you develop software that clients later run in their own infrastructure,
you know how difficult it can sometimes be to investigate a problem
without access to the server the software runs on.

To solve this problem, you can configure the software to automatically send
anonymized crash reports, for example via Sentry.
This is not always acceptable to the client;
moreover, the information may be incomplete, or the client
may require stricter confidentiality.


## Terms

- Platform: the software developed by your company
- Ticket: the number associated with a ticket in your help desk system
- Archive: an uploaded log file (any format is supported)

## How does it work?

- Create platforms
- Create a ticket associated with a platform and a number
- Give the client the ticket's unique token
- The client uploads an archive of log files
- Download the archive (and find a solution to the problem)
- Delete the archive or the ticket, or mark the ticket as resolved

## Features

- Centralized storage;
- No auth credentials are needed to upload a file;
- Each upload token is unique and associated with only one ticket;
- A token has a limit on the number of attempts and a lifetime;
- Files can be uploaded from the console or via the web;
- Fully featured RESTful API v1;
- Monitoring of free space in storage;
- Deleting an archive or ticket also deletes the physical files;
- The application follows the twelve-factor app architecture;
- Flexible deployment configuration using environment variables;
- The application is dockerized, the image size is less than 150 MB;
- Works with both sqlite3 and PostgreSQL 15;
- Static files are served without extra web server configuration;
- Healthcheck endpoint for checking application availability;

## Security

- The upload token is not tied to authorization
- The upload token has high entropy
- Two-factor authentication for users
- 2FA is enforced for downloading files
- The admin panel is patched to enforce 2FA
- The user inside the container is unprivileged
- Standard Django and DRF protection mechanisms

## Install

### From the docker image:

- Create a directory for the application wherever convenient
- Create a docker-compose.yml file in the application directory
- Create a .env file in the application directory
- Fill the .env file with the required environment variables, see below

>Example file using a docker volume and sqlite as the default database:

```yaml
version: "3"

# to set environment variables:
# create a .env file in the same directory as docker-compose.yaml

services:
  server:
    image: mois3y/logs_collector:0.1.0
    container_name: logs-collector
    restart: unless-stopped
    env_file:
      - ./.env
    ports:
      - "80:8000"
    volumes:
      - /etc/timezone:/etc/timezone:ro  # optional
      - /etc/localtime:/etc/localtime:ro  # optional
      - logs_collector_data:/data

volumes:
  logs_collector_data:
```
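Once docker-compose.yml and .env are in place, the stack can be started and checked like this. This is only a sketch; it assumes the file layout and service name from the example above:

```sh
cd /path/to/app-directory      # the directory holding docker-compose.yml and .env
docker-compose up -d           # pull the image and start the container in the background
docker-compose ps              # confirm the logs-collector container is running
docker-compose logs -f server  # follow application logs (service name from the example above)
```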
### From the source:

- Clone the repository
- docker-compose.yaml is already in the project directory
- create a .env file in the project root
- fill .env with the required environment variables, see below
- build the image and run the container in the background:

```sh
docker-compose up -d --build
```

- You can create your own file and make the necessary edits:

#### docker-compose.yaml with PostgreSQL by default:

```yaml
services:
  logs_collector:
    container_name: logs-collector
    build:
      context: .
      args:
        - VERSION=${VERSION}
        - SRC_DIR=${SRC_DIR}
        - SCRIPTS_DIR=${SCRIPTS_DIR}
        - APP_DIR=${APP_DIR}
        - DATA_DIR=${DATA_DIR}
        - WEB_PORT=${WEB_PORT}
        - USER_NAME=${USER_NAME}
        - USER_GROUP=${USER_GROUP}
        - APP_UID=${APP_UID}
        - APP_GID=${APP_GID}
    ports:
      - "${WEB_HOST}:${WEB_PORT}:${WEB_PORT}"
    volumes:
      - type: volume
        source: logs_collector_data
        target: ${APP_DIR}/data
    env_file:
      - ./.env
    depends_on:
      - db

  db:
    image: postgres:15-alpine3.18
    container_name: psql-collector
    volumes:
      - logs_collector_psql_data:/var/lib/postgresql/data/
    env_file:
      - ./.env

volumes:
  logs_collector_data:
  logs_collector_psql_data:
```

#### docker-compose-example-psql.yaml with sqlite and bind-mount:

```yaml
version: "3"

# to set environment variables:
# create a .env file in the same directory as docker-compose.yaml

services:
  logs_collector:
    container_name: logs-collector
    build:
      context: .
      args:
        - VERSION=${VERSION}
        - SRC_DIR=${SRC_DIR}
        - SCRIPTS_DIR=${SCRIPTS_DIR}
        - APP_DIR=${APP_DIR}
        - DATA_DIR=${DATA_DIR}
        - WEB_PORT=${WEB_PORT}
        - USER_NAME=${USER_NAME}
        - USER_GROUP=${USER_GROUP}
        - APP_UID=${APP_UID}
        - APP_GID=${APP_GID}
    ports:
      - "${WEB_HOST}:${WEB_PORT}:${WEB_PORT}"
    volumes:
      - "/opt/collector/data:${DATA_DIR}"
      - "/opt/collector/data/db.sqlite3:${DATA_DIR}/db.sqlite3"
    env_file:
      - ./.env
```

🔴

❗IMPORTANT❗

If you use a bind-mount and mount it into the application's storage,
remember that the user inside the container is unprivileged (UID 1000).
If the mounted file or directory is owned by root,
the application will not be able to read it and therefore will not work.

In a production environment, run the application behind your favorite reverse proxy.

Just add it to the docker-compose.yaml stack.

>You don't have to do this, but Gunicorn recommends following this rule.
>
>I agree with them, so you have been warned)

🔴

## Environment variables:

>The application can be configured by passing the environment variables listed below.
>If a variable is not set, its default value is used.

```
█▀▄ ░░█ ▄▀█ █▄░█ █▀▀ █▀█ ▀
█▄▀ █▄█ █▀█ █░▀█ █▄█ █▄█ ▄
```

| ENV                  | DEFAULT         | INFO                         |
| -------------------- | --------------- | ---------------------------- |
| SECRET_KEY           | j9QGbvM9Z4otb47 | ❗change this immediately    |
| DEBUG                | False           | use only False in prod       |
| ALLOWED_HOSTS        | '*'             | list separated by commas     |
| CSRF_TRUSTED_ORIGINS |                 | list separated by commas     |
| DB_URL               |                 | URL for connecting to the DB |
| TZ                   | 'UTC'           | server timezone              |

[CSRF_TRUSTED_ORIGINS](https://docs.djangoproject.com/en/4.2/ref/settings/#csrf-trusted-origins)

Required in a dockerized production environment;
accepts a comma-separated list of URLs:
>http://localhost,http://*.domain.com,http://127.0.0.1,http://0.0.0.0

[DB_URL](https://django-environ.readthedocs.io/en/latest/quickstart.html)

Must be specified if you want to use PostgreSQL.
These values must match the PostgreSQL container variables:

| ENV               | VALUE          |
| ----------------- | -------------- |
| POSTGRES_USER     | admin          |
| POSTGRES_PASSWORD | ddkwndkjdX7RrP |
| POSTGRES_DB       | collector      |

Example:

#### psql://admin:ddkwndkjdX7RrP@psql-collector:5432/collector

- Protocol: **psql://**
- User: **admin**
- Password: **ddkwndkjdX7RrP**
- Address: **psql-collector**
- Port: **5432**
- Database name: **collector**
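Putting the variables together, a .env file for the PostgreSQL setup above might look like the sketch below. The values are placeholders built from the examples in this section, not real credentials; adjust them to your deployment.

```sh
# .env (placeholder values based on the examples above)
SECRET_KEY=replace-me-with-a-long-random-string
DEBUG=False
ALLOWED_HOSTS=*
CSRF_TRUSTED_ORIGINS=http://localhost,http://127.0.0.1
TZ=UTC

# PostgreSQL: DB_URL must match the POSTGRES_* variables of the db container
POSTGRES_USER=admin
POSTGRES_PASSWORD=ddkwndkjdX7RrP
POSTGRES_DB=collector
DB_URL=psql://admin:ddkwndkjdX7RrP@psql-collector:5432/collector
```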
```
█▀▀ █░█ █▄░█ █ █▀▀ █▀█ █▀█ █▄░█ ▀
█▄█ █▄█ █░▀█ █ █▄▄ █▄█ █▀▄ █░▀█ ▄
```

| ENV                         | DEFAULT        |
| --------------------------- | -------------- |
| GUNICORN_BIND               | '0.0.0.0:8000' |
| GUNICORN_BACKLOG            | 2048           |
| GUNICORN_WORKERS            | 2              |
| GUNICORN_WORKER_CLASS       | 'sync'         |
| GUNICORN_WORKER_CONNECTIONS | 1000           |
| GUNICORN_THREADS            | 1              |
| GUNICORN_TIMEOUT            | 3600           |
| GUNICORN_KEEPALIVE          | 2              |
| GUNICORN_LOGLEVEL           | 'info'         |

[GUNICORN_*](https://docs.gunicorn.org/en/stable/settings.html)

Detailed information about each environment variable is available in
the official documentation.

**GUNICORN_BIND**: do not change this; the variable
controls the listening address and port inside the container.

**GUNICORN_TIMEOUT** is set to 3600 by default.
Such a large timeout is needed for uploading large files.
Since I tried to keep the application minimalistic and avoid a task manager,
files are uploaded in a single thread.

If an upload takes longer than an hour, the connection is dropped;
this is how synchronous Gunicorn workers behave.
If you need more time for uploads, you can increase this value.
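For example, to allow uploads of up to two hours you could add the override below to the same .env file; any GUNICORN_* variable from the table above can be set the same way.

```sh
# .env: give synchronous workers two hours per request instead of one
GUNICORN_TIMEOUT=7200
```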
❗IMPORTANT❗

Gunicorn is configured to write logs in the following format:
```python
'%({X-Forwarded-For}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
```
This means the log shows the request IP address only from the **X-Forwarded-For** header.

In a production environment the application must be behind a reverse proxy.


## Helpers

The scripts directory at the root of the project repository
contains the uploader.sh script, which lets you send files
from the console using **curl**.

The syntax is simple:

```cmd
Usage: ./uploader.sh [options [parameters]]

Options:

-f | --file     full path to upload file    required
-t | --token    access token                required
-u | --url      target url                  required
-v | --version  print version
-h | --help     print help
```
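A typical invocation looks like the sketch below; the archive path, token, and URL are placeholders, and the exact API path depends on how the application is exposed behind your proxy.

```sh
# Upload an archive with the ticket's upload token (placeholder values)
./scripts/uploader.sh \
  --file /var/log/myapp/logs.tar.gz \
  --token 123e4567-e89b-12d3-a456-426614174000 \
  --url https://collector.example.com/api/v1/archives/
```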
## License

GNU GPL 3.0
@@ -27,6 +27,12 @@ services:
        target: ${APP_DIR}/data
    env_file:
      - ./.env
    healthcheck:
      test: ["CMD", "curl", "-f", "http://${WEB_HOST}:${WEB_PORT}/${HEALTHCHECK_URL}"]
      interval: 1m30s
      timeout: 10s
      retries: 3
      start_period: 40s

volumes:
  logs_collector_data:
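With this healthcheck in place, Docker records the probe results on the container. A quick way to read them (container name taken from the compose examples above):

```sh
# Show the current health status and the last probe results recorded by Docker
docker inspect --format '{{ json .State.Health }}' logs-collector
```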
@@ -7,4 +7,4 @@ __license__ = "GPL v3.0"
__version__ = "0.1.0"
__maintainer__ = "Stepan Zhukovsky"
__email__ = "stepan@zhukovsky.me"
__status__ = "Development"
__status__ = "Production"
@@ -11,3 +11,15 @@ class IsGuestUpload(permissions.BasePermission):
            return True

        return request.user.is_authenticated


class IsGuestCheckUrls(permissions.BasePermission):
    """
    Special permission class for the ability to upload attachments
    to an unauthorized user using a ticket token
    """
    def has_permission(self, request, view):
        if request.method in ('HEAD', 'OPTIONS', 'GET',):
            return True

        return request.user.is_authenticated
@@ -67,3 +67,25 @@ class StorageInfoSerializer(serializers.Serializer):
    free = serializers.IntegerField(read_only=True)
    used_percent = serializers.IntegerField(read_only=True)
    status = serializers.CharField(read_only=True)


class TokenStateRootSerializer(serializers.Serializer):
    info = serializers.CharField(read_only=True, default="manual message")


class TokenStateSerializer(serializers.ModelSerializer):
    token = serializers.UUIDField(read_only=True)
    attempts = serializers.IntegerField(read_only=True)
    resolved = serializers.BooleanField(read_only=True)

    class Meta:
        model = Ticket
        fields = [
            'token',
            'attempts',
            'resolved'
        ]


class AppHealthInfoSerializer(serializers.Serializer):
    status = serializers.CharField(read_only=True, default="ok")
@@ -15,8 +15,31 @@ router.register(r'archives', views.ArchiveViewSet)
router.register(r'platforms', views.PlatformViewSet)
router.register(r'tickets', views.TicketViewSet)

check_urlpatterns = [
    path(
        'health/',
        views.AppHealthInfo.as_view(),
        name='app-info'
    ),
    path(
        'storage/',
        views.StorageInfo.as_view(),
        name='storage-info'
    ),
    path(
        'token/',
        views.TokenStateRoot.as_view(),
        name='token-root'
    ),
    path(
        'token/<str:token>',
        views.TokenStateInfo.as_view(),
        name='token-info'
    ),
]

urlpatterns = [
    # CRUD:
    path('v1/', include(router.urls)),
    path('v1/storage/', views.StorageInfo.as_view(), name='storage-info'),
    path('v1/check/', include(check_urlpatterns)),
]
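These check_urlpatterns give unauthenticated clients read-only status endpoints. A hedged sketch of how they could be called with curl, assuming the API is mounted under /api/ on the host (the actual prefix is defined elsewhere in the project URLs):

```sh
# Application health (AppHealthInfo); expected body: {"status": "ok"}
curl http://localhost/api/v1/check/health/

# Storage usage (StorageInfo)
curl http://localhost/api/v1/check/storage/

# State of a specific upload token (TokenStateInfo); the UUID is a placeholder
curl http://localhost/api/v1/check/token/123e4567-e89b-12d3-a456-426614174000
```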
@@ -10,9 +10,7 @@ from rest_framework.parsers import (
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework import views
from rest_framework import filters
from rest_framework import filters, generics, views, viewsets

from django_filters.rest_framework import DjangoFilterBackend

@@ -23,13 +21,16 @@ from collector.models import Archive, Ticket, Platform
from collector.utils.helpers import get_mount_fs_info

from .filters import ArchiveFilter, TicketFilter
from .permissions import IsGuestUpload
from .permissions import IsGuestUpload, IsGuestCheckUrls
from .serializers import (
    PublicArchiveUploadSerializer,
    ArchiveSerializer,
    PlatformSerializer,
    TicketSerializer,
    StorageInfoSerializer,
    TokenStateSerializer,
    AppHealthInfoSerializer,
    TokenStateRootSerializer,
)
@@ -82,29 +83,35 @@ class ArchiveViewSet(viewsets.ModelViewSet):
                bound_ticket = Ticket.objects.get(token=upload_token)
                if bound_ticket.resolved:
                    return Response(
                        {'error': f'ticket {bound_ticket} already resolved'},
                        {'detail': f'ticket {bound_ticket} already resolved'},
                        status=status.HTTP_423_LOCKED
                    )
                if bound_ticket.attempts <= 0:
                    return Response(
                        {'error': f'token {upload_token} expired'},
                        {'detail': f'token {upload_token} expired'},
                        status=status.HTTP_423_LOCKED
                    )
                bound_ticket.attempts -= 1
                bound_ticket.save()
                # ? mixin bound ticket number to request.data from user
                try:
                    request.data['ticket'] = bound_ticket.number
                except AttributeError:
                    return Response(
                        {'detail': 'Bad Request'},
                        status=status.HTTP_400_BAD_REQUEST
                    )
                # ? change serializer for guest user
                if not request.user.is_authenticated:
                    self.serializer_class = PublicArchiveUploadSerializer
            except (ValidationError, ObjectDoesNotExist,):
                return Response(
                    {'error': f'token {upload_token} is not valid'},
                    {'detail': f'token {upload_token} is not valid'},
                    status=status.HTTP_403_FORBIDDEN
                )
        else:
            return Response(
                {'error': 'Header Upload-Token is required'},
                {'detail': 'Header Upload-Token is required'},
                status=status.HTTP_401_UNAUTHORIZED
            )
        # ! default create method:
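This branch drives the guest upload flow: a valid Upload-Token header decrements the ticket's attempts and accepts the file, while resolved tickets, exhausted or invalid tokens, a malformed body, and a missing header map to 423, 403, 400, and 401 responses with a detail message. A sketch of exercising it with curl (placeholder token, file path, and URL; the API prefix may differ in your deployment):

```sh
# Guest upload using only the ticket's upload token (no auth credentials)
curl -X POST https://collector.example.com/api/v1/archives/ \
  -H 'accept: application/json' \
  -H 'Upload-Token: 123e4567-e89b-12d3-a456-426614174000' \
  -H 'Content-Type: multipart/form-data' \
  -F 'file=@/var/log/myapp/logs.tar.gz'
```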
@@ -172,3 +179,40 @@ class StorageInfo(views.APIView):
    )
    def get(self, request):
        return Response(get_mount_fs_info(settings.DATA_DIR))


class TokenStateRoot(views.APIView):
    """ Show the message of a specific upload token URL"""
    permission_classes = (IsGuestCheckUrls,)

    @extend_schema(
        responses=TokenStateRootSerializer,
        summary='Show info message how get token status'
    )
    def get(self, request):
        message = "to find out the status of the token, place it in the URL"
        return Response({"detail": message}, status=status.HTTP_303_SEE_OTHER)


@extend_schema_view(
    get=extend_schema(
        summary='Show the status of a specific upload token'
    )
)
class TokenStateInfo(generics.RetrieveAPIView):
    """ Show the status of a specific upload token"""
    queryset = Ticket.objects.order_by('-time_create')
    lookup_field = 'token'
    serializer_class = TokenStateSerializer
    permission_classes = (IsGuestCheckUrls,)


class AppHealthInfo(views.APIView):
    permission_classes = (IsGuestCheckUrls,)

    @extend_schema(
        responses=AppHealthInfoSerializer,
        summary='Show app status'
    )
    def get(self, request):
        return Response({'status': 'ok'}, status=status.HTTP_200_OK)
logs_collector/collector/management/__init__.py (Normal file, 0 lines)
@@ -0,0 +1,96 @@
import os
import logging

from django.core.management.base import BaseCommand
from django.apps import apps
from django.db.models import Q
from django.conf import settings
from django.db.models import FileField


logger = logging.getLogger(__name__)

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'console': {
            'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'console'
        },
    },
    'loggers': {
        '': {
            'level': 'INFO',
            'handlers': ['console']
        }
    }
})


class Command(BaseCommand):
    # HELP MESSAGE:
    help_part1 = 'This command deletes all media files from'
    help_part2 = 'the MEDIA_ROOT directory which are no longer referenced'
    help_part3 = 'by any of the models from installed_apps'
    help = f'{help_part1} {help_part2} {help_part3}'

    def handle(self, *args, **options):
        logger.info('Start cleanup storage....')
        all_models = apps.get_models()
        physical_files = set()
        db_files = set()
        # Get all files from the database
        logger.info('Get all files from the database....')
        for model in all_models:
            file_fields = []
            filters = Q()
            for f_ in model._meta.fields:
                if isinstance(f_, FileField):
                    file_fields.append(f_.name)
                    is_null = {'{}__isnull'.format(f_.name): True}
                    is_empty = {'{}__exact'.format(f_.name): ''}
                    filters &= Q(**is_null) | Q(**is_empty)
            # only retrieve the models which have non-empty,
            # non-null file fields
            if file_fields:
                files = model.objects.exclude(filters).values_list(
                    *file_fields,
                    flat=True
                ).distinct()
                db_files.update(files)
        logger.info(f'Find: {len(db_files)} files from the database')
        # Get all files from the MEDIA_ROOT, recursively
        logger.info('Get all files from the MEDIA_ROOT, recursively....')
        media_root = getattr(settings, 'MEDIA_ROOT', None)
        if media_root is not None:
            for relative_root, dirs, files in os.walk(media_root):
                for file_ in files:
                    # Compute the relative file path to the media directory,
                    # so it can be compared to the values from the db
                    relative_file = os.path.join(
                        os.path.relpath(relative_root, media_root), file_
                    )
                    physical_files.add(relative_file)
        logger.info(f'Find: {len(physical_files)} files from the MEDIA_ROOT')
        # Compute the difference and delete those files
        logger.info('Compute the difference and delete those files....')
        deletables = physical_files - db_files
        logger.info(f'Find: {len(deletables)} orphan files')
        if deletables:
            for file_ in deletables:
                logger.info(f"Delete orphan file: {file_}")
                os.remove(os.path.join(media_root, file_))
            # Bottom-up - delete all empty folders
            logger.info('Bottom-up - delete all empty folders....')
            for relative_root, dirs, files in os.walk(
                    media_root, topdown=False):
                for dir_ in dirs:
                    if not os.listdir(os.path.join(relative_root, dir_)):
                        os.rmdir(os.path.join(relative_root, dir_))
            logger.info('Done! Storage has been cleaned up')
        logger.info('Done! Nothing to delete')
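This management command removes orphaned media files. The name of the command module is not shown in this diff, so the invocation below is hypothetical; substitute the actual module name under collector/management/commands/, and note that it assumes manage.py is reachable from the container's working directory.

```sh
# Hypothetical command name; run the cleanup inside the application container
docker exec -it logs-collector python manage.py cleanup_storage
```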
logs_collector/collector/middleware.py (Normal file, 24 lines)
@@ -0,0 +1,24 @@
from django.http import HttpResponse
from django.template import loader


class HttpResponseNotAllowedMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):

        # Code to be executed for each request before
        # the view (and later middleware) are called.

        response = self.get_response(request)

        # Code to be executed for each request/response after
        # the view is called.
        if response.status_code == 405:
            context = {}
            template = loader.get_template('405.html')
            return HttpResponse(template.render(context, request))

        return response
@@ -66,4 +66,18 @@ const updateStorageInfo = () => {
  });
};

export {sizify, updateBsTooltip, updateStorageInfo};
const genAlertMessage = (
  alertMessage='Success message',
  alertType='success',
  extraClass=''
) => {
  let alertMessageHTML = [
    `<div class="alert alert-${alertType} alert-dismissible ${extraClass}" role="alert">`,
    `  <div>${alertMessage}</div>`,
    '  <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>',
    '</div>'
  ].join('')
  return alertMessageHTML
}

export {sizify, updateBsTooltip, updateStorageInfo, genAlertMessage};
@@ -1,23 +1,33 @@
import {updateStorageInfo} from "./helpers.js";
import {updateStorageInfo, genAlertMessage} from "./helpers.js";

$(function () {
  // set global variables:
  const uploadForm = document.getElementById('upload_form');
  const input_file = document.getElementById('id_file');
  const progress_bar = document.getElementById('progress');
  const alert_container = document.getElementById('alert');

  const inputFile = document.getElementById('id_file');
  const progressBar = document.getElementById('progress');
  const alertContainer = document.getElementById('alert');
  // get upload form:
  $("#upload_form").submit(function(e){
    e.preventDefault();
    // $form = $(this)
    // collect request data:
    let formData = new FormData(this);
    let upload_token = formData.get("token")
    const media_data = input_file.files[0];
    if(media_data != null){
      progress_bar.classList.remove("not-visible");
    let uploadToken = formData.get("token")
    // generate the URL for token validation:
    let tokenStatusUrl = [
      progressBar.getAttribute('token-status-url'),
      uploadToken
    ].join('')
    // init upload file func:
    const uploadFile = () => {
      // toggle visible progress bar:
      const mediaData = inputFile.files[0];
      if(mediaData != null){
        progressBar.classList.remove("not-visible");
      }
      // upload file (chunk) xrh request:
      $.ajax({
        type: 'POST',
        url: progress_bar.getAttribute("upload-url"),
        url: progressBar.getAttribute("upload-url"),
        data: formData,
        dataType: 'json',
        xhr:function(){
@@ -27,7 +37,7 @@ $(function () {
          if(e.lengthComputable){
            const percentProgress = (e.loaded/e.total)*100;
            console.log(percentProgress);
            progress_bar.innerHTML = `
            progressBar.innerHTML = `
            <div
              class="progress-bar progress-bar-striped progress-bar-animated"
              style="width: ${percentProgress}%"
@@ -37,22 +47,20 @@ $(function () {
          });
          return xhr
        },
        // set auth method:
        beforeSend: function(xhr) {
          if (upload_token) {
            xhr.setRequestHeader("Upload-Token", upload_token);
          if (uploadToken) {
            xhr.setRequestHeader("Upload-Token", uploadToken);
          }
        },
        success: function(data, textStatus, jqXHR){
          console.log(jqXHR.status);
          let type = "success";
          alert_container.innerHTML = [
            `<div class="alert alert-${type} alert-dismissible col-lg-6" role="alert">`,
            `  <div>The file has been successfully uploaded to the server. Thank you!</div>`,
            '  <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>',
            '</div>'
          ].join('')
          alertContainer.innerHTML = genAlertMessage(
            'The file has been successfully uploaded to the server. Thank you!',
            'success',
            'col-lg-6'
          )
          uploadForm.reset()
          progress_bar.classList.add('not-visible')
          progressBar.classList.add('not-visible')
          try {
            updateStorageInfo();
          } catch (error) {
@@ -60,29 +68,71 @@ $(function () {
          };
        },
        error: function(jqXHR, textStatus, errorThrown){
          console.log(jqXHR);
          let type = "danger";
          let error_message = "Unexpected error. Try again please"
          if (jqXHR.status === 423) {
            error_message = `Error ${jqXHR.status}: ${jqXHR.responseJSON.error}`
          }
          if (jqXHR.status === 403) {
            error_message = `Error ${jqXHR.status}: ${jqXHR.responseJSON.error}`
          let errorMessage = "Unexpected error. Try again please"
          if (jqXHR.status === 423 || jqXHR.status === 403) {
            errorMessage = `Error ${jqXHR.status} <br> ${jqXHR.responseJSON.detail}`
          }
          if (jqXHR.status === 401) {
            error_message = 'The token field cannot be empty'
            errorMessage = `Error ${jqXHR.status} <br> The token field cannot be empty`
          }
          alert_container.innerHTML = [
            `<div class="alert alert-${type} alert-dismissible col-lg-6" role="alert">`,
            `  <div>${error_message}</div>`,
            '  <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>',
            '</div>'
          ].join('')
          progress_bar.classList.add('not-visible')
          if (jqXHR.status === 400) {
            errorMessage = `Error ${jqXHR.status} <br> ${jqXHR.responseJSON.detail}`
          }
          alertContainer.innerHTML = genAlertMessage(
            errorMessage,
            'danger',
            'col-lg-6'
          )
          progressBar.classList.add('not-visible')
        },
        cache: false,
        contentType: false,
        processData: false,
      });
    }
    // check token status and upload file if token valid:
    $.ajax({
      type: 'GET',
      url: tokenStatusUrl,
      dataType: "json",
      success: function (data, textStatus, jqXHR) {
        if (data.attempts === 0) {
          alertContainer.innerHTML = genAlertMessage(
            `Error 423 <br> Token: ${uploadToken} expired`,
            'danger',
            'col-lg-6'
          );
        }
        else if (data.resolved === true) {
          alertContainer.innerHTML = genAlertMessage(
            `Error 423 <br> Ticket bound with token: ${uploadToken} <br> already resolved`,
            'danger',
            'col-lg-6'
          );
        } else {
          alertContainer.innerHTML = genAlertMessage(
            `Token: ${uploadToken} is valid. <br> Starting to upload...`,
            'success',
            'col-lg-6'
          );
          uploadFile();
        };
      },
      error: function(jqXHR){
        if (jqXHR.responseJSON.detail) {
          alertContainer.innerHTML = genAlertMessage(
            `Error 403 <br> Token: ${uploadToken} is not valid`,
            'danger',
            'col-lg-6'
          )
        } else {
          alertContainer.innerHTML = genAlertMessage(
            `Unexpected error. Try again please`,
            'danger',
            'col-lg-6'
          )
        }
      },
    });
  });
});
@@ -35,7 +35,8 @@
  <div
    id="progress"
    upload-url="{% url 'collector_api:archive-list' %}"
    class="progress"
    token-status-url="{% url 'collector_api:token-root' %}"
    class="progress not-visible"
    role="progressbar"
    aria-label="Example 20px high"
    aria-valuenow="25"
@@ -1,5 +1,5 @@
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import FileResponse
from django.http import FileResponse, Http404
from django.views import generic
from django.views.generic.detail import SingleObjectMixin
from django.db.models import Q

@@ -35,6 +35,10 @@ class ArchiveHandlerView(

    def get(self, request, path):
        self.object = self.get_object()
        try:
            self.object.file.size
        except FileNotFoundError:
            raise Http404(f'File: {self.object.file} not found')
        return FileResponse(self.object.file)
@@ -26,4 +26,4 @@ __license__ = "GPL v3.0"
__version__ = "0.1.0"
__maintainer__ = "Stepan Zhukovsky"
__email__ = "stepan@zhukovsky.me"
__status__ = "Development"
__status__ = "Production"
@@ -89,6 +89,7 @@ MIDDLEWARE = [
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'collector.middleware.HttpResponseNotAllowedMiddleware',
]

ROOT_URLCONF = 'logs_collector.urls'
logs_collector/templates/403_csrf.html (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% extends 'errors.html' %}
{% load static %}

{% block title %} Logs Collector - CSRF error {% endblock title %}

{% block status_code %}403{% endblock status_code %}
{% block error_message %}
  <p class="fs-3"> <span class="text-danger">Oops!</span> CSRF verification failed.</p>
  <p class="lead">Request aborted</p>
{% endblock error_message %}
logs_collector/templates/404.html (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% extends 'errors.html' %}
{% load static %}

{% block title %} Logs Collector - Not Found {% endblock title %}

{% block status_code %}404{% endblock status_code %}
{% block error_message %}
  <p class="fs-3"> <span class="text-danger">Oops!</span> Page not found.</p>
  <p class="lead">The content you're looking for doesn't exist.</p>
{% endblock error_message %}
logs_collector/templates/405.html (Normal file, 14 lines)
@@ -0,0 +1,14 @@
{% extends 'errors.html' %}
{% load static %}

{% block title %} Logs Collector - Method not allowed {% endblock title %}

{% block status_code %}405{% endblock status_code %}
{% block error_message %}
  <p class="fs-3"> <span class="text-danger">Oops!</span> Method not allowed</p>
  <p class="lead">
    Request method:
    <span class="text-danger">{{ request.method }}</span>
    isn't allowed for this URL
  </p>
{% endblock error_message %}
logs_collector/templates/500.html (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% extends 'errors.html' %}
{% load static %}

{% block title %} Logs Collector - Server error {% endblock title %}

{% block status_code %}500{% endblock status_code %}
{% block error_message %}
  <p class="fs-3"> <span class="text-danger">Oops!</span> Server error</p>
  <p class="lead">Unexpected error, please try again or contact the system admin</p>
{% endblock error_message %}
@@ -34,10 +34,12 @@
    rel="stylesheet"
    href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.10.5/font/bootstrap-icons.css"
  >
  {% block errors_head %}{% endblock errors_head %}
  {% block collector_head %}{% endblock collector_head %}
  {% block account_head %}{% endblock account_head %}
</head>
<body class="d-flex flex-column min-vh-100">
  {% block http_errors %}{% endblock http_errors %}
  {% block collector_content %}{% endblock collector_content %}
  {% block account_content %}{% endblock account_content %}
  <!-- BS dependences JS-->
logs_collector/templates/errors.html (Normal file, 20 lines)
@@ -0,0 +1,20 @@
{% extends 'base.html' %}
{% load static %}

{% block errors_head %}
  <title>{% block title %}{% endblock title %}</title>
{% endblock errors_head %}

{% block http_errors %}
  <div class="d-flex align-items-center justify-content-center vh-100" >
    <div class="text-center">
      <h1 class="display-1 fw-bold">{% block status_code %}{% endblock status_code %}</h1>
      {% block error_message %}{% endblock error_message %}
      <a href="{% url 'collector:index' %}" class="btn btn-secondary">Go Home</a>
    </div>
    <!-- Theme switcher -->
    <div class="dropdown position-fixed bottom-0 end-0 mb-3 me-3 bd-mode-toggle">
      {% include 'includes/theme_switcher.html' %}
    </div>
  </div>
{% endblock http_errors %}
@@ -1,21 +1,29 @@
<nav class="navbar navbar-expand-lg bg-body-tertiary">
  <div class="container d-flex justify-content-between">
  <div class="container">
    <ul class="navbar-nav">
      <li class="nav-item">
        <span class="text-muted">
        <button class="btn">
          v{{ version }}
          {% if environment != 'Production' %}
            Staging: {{ environment }}
          {% endif %}
          | © {{ author }}
        </span>
        </button>
      </li>
      <!-- Separator -->
      <li class="nav-item py-2 py-lg-1 col-12 col-lg-auto">
        <div class="vr d-none d-lg-flex h-100 mx-lg-2 text-white"></div>
        <hr class="d-lg-none my-2 text-white-50">
      </li>
      <li class="nav-item">
        <button class='btn'> © {{ author }} </button>
      </li>
    </ul>
    {% if request.user.is_authenticated %}
    <ul class="navbar-nav">
      <li class="nav-item">
      <li>
        <a
          class="nav-link"
          class="btn"
          type="button"
          href="{% url 'swagger-ui' %}"
          target="_blank"
@@ -28,5 +36,6 @@
        </a>
      </li>
    </ul>
    {% endif %}
  </div>
</nav>
scripts/uploader.sh (Executable file, 159 lines)
@@ -0,0 +1,159 @@
#!/usr/bin/env bash


# INIT GLOBAL VARIABLES:
_VERSION="0.1.0"
_PACKGMGR="apt yum"
_SCRIPT_NAME="$0"
_CMD="curl"
_FILE=""
_TOKEN=""
_URL=""


# Colorize output
# Usage - $(colorize CYAN "Hello, friend!")
colorize() {
  local RED="\033[0;31m"
  local GREEN="\033[0;32m"   # <-- [0 means not bold
  local YELLOW="\033[1;33m"  # <-- [1 means bold
  local BLUE="\033[0;34m"
  local MAGNETA="\033[0;35m"
  local CYAN="\033[1;36m"
  # ... Add more colors if you like

  local NC="\033[0m"  # No Color

  # printf "${(P)1}${2} ${NC}\n"  # <-- zsh
  # printf "${!1}${2} ${NC}\n"    # <-- bash
  echo -e "${!1}${2}${NC}"        # <-- all-purpose
}


# checks whether the utility is installed
# takes the util name as input $_CMD
check_util_exists() {
  local request_util=$1
  if ! command -v $request_util >/dev/null; then
    return 1
  fi
}


# Print a help message on how to use this script
help() {
  local script=$(colorize GREEN "$_SCRIPT_NAME")
  local required=$(colorize RED "required")
  printf "Usage: $script [options [parameters]]\n"
  printf "\n"
  printf "Options:\n"
  printf "\n"
  printf " -f | --file     full path to upload file    $required\n"
  printf " -t | --token    access token                $required\n"
  printf " -u | --url      target url                  $required\n"
  printf " -v | --version  print version\n"
  printf " -h | --help     print help\n"
}


# parse user arguments
argparser() {
  # count user-passed arguments:
  local count_arguments=$#
  # run help if empty and exit:
  if [[ count_arguments -eq 0 ]]; then
    help
    exit 2
  fi
  # parse args:
  while [ ! -z "$1" ]; do
    case "$1" in
      --file|-f)
        shift
        _FILE="$1"
        ;;
      --token|-t)
        shift
        _TOKEN="$1"
        ;;
      --url|-u)
        shift
        _URL="$1"
        ;;
      --help|-h)
        help
        exit 0
        ;;
      --version|-v)
        printf "$_VERSION\n"
        exit 0
        ;;
      *)
        help
        exit 2
        ;;
    esac
    shift
  done
}


# check that curl exists:
curl_is_exists() {
  if ! check_util_exists $_CMD; then
    local error_cmd=$(colorize RED "$_CMD")
    printf "$(colorize RED "ERROR"): upload util doesn't exist, "
    printf "please install $error_cmd before run $_SCRIPT_NAME\n"
    # Print how to install curl (supports only apt/yum):
    for pkgmgr in $_PACKGMGR; do
      if check_util_exists $pkgmgr; then
        printf "$(colorize GREEN "RUN"): $pkgmgr install $error_cmd"
      fi
    done
    exit 1
  fi
}


validate_args() {
  if [[ -z $_URL ]]; then
    printf "$(colorize RED "ERROR"): -u | --url argument is required\n"
    exit 1
  fi
  if [[ -z $_FILE ]]; then
    printf "$(colorize RED "ERROR"): -f | --file argument is required\n"
    exit 1
  fi
  if [[ -z $_TOKEN ]]; then
    printf "$(colorize RED "ERROR"): -t | --token argument is required\n"
    exit 1
  fi
}


# Upload a file using curl
# gets $_URL $_FILE $_TOKEN
upload() {
  local url=$1
  local file=$2
  local token=$3
  # run:
  curl --progress-bar -X 'POST' \
    "${url}" \
    -H 'accept: application/json' \
    -H "Upload-Token: ${token} " \
    -H 'Content-Type: multipart/form-data' \
    -F "file=@${file}" | cat  # cat required to show progress bar
}


main () {
  argparser $@
  curl_is_exists
  validate_args
  upload $_URL $_FILE $_TOKEN
}


# RUN IT:
main $@
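If the script was checked out without its executable bit, it can still be run like this (paths relative to the repository root):

```sh
chmod +x scripts/uploader.sh   # only needed if the executable bit was lost
./scripts/uploader.sh --help   # print the usage summary shown above
```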