Using gunicorn to run up to 4 workers. This should fix the problem of the whole program crashing when a single worker dies.

This commit is contained in:
Amritanshu Agrawal 2022-04-16 07:57:18 +05:30
parent c494284517
commit 3eaf4cd41d
5 changed files with 125 additions and 1 deletions

2
.env
View File

@ -6,6 +6,8 @@ SQLALCHEMY_DATABASE_URI=postgresql://postgres:123456@localhost:5432/petty
MODULE_NAME=barker.main
PROJECT_NAME=barker
MAX_WORKERS=4
REDIS_HOST=localhost
REDIS_PORT=6379

68
barker/gunicorn.conf.py Normal file
View File

@ -0,0 +1,68 @@
"""Gunicorn configuration module.

Computes the worker count from CPU cores plus environment overrides, then
exposes the module-level settings gunicorn reads from this file (``workers``,
``bind``, ``loglevel``, ``accesslog``/``errorlog``, timeouts, keepalive).

Environment variables honoured (all optional):
    WORKERS_PER_CORE  workers per CPU core, float ("1" default)
    MAX_WORKERS       hard cap on the computed worker count
    WEB_CONCURRENCY   explicit worker count, overrides the computation
    HOST / PORT / BIND  bind address ("0.0.0.0:9995" default)
    LOG_LEVEL, ACCESS_LOG, ERROR_LOG  logging targets ("-" = stdout)
    GRACEFUL_TIMEOUT, TIMEOUT, KEEP_ALIVE  timing knobs, seconds
"""
import json
import multiprocessing
import os

# --- Worker count ----------------------------------------------------------
# Float so fractional values such as "0.5" are allowed.
workers_per_core_str = os.getenv("WORKERS_PER_CORE", "1")
# Hard upper bound on workers; unset/empty means "no cap".
max_workers_str = os.getenv("MAX_WORKERS")
use_max_workers = None
if max_workers_str:
    use_max_workers = int(max_workers_str)
# Explicit override of the computed worker count.
web_concurrency_str = os.getenv("WEB_CONCURRENCY", None)

# --- Bind address and log level --------------------------------------------
host = os.getenv("HOST", "0.0.0.0")
port = os.getenv("PORT", "9995")
bind_env = os.getenv("BIND", None)
use_loglevel = os.getenv("LOG_LEVEL", "info")
if bind_env:
    # BIND takes precedence over HOST/PORT when set.
    use_bind = bind_env
else:
    use_bind = f"{host}:{port}"

cores = multiprocessing.cpu_count()
workers_per_core = float(workers_per_core_str)
default_web_concurrency = workers_per_core * cores
if web_concurrency_str:
    web_concurrency = int(web_concurrency_str)
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # which would silently accept a zero/negative worker count.
    if web_concurrency <= 0:
        raise ValueError(
            f"WEB_CONCURRENCY must be a positive integer, got {web_concurrency}"
        )
else:
    # At least 2 workers so one can keep serving while another restarts.
    web_concurrency = max(int(default_web_concurrency), 2)
    if use_max_workers:
        web_concurrency = min(web_concurrency, use_max_workers)

# --- Logging and timeouts ---------------------------------------------------
# "-" means stdout/stderr; an empty value disables the log entirely
# (the `or None` turns "" into None, which gunicorn treats as "off").
accesslog_var = os.getenv("ACCESS_LOG", "-")
use_accesslog = accesslog_var or None
errorlog_var = os.getenv("ERROR_LOG", "-")
use_errorlog = errorlog_var or None
graceful_timeout_str = os.getenv("GRACEFUL_TIMEOUT", "120")
timeout_str = os.getenv("TIMEOUT", "120")
keepalive_str = os.getenv("KEEP_ALIVE", "5")

# Gunicorn config variables (read by gunicorn via this module's globals;
# the names below are gunicorn's settings API and must not be renamed).
loglevel = use_loglevel
workers = web_concurrency
bind = use_bind
errorlog = use_errorlog
# /dev/shm is an in-memory tmpfs; keeps worker heartbeat files off slow
# disks so workers are not killed for missing heartbeats under I/O load.
worker_tmp_dir = "/dev/shm"
accesslog = use_accesslog
graceful_timeout = int(graceful_timeout_str)
timeout = int(timeout_str)
keepalive = int(keepalive_str)

# For debugging and testing: dump the effective configuration at startup.
log_data = {
    "loglevel": loglevel,
    "workers": workers,
    "bind": bind,
    "graceful_timeout": graceful_timeout,
    "timeout": timeout,
    "keepalive": keepalive,
    "errorlog": errorlog,
    "accesslog": accesslog,
    # Additional, non-gunicorn variables
    "workers_per_core": workers_per_core,
    "use_max_workers": use_max_workers,
    "host": host,
    "port": port,
}
print(json.dumps(log_data))

53
barker/logging.conf Normal file
View File

@ -0,0 +1,53 @@
[loggers]
keys=root, gunicorn.error, gunicorn.access
[handlers]
keys=console, error, access
[formatters]
keys=generic, error, access
[logger_root]
level=INFO
handlers=console
qualname=root
[logger_gunicorn.error]
level=INFO
handlers=console
qualname=gunicorn.error
[logger_gunicorn.access]
level=INFO
handlers=access
qualname=gunicorn.access
[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )
[handler_error]
class=StreamHandler
formatter=error
args=(sys.stdout, )
[handler_access]
class=StreamHandler
formatter=access
args=(sys.stdout, )
[formatter_generic]
format=%(asctime)s [%(name)s %(levelname)s] %(message)s
datefmt=%Y-%m-%d %H:%M:%S %Z
class=logging.Formatter
[formatter_error]
format=%(asctime)s [%(name)s %(levelname)s] %(message)s | %(funcName)s() | %(pathname)s L%(lineno)-4d
datefmt=%Y-%m-%d %H:%M:%S %Z
class=logging.Formatter
[formatter_access]
format=%(asctime)s [%(name)s %(levelname)s] %(message)s
datefmt=%Y-%m-%d %H:%M:%S %Z
class=logging.Formatter

View File

@ -22,6 +22,7 @@ starlette = "^0.17.1"
arq = "^0.22"
aiohttp = "^3.8.1"
cryptography = "^36.0.2"
gunicorn = "^20.1.0"
[tool.poetry.dev-dependencies]
flake8 = "^4.0.1"

View File

@ -50,4 +50,4 @@ RUN chmod 777 /app/docker-entrypoint.sh \
&& ln -s /app/docker-entrypoint.sh /
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["poetry", "run", "python", "-m", "barker"]
CMD ["poetry", "run", "gunicorn", "barker.main:app", "--worker-class", "uvicorn.workers.UvicornWorker", "--config", "/app/gunicorn.conf.py", "--log-config", "/app/logging.conf"]