The following was prepared as a pytest example that sets up a database fixture with Docker. The original article used MySQL, but I verified it with PostgreSQL as well.
As the database connection library I used PyMySQL for MySQL and psycopg2 for PostgreSQL, though other driver libraries should work in much the same way.
The pytest-docker sample waits for an httpbin service to start up like this:
def is_responsive(url):
    """Return True once the HTTP service at *url* answers with 200 OK.

    Used as a readiness probe by ``wait_until_responsive``; any falsy
    return means "not ready yet, keep polling".
    """
    try:
        response = requests.get(url)
    except requests.exceptions.ConnectionError:
        # Fully qualified on purpose: the builtin ConnectionError is NOT a
        # base class of requests' ConnectionError, so the original catch
        # could let the requests exception escape and abort the wait loop.
        return False
    # Explicit boolean instead of the original implicit ``None`` fall-through
    # on non-200 responses.
    return response.status_code == 200
@pytest.fixture(scope="session")
def http_service(docker_ip, docker_services):
    """Ensure that HTTP service is up and responsive."""
    # `port_for` maps the container's port 80 to the ephemeral host port
    # that docker-compose assigned to it.
    host_port = docker_services.port_for("httpbin", 80)
    service_url = "http://{}:{}".format(docker_ip, host_port)
    docker_services.wait_until_responsive(
        check=lambda: is_responsive(service_url),
        timeout=30.0,
        pause=0.1,
    )
    return service_url
Below is the database version of the same pattern. Even when the application itself uses an ORM such as SQLAlchemy, the readiness probe connects through the low-level driver library directly: with SQLAlchemy, `create_engine()` does not necessarily connect right away, so probing through the ORM makes the timing unclear. Going through the driver makes it explicit that we open a real connection to verify startup.
Wait until the MySQL container has started up and `connect()` no longer raises an exception:
def is_mysqld_ready(docker_ip):
    """Return True once mysqld accepts a client connection.

    Probes the server by opening (and immediately closing) a real
    connection with the test credentials, so "ready" means that
    authentication and database selection work — not merely that the
    TCP port is open.
    """
    try:
        conn = pymysql.connect(
            host=docker_ip,
            user=os.getenv('MYSQL_USER', ''),
            password=os.getenv('MYSQL_PASSWORD', ''),
            db=os.getenv('MYSQL_DATABASE', '')
        )
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # can still abort the polling loop.
        return False
    # The original leaked the probe connection; release it right away.
    conn.close()
    return True
@pytest.fixture(scope="session")
def database_service(docker_ip, docker_services):
    """Block until the MySQL container in docker-compose accepts connections."""
    def _mysql_up():
        return is_mysqld_ready(docker_ip)

    docker_services.wait_until_responsive(
        check=_mysql_up, timeout=30.0, pause=0.1
    )
The PostgreSQL version is essentially the same:
def is_postgresql_ready(docker_ip):
    """Return True once PostgreSQL accepts a client connection.

    Opens (and immediately closes) a real connection via a libpq DSN, so
    "ready" means authentication and database selection work — not merely
    that the TCP port is open.
    """
    dsn = "postgresql://{user}:{password}@{host}/{db}".format(
        user=os.getenv('POSTGRES_USER', ''),
        password=os.getenv('POSTGRES_PASSWORD', ''),
        host=docker_ip,
        db=os.getenv('POSTGRES_DB', '')
    )
    try:
        conn = psycopg2.connect(dsn)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # can still abort the polling loop.
        return False
    # The original leaked the probe connection; release it right away.
    conn.close()
    return True
@pytest.fixture(scope="session")
def database_service(docker_ip, docker_services):
    """Block until the PostgreSQL container in docker-compose accepts connections."""
    def _postgres_up():
        return is_postgresql_ready(docker_ip)

    docker_services.wait_until_responsive(
        check=_postgres_up, timeout=30.0, pause=0.1
    )
docker-compose.yml
# For MySQL — use this service definition OR the PostgreSQL one below, not both.
version: "3"
services:
  database:
    image: mysql:5.7
    ports:
      # Expose mysqld on the host so the tests can reach it.
      - 3306:3306
    volumes:
      # SQL files / shell scripts in ./initdb.d run once on first startup.
      - ./initdb.d:/docker-entrypoint-initdb.d
    environment:
      # Credentials come from the host environment (or an .env file).
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
      - MYSQL_USER=${MYSQL_USER}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
      - MYSQL_DATABASE=${MYSQL_DATABASE}
# For PostgreSQL — alternative to the MySQL definition above.
version: "3"
services:
  database:
    image: postgres:13
    ports:
      # Expose the server on the host so the tests can reach it.
      - 5432:5432
    volumes:
      # SQL files / shell scripts in ./initdb.d run once on first startup.
      - ./initdb.d:/docker-entrypoint-initdb.d
    environment:
      # Credentials come from the host environment (or an .env file).
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
Finally, put your SQL files and initialization shell scripts into the ./initdb.d folder. The
./initdb.d:/docker-entrypoint-initdb.d
volume mount makes them available to the container, docker-compose runs them to initialize the database, and the tests can then use it.
For details on the initialization mechanism, see the Qiita article "Initialize data when starting MySQL with Docker".