Skip to content
Snippets Groups Projects
Commit aca33223 authored by Daniel Lyons's avatar Daniel Lyons
Browse files

Initial Alembic configuration

parent 76275a11
No related branches found
No related tags found
No related merge requests found
Generic single-database configuration.
\ No newline at end of file
# A generic, single database configuration.

[alembic]
# path to migration scripts
# (relative to the location of this ini file)
script_location = .
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to ./versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat ./versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

# NOTE: no sqlalchemy.url is configured here; env.py derives the
# database URL from the Capo configuration at runtime instead.

[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks=black
# black.type=console_scripts
# black.entrypoint=black
# black.options=-l 79

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
-- Catalog of the workflows the system knows how to run.
create table workflows (
    workflow_id serial primary key,
    name varchar
);

comment on table workflows is 'A listing of the available workflows in the system. ';
comment on column workflows.workflow_id is 'the unique id of the workflow. auto-generated.';
comment on column workflows.name is 'a human-readable name for the workflow.';
-- Requests to execute a workflow, linked to the external job they spawned.
create table workflow_requests (
    workflow_request_id serial primary key,
    job_id int,
    workflow_id int references workflows(workflow_id),
    argument json
);

-- fixed typos in the original comments: "te" -> "the", "generted" -> "generated"
comment on table workflow_requests is 'A listing of requests for workflows and the resulting job ids.';
comment on column workflow_requests.workflow_request_id is 'the unique id of the request. auto-generated';
comment on column workflow_requests.job_id is 'the id of the job that this request generated in the ? system.';
comment on column workflow_requests.workflow_id is 'the id of the workflow used in this request.';
comment on column workflow_requests.argument is 'the argument(s) used for the workflow in this request.';
-- Junction table: which files each workflow request used.
-- NOTE(review): assumes a pre-existing files(file_id) table not defined
-- in this script — confirm it exists before running against a fresh schema.
create table workflow_request_files (
    workflow_request_id int references workflow_requests(workflow_request_id),
    file_id int references files(file_id),
    primary key (workflow_request_id, file_id)
);

-- fixed typo in the original comment: "man-to-many" -> "many-to-many"
comment on table workflow_request_files is 'A many-to-many mapping table tracking which files were used for workflow requests.';
comment on column workflow_request_files.workflow_request_id is 'the id of the workflow request.';
comment on column workflow_request_files.file_id is 'the id of the file referenced by the workflow request.';
-- Catalog of the capabilities the system offers.
create table capabilities (
    capability_id serial primary key,
    name varchar not null,
    template_files varchar,
    steps varchar not null
);

comment on table capabilities is 'A listing of the available capabilities in the system.';
comment on column capabilities.capability_id is 'the unique id of the capability. auto-generated.';
comment on column capabilities.name is 'the human-readable name of the capability.';
comment on column capabilities.template_files is '?';
-- NOTE(review): the original comment here was a copy-paste of the
-- capability_id comment; replaced with a description of the column itself.
comment on column capabilities.steps is 'the steps that make up this capability.';
-- Requests by users to run a capability, with the request arguments.
create table capability_requests (
    capability_request_id serial primary key,
    capability_id int references capabilities(capability_id),
    user_id int not null,
    argument json
);

comment on table capability_requests is 'A listing of requests for capabilities, with the id of the requesting user.';
comment on column capability_requests.capability_request_id is 'The unique id of the request. auto-generated.';
comment on column capability_requests.capability_id is 'the id of the capability being requested.';
comment on column capability_requests.user_id is 'the id of the user requesting the capability.';
comment on column capability_requests.argument is 'the JSON holding the details of the request.';
\ No newline at end of file
from logging.config import fileConfig

from sqlalchemy import create_engine
from sqlalchemy import pool
from alembic import context
from pycapo import CapoConfig

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# NOTE: left as None, so 'alembic revision --autogenerate' has no
# model metadata to diff against until models are wired in here.
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL rather than an
    Engine, so no DBAPI needs to be importable; calls to
    context.execute() emit the generated SQL to the script output
    instead of executing it.
    """
    context.configure(
        url=lookup_url(),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def lookup_url():
    """Build a SQLAlchemy database URL from the Capo configuration.

    Reads the 'metadataDatabase' settings, strips the 'jdbc:' prefix
    from the configured JDBC URL, and splices the username/password
    into the authority section of the URL.
    """
    settings = CapoConfig().settings('metadataDatabase')
    credentials = f'://{settings.jdbcUsername}:{settings.jdbcPassword}@'
    return settings.jdbcUrl.replace('jdbc:', '').replace('://', credentials)
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine from the Capo-derived URL and associates a live
    connection with the migration context, so migrations execute
    directly against the database.
    """
    engine = create_engine(lookup_url())
    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# Alembic imports this module directly when any command runs; pick
# offline (SQL emission) or online (live connection) mode based on
# how the alembic CLI was invoked.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}
"""workspaces-init
Revision ID: 44d5bbbf2615
Revises:
Create Date: 2020-09-02 11:25:01.571392
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '44d5bbbf2615'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial workspaces schema.

    The original revision created only 'workflows' and left the DDL for
    the remaining four tables in a dead string literal; those tables
    are now created for real, with table/column comments mirroring the
    source schema SQL (typos in the original comments fixed).
    """
    op.create_table(
        'workflows',
        sa.Column('workflow_id', sa.Integer, primary_key=True, autoincrement=True,
                  comment='the unique id of the workflow. auto-generated.'),
        sa.Column('name', sa.String, unique=True,
                  comment='a human-readable name for the workflow.'),
        comment='A listing of the available workflows in the system.')

    op.create_table(
        'workflow_requests',
        sa.Column('workflow_request_id', sa.Integer, primary_key=True, autoincrement=True,
                  comment='the unique id of the request. auto-generated'),
        sa.Column('job_id', sa.Integer,
                  comment='the id of the job that this request generated in the ? system.'),
        sa.Column('workflow_id', sa.Integer, sa.ForeignKey('workflows.workflow_id'),
                  comment='the id of the workflow used in this request.'),
        sa.Column('argument', sa.JSON,
                  comment='the argument(s) used for the workflow in this request.'),
        comment='A listing of requests for workflows and the resulting job ids.')

    # NOTE(review): 'files' is not created by this migration; the schema
    # SQL references it, so it is assumed to already exist in the target
    # database — confirm before running against a fresh schema.
    op.create_table(
        'workflow_request_files',
        sa.Column('workflow_request_id', sa.Integer,
                  sa.ForeignKey('workflow_requests.workflow_request_id'),
                  primary_key=True,
                  comment='the id of the workflow request.'),
        sa.Column('file_id', sa.Integer,
                  sa.ForeignKey('files.file_id'),
                  primary_key=True,
                  comment='the id of the file referenced by the workflow request.'),
        comment='A many-to-many mapping table tracking which files were used for workflow requests.')

    op.create_table(
        'capabilities',
        sa.Column('capability_id', sa.Integer, primary_key=True, autoincrement=True,
                  comment='the unique id of the capability. auto-generated.'),
        sa.Column('name', sa.String, nullable=False,
                  comment='the human-readable name of the capability.'),
        sa.Column('template_files', sa.String, comment='?'),
        sa.Column('steps', sa.String, nullable=False,
                  comment='the steps that make up this capability.'),
        comment='A listing of the available capabilities in the system.')

    op.create_table(
        'capability_requests',
        sa.Column('capability_request_id', sa.Integer, primary_key=True, autoincrement=True,
                  comment='The unique id of the request. auto-generated.'),
        sa.Column('capability_id', sa.Integer, sa.ForeignKey('capabilities.capability_id'),
                  comment='the id of the capability being requested.'),
        sa.Column('user_id', sa.Integer, nullable=False,
                  comment='the id of the user requesting the capability.'),
        sa.Column('argument', sa.JSON,
                  comment='the JSON holding the details of the request.'),
        comment='A listing of requests for capabilities, with the id of the requesting user.')


def downgrade():
    """Drop the workspaces schema in reverse dependency order,
    so foreign-key constraints never dangle mid-drop."""
    op.drop_table('capability_requests')
    op.drop_table('capabilities')
    op.drop_table('workflow_request_files')
    op.drop_table('workflow_requests')
    op.drop_table('workflows')
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment