add Linux_i686

commit 95cd9b11f2
Author: j
Date: 2014-05-17 18:11:40 +00:00 (committed by Ubuntu)
1644 changed files with 564260 additions and 0 deletions

View file

@@ -0,0 +1 @@
Generic single-database configuration.
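
As a quick usage sketch (not part of the files in this commit; the directory name and revision message are illustrative only), this template is typically instantiated and driven through Alembic's Python command API:

    from alembic.config import Config
    from alembic import command

    cfg = Config("alembic.ini")                            # where the rendered ini will live
    command.init(cfg, "migrations", template="generic")    # copy this template into ./migrations
    # edit sqlalchemy.url in the generated alembic.ini, then:
    command.revision(cfg, message="create account table")  # generate a new revision script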

View file

@@ -0,0 +1,59 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = ${script_location}
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
sqlalchemy.url = driver://user:pass@localhost/dbname
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View file

@@ -0,0 +1,71 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.

def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata)

    with context.begin_transaction():
        context.run_migrations()

def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
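
The two code paths above correspond to Alembic's two invocation styles: "offline" is the --sql mode, which emits SQL without connecting, while "online" connects and executes. A minimal sketch of selecting each path through the command API, assuming an alembic.ini rendered from this template:

    from alembic.config import Config
    from alembic import command

    cfg = Config("alembic.ini")
    command.upgrade(cfg, "head", sql=True)   # offline: run_migrations_offline(), SQL to stdout
    command.upgrade(cfg, "head")             # online: run_migrations_online(), runs against the DB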

View file

@@ -0,0 +1,22 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}
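
For illustration only, a revision file rendered from this template might look like the following (the revision identifiers and the "account" table are invented):

    """create account table

    Revision ID: 1975ea83b712
    Revises: None
    Create Date: 2014-05-17 18:11:40

    """

    # revision identifiers, used by Alembic.
    revision = '1975ea83b712'
    down_revision = None

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        op.create_table(
            'account',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('name', sa.String(50), nullable=False),
        )

    def downgrade():
        op.drop_table('account')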

View file

@@ -0,0 +1 @@
Rudimentary multi-database configuration.

View file

@@ -0,0 +1,65 @@
# a multi-database configuration.
[alembic]
# path to migration scripts
script_location = ${script_location}
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
databases = engine1, engine2
[engine1]
sqlalchemy.url = driver://user:pass@localhost/dbname
[engine2]
sqlalchemy.url = driver://user:pass@localhost/dbname2
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View file

@@ -0,0 +1,130 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
import re

USE_TWOPHASE = False

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')

# gather section names referring to different
# databases.  These are named "engine1", "engine2"
# in the sample .ini file.
db_names = config.get_main_option('databases')

# add your model's MetaData objects here
# for 'autogenerate' support.  These must be set
# up to hold just those tables targeting a
# particular database. table.tometadata() may be
# helpful here in case a "copy" of
# a MetaData is needed.
# from myapp import mymodel
# target_metadata = {
#     'engine1': mymodel.metadata1,
#     'engine2': mymodel.metadata2
# }
target_metadata = {}

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.

def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    # for the --sql use case, run migrations for each URL into
    # individual files.

    engines = {}
    for name in re.split(r',\s*', db_names):
        engines[name] = rec = {}
        rec['url'] = context.config.get_section_option(name,
                                                       "sqlalchemy.url")

    for name, rec in engines.items():
        logger.info("Migrating database %s" % name)
        file_ = "%s.sql" % name
        logger.info("Writing output to %s" % file_)
        with open(file_, 'w') as buffer:
            context.configure(url=rec['url'], output_buffer=buffer,
                              target_metadata=target_metadata.get(name))
            with context.begin_transaction():
                context.run_migrations(engine_name=name)

def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    # for the direct-to-DB use case, start a transaction on all
    # engines, then run all migrations, then commit all transactions.

    engines = {}
    for name in re.split(r',\s*', db_names):
        engines[name] = rec = {}
        rec['engine'] = engine_from_config(
            context.config.get_section(name),
            prefix='sqlalchemy.',
            poolclass=pool.NullPool)

    for name, rec in engines.items():
        engine = rec['engine']
        rec['connection'] = conn = engine.connect()

        if USE_TWOPHASE:
            rec['transaction'] = conn.begin_twophase()
        else:
            rec['transaction'] = conn.begin()

    try:
        for name, rec in engines.items():
            logger.info("Migrating database %s" % name)
            context.configure(
                connection=rec['connection'],
                upgrade_token="%s_upgrades" % name,
                downgrade_token="%s_downgrades" % name,
                target_metadata=target_metadata.get(name)
            )
            context.run_migrations(engine_name=name)

        if USE_TWOPHASE:
            for rec in engines.values():
                rec['transaction'].prepare()

        for rec in engines.values():
            rec['transaction'].commit()
    except:
        for rec in engines.values():
            rec['transaction'].rollback()
        raise
    finally:
        for rec in engines.values():
            rec['connection'].close()

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
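
With this environment, an offline (--sql) run produces one script per configured database (engine1.sql, engine2.sql, and so on), while an online run opens one transaction per engine and commits them together. A minimal sketch of the offline case via the command API, assuming the multi-database alembic.ini rendered above:

    from alembic.config import Config
    from alembic import command

    cfg = Config("alembic.ini")
    command.upgrade(cfg, "head", sql=True)   # writes engine1.sql and engine2.sql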

View file

@@ -0,0 +1,43 @@
<%!
import re

%>"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}

"""

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

def upgrade(engine_name):
    eval("upgrade_%s" % engine_name)()

def downgrade(engine_name):
    eval("downgrade_%s" % engine_name)()

<%
    db_names = config.get_main_option("databases")
%>

## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
## for each database name in the ini file.

% for db_name in re.split(r',\s*', db_names):

def upgrade_${db_name}():
    ${context.get("%s_upgrades" % db_name, "pass")}

def downgrade_${db_name}():
    ${context.get("%s_downgrades" % db_name, "pass")}

% endfor
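
For illustration only, with "databases = engine1, engine2" this template renders one pair of functions per database; the %s_upgrades / %s_downgrades tokens configured in env.py decide which pair a migration's operations land in. A hypothetical rendering (the "account" table is invented):

    from alembic import op
    import sqlalchemy as sa

    def upgrade(engine_name):
        eval("upgrade_%s" % engine_name)()

    def downgrade(engine_name):
        eval("downgrade_%s" % engine_name)()

    def upgrade_engine1():
        op.create_table('account', sa.Column('id', sa.Integer, primary_key=True))

    def downgrade_engine1():
        op.drop_table('account')

    def upgrade_engine2():
        pass

    def downgrade_engine2():
        pass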

View file

@@ -0,0 +1 @@
Configuration that reads from a Pylons project environment.

View file

@@ -0,0 +1,25 @@
# a Pylons configuration.
[alembic]
# path to migration scripts
script_location = ${script_location}
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
pylons_config_file = ./development.ini
# that's it !

View file

@@ -0,0 +1,86 @@
"""Pylons bootstrap environment.
Place 'pylons_config_file' into alembic.ini, and the application will
be loaded from there.
"""
from alembic import context
from paste.deploy import loadapp
from logging.config import fileConfig
from sqlalchemy.engine.base import Engine
try:
# if pylons app already in, don't create a new app
from pylons import config as pylons_config
pylons_config['__file__']
except:
config = context.config
# can use config['__file__'] here, i.e. the Pylons
# ini file, instead of alembic.ini
config_file = config.get_main_option('pylons_config_file')
fileConfig(config_file)
wsgi_app = loadapp('config:%s' % config_file, relative_to='.')
# customize this section for non-standard engine configurations.
meta = __import__("%s.model.meta" % wsgi_app.config['pylons.package']).model.meta
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=meta.engine.url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# specify here how the engine is acquired
# engine = meta.engine
raise NotImplementedError("Please specify engine connectivity here")
if isinstance(engine, Engine):
connection = engine.connect()
else:
raise Exception(
'Expected engine instance got %s instead' % type(engine)
)
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
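
The NotImplementedError above is a deliberate placeholder. A minimal way to complete it, assuming the project's meta module (imported above) exposes a bound engine, is simply:

    # replaces the "raise NotImplementedError(...)" line in run_migrations_online()
    engine = meta.engine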

View file

@@ -0,0 +1,22 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}