Python sqlalchemy module: Float() usage examples

The following 50 code examples, collected from open-source Python projects, illustrate how to use sqlalchemy.Float().
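
Before the project snippets, here is a minimal, self-contained sketch (illustrative only, not taken from any of the projects below; the table and column names are made up) showing the common ways Float() appears in these examples: as a bare column type, with a precision argument, and with asdecimal=True so values come back as decimal.Decimal.

import sqlalchemy as sa

metadata = sa.MetaData()

# Hypothetical table used only to demonstrate the Float() type.
measurements = sa.Table(
    'measurements', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    # Bare Float: rendered as the dialect's floating-point type (REAL/FLOAT/...).
    sa.Column('value', sa.Float(), nullable=True),
    # precision is passed through to the DDL, e.g. FLOAT(53) on PostgreSQL.
    sa.Column('score', sa.Float(precision=53), nullable=False, server_default='0'),
    # asdecimal=True makes result rows return decimal.Decimal instead of float.
    sa.Column('price', sa.Float(asdecimal=True), nullable=True),
)

engine = sa.create_engine('sqlite://')   # throwaway in-memory database
metadata.create_all(engine)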

Project: zun    Author: openstack
def upgrade():
    op.add_column('container',
                  sa.Column('cpu', sa.Float(),
                            nullable=True))
    op.add_column('container',
                  sa.Column('workdir', sa.String(length=255),
                            nullable=True))
    op.add_column('container',
                  sa.Column('ports',
                            zun.db.sqlalchemy.models.JSONEncodedList(),
                            nullable=True))
    op.add_column('container',
                  sa.Column('hostname', sa.String(length=255),
                            nullable=True))
    op.add_column('container',
                  sa.Column('labels',
                            zun.db.sqlalchemy.models.JSONEncodedDict(),
                            nullable=True))
Project: FRG-Crowdsourcing    Author: 97amarnathk
def upgrade():
    op.create_table(
        'project_stats',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('project_id', sa.Integer, sa.ForeignKey('project.id',
                                                          ondelete='CASCADE')),
        sa.Column('n_tasks', sa.Integer, default=0),
        sa.Column('n_task_runs', sa.Integer, default=0),
        sa.Column('n_results', sa.Integer, default=0),
        sa.Column('n_volunteers', sa.Integer, default=0),
        sa.Column('n_completed_tasks', sa.Integer, default=0),
        sa.Column('overall_progress', sa.Integer, default=0),
        sa.Column('average_time', sa.Float, default=0),
        sa.Column('n_blogposts', sa.Integer, default=0),
        sa.Column('last_activity', sa.Text, default=make_timestamp),
        sa.Column('info', JSON, nullable=False)
    )
Project: trio2o    Author: openstack
def _get_field_value(column):
    """Get field value for resource creating

    returning None indicates that not setting this field in resource dict
    """
    if column.nullable:
        # just skip nullable column
        return None
    if isinstance(column.type, sql.Text):
        return 'fake_text'
    elif isinstance(column.type, sql.Enum):
        return column.type.enums[0]
    elif isinstance(column.type, sql.String):
        return 'fake_str'
    elif isinstance(column.type, sql.Integer):
        return 1
    elif isinstance(column.type, sql.Float):
        return 1.0
    elif isinstance(column.type, sql.Boolean):
        return True
    elif isinstance(column.type, sql.DateTime):
        return datetime.datetime.utcnow()
    else:
        return None
Project: ibstract    Author: jesseliu0
def _gen_sa_table(sectype, metadata=None):
    """Generate SQLAlchemy Table object by sectype.
    """
    if metadata is None:
        metadata = MetaData()
    table = Table(
        sectype, metadata,
        Column('Symbol', String(20), primary_key=True),
        Column('DataType', String(20), primary_key=True),
        Column('BarSize', String(10), primary_key=True),
        Column('TickerTime', DateTime(), primary_key=True),
        Column('opening', Float(10, 2)),
        Column('high', Float(10, 2)),
        Column('low', Float(10, 2)),
        Column('closing', Float(10, 2)),
        Column('volume', mysqlINTEGER(unsigned=True)),
        Column('barcount', mysqlINTEGER(unsigned=True)),
        Column('average', Float(10, 2))
    )
    return table
Project: zun    Author: openstack
def upgrade():
    op.create_table(
        'capsule',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('capsule_version', sa.String(length=255), nullable=True),
        sa.Column('kind', sa.String(length=36), nullable=True),
        sa.Column('project_id', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('restart_policy', sa.String(length=255), nullable=True),
        sa.Column('host_selector', sa.String(length=255), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.String(length=36), nullable=False),
        sa.Column('status', sa.String(length=255), nullable=True),
        sa.Column('status_reason', sa.Text(), nullable=True),
        sa.Column('message', models.JSONEncodedDict(), nullable=True),
        sa.Column('spec', models.JSONEncodedDict(), nullable=True),
        sa.Column('cpu', sa.Float(), nullable=True),
        sa.Column('memory', sa.String(length=255), nullable=True),
        sa.Column('meta_name', sa.String(length=255), nullable=True),
        sa.Column('meta_labels', models.JSONEncodedList(), nullable=True),
        sa.Column('containers_uuids', models.JSONEncodedList(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
Project: xiaodi    Author: shenaishiren
def _init_cls(cls):
    setattr(cls, '__tablename__', cls.__name__)
    setattr(cls, 'id', Column(Integer, primary_key=True, autoincrement=True))
    for _str in getattr(cls, 'stringV', []):
        setattr(cls, _str, Column(String(DB_STRING_LENGTH)))
    for _int in getattr(cls, 'integerV', []):
        setattr(cls, _int, Column(Integer))
    for _bool in getattr(cls, 'boolV', []):
        setattr(cls, _bool, Column(Boolean))
    for _float in getattr(cls, 'floatV', []):
        setattr(cls, _float, Column(Float))
    for _time in getattr(cls, 'timeV', []):
        setattr(cls, _time, Column(DateTime, default=datetime.utcnow))
    for key, value in getattr(cls, 'foreignKeyV', {}).iteritems():
        setattr(cls, key, Column(Integer, ForeignKey(value), primary_key=True))
    setattr(cls, 'to_json', _convert_attr_to_dict)
Project: forget    Author: codl
def upgrade():
    op.create_table('mastodon_instances',
    sa.Column('instance', sa.String(), nullable=False),
    sa.Column('popularity', sa.Float(), server_default='10', nullable=False),
    sa.PrimaryKeyConstraint('instance', name=op.f('pk_mastodon_instances'))
    )
    op.execute("""
        INSERT INTO mastodon_instances (instance, popularity) VALUES
            ('mastodon.social', 100),
            ('mastodon.cloud', 90),
            ('social.tchncs.de', 80),
            ('mastodon.xyz', 70),
            ('mstdn.io', 60),
            ('awoo.space', 50),
            ('cybre.space', 40),
            ('mastodon.art', 30)
            ;
    """)
Project: coverage2sql    Author: openstack
def upgrade():
    migration_context = context.get_context()
    if migration_context.dialect.name == 'sqlite':
        id_type = sa.Integer
    else:
        id_type = sa.BigInteger

    op.create_table('coverages',
                    sa.Column('id', id_type, autoincrement=True,
                              primary_key=True),
                    sa.Column('project_name', sa.String(256), nullable=False),
                    sa.Column('coverage_rate', sa.Float()),
                    sa.Column('report_time', sa.DateTime()),
                    sa.Column('report_time_microsecond', sa.Integer(),
                              default=0),
                    mysql_engine='InnoDB')
    op.create_index('ix_project_name', 'coverages', ['project_name'])
Project: coverage2sql    Author: openstack
def upgrade():
    migration_context = context.get_context()
    if migration_context.dialect.name == 'sqlite':
        id_type = sa.Integer
    else:
        id_type = sa.BigInteger

    op.create_table('files',
                    sa.Column('id', id_type, autoincrement=True,
                              primary_key=True),
                    sa.Column('coverage_id', id_type, nullable=False),
                    sa.Column('filename', sa.String(256), nullable=False),
                    sa.Column('line_rate', sa.Float()),
                    mysql_engine='InnoDB')
    op.create_index('ix_class_coverage_id', 'files', ['coverage_id'])
    op.create_index('ix_filename', 'files', ['filename'])
Project: sqlacodegen    Author: agronholm
def test_column_adaptation(self):
        Table(
            'simple_items', self.metadata,
            Column('id', BIGINT),
            Column('length', DOUBLE_PRECISION)
        )

        assert self.generate_code() == """\
# coding: utf-8
from sqlalchemy import BigInteger, Column, Float, MetaData, Table

metadata = MetaData()


t_simple_items = Table(
    'simple_items', metadata,
    Column('id', BigInteger),
    Column('length', Float)
)
"""
Project: GenomicsSampleAPIs    Author: Intel-HLS
def upgrade():
    op.create_table(
        'reference',
        sa.Column('id', sa.BigInteger, primary_key=True),
        sa.Column('guid', sa.String(36), nullable=False, unique=True),
        sa.Column('length', sa.BigInteger),
        sa.Column('reference_set_id', sa.BigInteger,
                  sa.ForeignKey('reference_set.id'), nullable=False),
        sa.Column('md5_checksum', sa.String(32)),
        sa.Column('name', sa.Text),
        sa.Column('source_uri', sa.Text),
        sa.Column('is_derived', sa.Boolean),
        sa.Column('source_divergence', sa.Float),
        sa.Column('ncbi_taxon_id', sa.Integer),
        sa.Column('offset', sa.BigInteger)
    )
Project: aiohttp_admin    Author: aio-libs
def sa_table():
    choices = ['a', 'b', 'c']
    meta = sa.MetaData()
    post = sa.Table(
        'test_post', meta,
        sa.Column('id', sa.Integer, nullable=False),
        sa.Column('title', sa.String(200), nullable=False),
        sa.Column('category', sa.String(200), nullable=True),
        sa.Column('body', sa.Text, nullable=False),
        sa.Column('views', sa.Integer, nullable=False),
        sa.Column('average_note', sa.Float, nullable=False),
        # sa.Column('pictures', postgresql.JSON, server_default='{}'),
        sa.Column('published_at', sa.DateTime, nullable=False),
        # sa.Column('tags', postgresql.ARRAY(sa.Integer), server_default='{}'),
        sa.Column('status',
                  sa.Enum(*choices, name="enum_name", native_enum=False),
                  server_default="a", nullable=False),
        sa.Column('visible', sa.Boolean, nullable=False),

        # Indexes #
        sa.PrimaryKeyConstraint('id', name='post_id_pkey'))
    return post
Project: aiohttp_admin    Author: aio-libs
def document_schema():
    choices = ['a', 'b', 'c']
    schema = t.Dict({
        t.Key('_id'): MongoId,
        t.Key('title'): t.String(max_length=200),
        t.Key('category'): t.String(max_length=200),
        t.Key('body'): t.String,
        t.Key('views'): t.Int,
        t.Key('average_note'): t.Float,
        # t.Key('pictures'): t.Dict({}).allow_extra('*'),
        t.Key('published_at'): DateTime,
        # t.Key('tags'): t.List(t.Int),
        t.Key('status'): t.Enum(*choices),
        t.Key('visible'): t.StrBool,
    })
    return schema
Project: aiohttp_admin    Author: aio-libs
def table():

    meta = sa.MetaData()
    post = sa.Table(
        'post', meta,
        sa.Column('id', sa.Integer, nullable=False),
        sa.Column('title', sa.String(200), nullable=False),
        sa.Column('body', sa.Text, nullable=False),
        sa.Column('views', sa.Integer, nullable=False),
        sa.Column('average_note', sa.Float, nullable=False),
        sa.Column('pictures', postgresql.JSON, server_default='{}'),
        sa.Column('published_at', sa.Date, nullable=False),
        sa.Column('tags', postgresql.ARRAY(sa.Integer), server_default='[]'),

        # Indexes #
        sa.PrimaryKeyConstraint('id', name='post_id_pkey'))
    return post
Project: floranet    Author: Fluent-networks
def downgrade():
    op.add_column('devices',
        sa.Column('snr1', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr2', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr3', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr4', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr5', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr6', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr7', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr8', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr9', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr10', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr11', sa.Float(), nullable=True))
    op.drop_column('devices', 'snr')
Project: floranet    Author: Fluent-networks
def upgrade():
    op.create_table(
        'gateways',
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        sa.Column('host', INET, nullable=False, unique=True),
        sa.Column('name', sa.String, nullable=True),
        sa.Column('enabled', sa.Boolean, nullable=False, default=True),
        sa.Column('eui', sa.Numeric, nullable=False, unique=True),
        sa.Column('power', sa.Integer, nullable=False),
        sa.Column('port', sa.String, nullable=True),
        sa.Column('latitude', sa.Float, nullable=True),
        sa.Column('longitude', sa.Float, nullable=True),
        sa.Column('created', sa.DateTime(timezone=True), nullable=False),
        sa.Column('updated', sa.DateTime(timezone=True), nullable=False),
        )
    op.add_column('devices',
        sa.Column('devnonce', sa.dialects.postgresql.ARRAY(sa.Integer())))
Project: floranet    Author: Fluent-networks
def upgrade():
    op.create_table(
        'config',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String, nullable=False),
        sa.Column('listen', INET, nullable=False),
        sa.Column('port', sa.Integer, nullable=False),
        sa.Column('webport', sa.Integer, nullable=False),
        sa.Column('apitoken', sa.String, nullable=False),
        sa.Column('freqband', sa.String, nullable=False),
        sa.Column('netid', sa.Integer, nullable=False),
        sa.Column('duplicateperiod', sa.Integer, nullable=False),
        sa.Column('fcrelaxed', sa.Boolean, nullable=False),
        sa.Column('otaastart', sa.Integer, nullable=False),
        sa.Column('otaaend', sa.Integer, nullable=False),
        sa.Column('macqueueing', sa.Boolean, nullable=False),
        sa.Column('macqueuelimit', sa.Integer, nullable=False),
        sa.Column('adrenable', sa.Boolean, nullable=False),
        sa.Column('adrmargin', sa.Float, nullable=False),
        sa.Column('adrcycletime', sa.Integer, nullable=False),
        sa.Column('adrmessagetime', sa.Integer, nullable=False),
        sa.Column('created', sa.DateTime(timezone=True), nullable=False),
        sa.Column('updated', sa.DateTime(timezone=True), nullable=False),
        )
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_notnull_dtype(self):
        cols = {'Bool': Series([True, None]),
                'Date': Series([datetime(2012, 5, 1), None]),
                'Int': Series([1, None], dtype='object'),
                'Float': Series([1.1, None])
                }
        df = DataFrame(cols)

        tbl = 'notnull_dtype_test'
        df.to_sql(tbl, self.conn)
        returned_df = sql.read_sql_table(tbl, self.conn)  # noqa
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        if self.flavor == 'mysql':
            my_type = sqltypes.Integer
        else:
            my_type = sqltypes.Boolean

        col_dict = meta.tables[tbl].columns

        self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
        self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
        self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
        self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_notnull_dtype(self):
        if self.flavor == 'mysql':
            raise nose.SkipTest('Not applicable to MySQL legacy')

        cols = {'Bool': Series([True, None]),
                'Date': Series([datetime(2012, 5, 1), None]),
                'Int': Series([1, None], dtype='object'),
                'Float': Series([1.1, None])
                }
        df = DataFrame(cols)

        tbl = 'notnull_dtype_test'
        df.to_sql(tbl, self.conn)

        self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
        self.assertEqual(self._get_sqlite_column_type(
            tbl, 'Date'), 'TIMESTAMP')
        self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
        self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
Project: bitfinex-socket-crawler    Author: Asoul
def upgrade():
    for currency in currency_list:
        op.create_table(
            '{}_ticker'.format(currency),
            sa.Column('timestamp', sa.BigInteger, primary_key=True),
            sa.Column('bid_price', sa.Float),
            sa.Column('bid_size', sa.Float),
            sa.Column('ask_price', sa.Float),
            sa.Column('ask_size', sa.Float),
            sa.Column('daily_change', sa.Float),
            sa.Column('daily_change_perc', sa.Float),
            sa.Column('last_price', sa.Float),
            sa.Column('daily_volume', sa.Float),
            sa.Column('daily_high', sa.Float),
            sa.Column('daily_low', sa.Float)
        )
Project: vcf2db    Author: quinlan-lab
def variants_gene_columns(self):
        # all of these are also stored in the variant_impacts table.
        return [
            sql.Column("gene", sql.String(20)),
            sql.Column("transcript", sql.String(20)),
            sql.Column("is_exonic", sql.Boolean()),
            sql.Column("is_coding", sql.Boolean()),
            sql.Column("is_lof", sql.Boolean()),
            sql.Column("is_splicing", sql.Boolean()),
            sql.Column("exon", sql.String(8)),
            sql.Column("codon_change", sql.TEXT()),
            sql.Column("aa_change", sql.TEXT()),
            sql.Column("aa_length", sql.String(8)),
            sql.Column("biotype", sql.String(50)),
            sql.Column("impact", sql.String(20)),
            sql.Column("impact_so", sql.String(20)),
            sql.Column("impact_severity", sql.String(4)),
            sql.Column("polyphen_pred", sql.String(20)),
            sql.Column("polyphen_score", sql.Float()),
            sql.Column("sift_pred", sql.String(20)),
            sql.Column("sift_score", sql.Float()),
            ]
Project: metaseek    Author: ahoarfrost
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('run',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('run_id', sa.String(length=30), nullable=True),
    sa.Column('library_reads_sequenced', sa.BIGINT(), nullable=True),
    sa.Column('total_num_bases', sa.BIGINT(), nullable=True),
    sa.Column('download_size', sa.BIGINT(), nullable=True),
    sa.Column('avg_read_length', sa.Float(), nullable=True),
    sa.Column('baseA_count', sa.BIGINT(), nullable=True),
    sa.Column('baseC_count', sa.BIGINT(), nullable=True),
    sa.Column('baseG_count', sa.BIGINT(), nullable=True),
    sa.Column('baseT_count', sa.BIGINT(), nullable=True),
    sa.Column('baseN_count', sa.BIGINT(), nullable=True),
    sa.Column('gc_percent', sa.Float(), nullable=True),
    sa.Column('run_quality_counts', sa.Text(), nullable=True),
    sa.Column('dataset_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['dataset_id'], ['dataset.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_unique_constraint(None, 'dataset', ['db_source_uid'])
    # ### end Alembic commands ###
Project: fabric8-analytics-worker    Author: fabric8-analytics
def upgrade():
    """Upgrade the database to a newer revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('stacks',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('is_ref_stack', sa.Boolean(), nullable=False),
                    sa.Column('stack_json', postgresql.JSONB(), nullable=False),
                    sa.PrimaryKeyConstraint('id'))
    op.add_column('similar_stacks', sa.Column('analysis', postgresql.JSONB()))
    op.add_column('similar_stacks', sa.Column('similar_stack_id', sa.Integer(), nullable=False))
    op.add_column('similar_stacks', sa.Column('similarity_value', sa.Float(), nullable=False))
    op.add_column('similar_stacks', sa.Column('stack_id', sa.Integer(), nullable=False))
    op.create_unique_constraint('sim_unique', 'similar_stacks', ['stack_id', 'similar_stack_id'])
    op.drop_constraint('similar_stacks_appstack_id_fkey', 'similar_stacks', type_='foreignkey')
    op.create_foreign_key(None, 'similar_stacks', 'stacks', ['stack_id'], ['id'])
    op.create_foreign_key(None, 'similar_stacks', 'stacks', ['similar_stack_id'], ['id'])
    op.drop_column('similar_stacks', 'dependency_list')
    op.drop_column('similar_stacks', 'appstack_id')
    op.drop_table('reference_stacks')
    op.drop_table('app_stacks')
    # ### end Alembic commands ###
Project: yui    Author: item4
def upgrade():
    op.create_table(
        'aws',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=15), nullable=False),
        sa.Column('height', sa.Integer(), nullable=False),
        sa.Column('is_raining', sa.Boolean(), nullable=True),
        sa.Column('rain15', sa.Float(), nullable=True),
        sa.Column('rain60', sa.Float(), nullable=True),
        sa.Column('rain6h', sa.Float(), nullable=True),
        sa.Column('rain12h', sa.Float(), nullable=True),
        sa.Column('rainday', sa.Float(), nullable=True),
        sa.Column('temperature', sa.Float(), nullable=True),
        sa.Column('wind_direction1', sa.String(length=3), nullable=True),
        sa.Column('wind_speed1', sa.Float(), nullable=True),
        sa.Column('wind_direction10', sa.String(length=3), nullable=True),
        sa.Column('wind_speed10', sa.Float(), nullable=True),
        sa.Column('humidity', sa.Integer(), nullable=True),
        sa.Column('pressure', sa.Float(), nullable=True),
        sa.Column('location', sa.String(length=50), nullable=True),
        sa.Column('observed_datetime', sa.DateTime(timezone=True),
                  nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
Project: zipline-chinese    Author: zhanghan1990
def _futures_contracts_schema(metadata):
    # NOTE: When modifying this schema, update the ASSET_DB_VERSION value
    return sa.Table(
        'futures_contracts',
        metadata,
        sa.Column(
            'sid',
            sa.Integer,
            unique=True,
            nullable=False,
            primary_key=True,
        ),
        sa.Column('symbol', sa.Text, unique=True, index=True),
        sa.Column(
            'root_symbol',
            sa.Text,
            sa.ForeignKey('futures_root_symbols.root_symbol'),
            index=True
        ),
        sa.Column('asset_name', sa.Text),
        sa.Column('start_date', sa.Integer, default=0, nullable=False),
        sa.Column('end_date', sa.Integer, nullable=False),
        sa.Column('first_traded', sa.Integer, nullable=False),
        sa.Column(
            'exchange',
            sa.Text,
            sa.ForeignKey('futures_exchanges.exchange'),
        ),
        sa.Column('notice_date', sa.Integer, nullable=False),
        sa.Column('expiration_date', sa.Integer, nullable=False),
        sa.Column('auto_close_date', sa.Integer, nullable=False),
        sa.Column('multiplier', sa.Float),
        sa.Column('tick_size', sa.Float),
    )
Project: FRG-Crowdsourcing    Author: 97amarnathk
def downgrade():
    op.add_column('project', sa.Column('time_estimate', sa.Integer, default=0))
    op.add_column('project', sa.Column('time_limit', sa.Integer, default=0))
    op.add_column('project', sa.Column('calibration_frac', sa.Float, default=0))
    op.add_column('project', sa.Column('bolt_course_id', sa.Integer, default=0))
    op.add_column('project', sa.Column('long_tasks', sa.Integer, default=0))
Project: sitrep    Author: bittorrent
def upgrade():
    op.alter_column('component_update', 'time', existing_type=sa.Integer(), type_=sa.Float(precision=53))
    op.alter_column('component_update', 'lifetime', existing_type=sa.Integer(), type_=sa.Float(precision=53))
Project: sitrep    Author: bittorrent
def downgrade():
    op.alter_column('component_update', 'time', existing_type=sa.Float(precision=53), type_=sa.Integer())
    op.alter_column('component_update', 'lifetime', existing_type=sa.Float(precision=53), type_=sa.Integer())
Project: stacker    Author: bamine
def initialize(self):
        metadata = MetaData()
        logs = Table(self.table_name, metadata,
                     Column('task', String, primary_key=True),
                     Column('date_time', DateTime, primary_key=True),
                     Column('model', String),
                     Column('parameters', String),
                     Column('score', Float),
                     Column('scorer_name', String),
                     Column('validation_method', String),
                     Column('predictions', String),
                     Column('random_state', Integer))
        mapper(self.OptimizationResultLog, logs)
        metadata.create_all(bind=self.engine)
Project: triage    Author: dssg
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'individual_importances',
        sa.Column('model_id', sa.Integer(), nullable=False),
        sa.Column('entity_id', sa.BigInteger(), nullable=False),
        sa.Column('as_of_date', sa.DateTime(), nullable=False),
        sa.Column('feature', sa.String(), nullable=False),
        sa.Column('method', sa.String(), nullable=False),
        sa.Column('importance_score', sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(['model_id'], ['results.models.model_id'], ),
        sa.PrimaryKeyConstraint(
            'model_id',
            'entity_id',
            'as_of_date',
            'feature',
            'method',
        ),
        schema='results'
    )
    op.create_table(
        'list_predictions',
        sa.Column('model_id', sa.Integer(), nullable=False),
        sa.Column('entity_id', sa.BigInteger(), nullable=False),
        sa.Column('as_of_date', sa.DateTime(), nullable=False),
        sa.Column('score', sa.Numeric(), nullable=True),
        sa.Column('rank_abs', sa.Integer(), nullable=True),
        sa.Column('rank_pct', sa.Float(), nullable=True),
        sa.Column('matrix_uuid', sa.Text(), nullable=True),
        sa.Column('test_label_window', sa.Interval(), nullable=True),
        sa.ForeignKeyConstraint(['model_id'], ['results.models.model_id'], ),
        sa.PrimaryKeyConstraint('model_id', 'entity_id', 'as_of_date'),
        schema='results'
    )
    # ### end Alembic commands ###
Project: triage    Author: dssg
def upgrade():
    op.alter_column(
        table_name='individual_importances',
        column_name='importance_score',
        type_=sa.Float(),
        schema='results',
        postgresql_using='importance_score::double precision'
    )
Project: triage    Author: dssg
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        'individual_importances',
        sa.Column('feature_value', sa.Float(), nullable=True),
        schema='results',
    )
    # ### end Alembic commands ###
Project: zun    Author: openstack
def upgrade():
    op.add_column('compute_node',
                  sa.Column('cpus', sa.Integer(), nullable=False))
    op.add_column('compute_node',
                  sa.Column('cpu_used', sa.Float(), nullable=False))
Project: Dallinger    Author: Dallinger
def proportion(self):
        """Make proportion queryable."""
        return cast(self.property4, Float)
Project: Dallinger    Author: Dallinger
def fitness(self):
        """Retrieve fitness via property1."""
        return cast(self.property1, Float)
Project: Url    Author: beiruan
def __init__(self, url):
        self.table = Table(self.__tablename__, MetaData(),
                           Column('name', String(64), primary_key=True),
                           Column('group', String(64)),
                           Column('status', String(16)),
                           Column('script', Text),
                           Column('comments', String(1024)),
                           Column('rate', Float(11)),
                           Column('burst', Float(11)),
                           Column('updatetime', Float(32)),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8'
                           )

        self.url = make_url(url)
        if self.url.database:
            database = self.url.database
            self.url.database = None
            try:
                engine = create_engine(self.url, convert_unicode=True, pool_recycle=3600)
                conn = engine.connect()
                conn.execute("commit")
                conn.execute("CREATE DATABASE %s" % database)
            except sqlalchemy.exc.SQLAlchemyError:
                pass
            self.url.database = database
        self.engine = create_engine(url, convert_unicode=True, pool_recycle=3600)
        self.table.create(self.engine, checkfirst=True)
Project: PilosusBot    Author: pilosus
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('sentiments', sa.Column('score', sa.Float(), nullable=True))
    op.drop_column('sentiments', 'is_negative')
    # ### end Alembic commands ###
Project: CodeGra.de    Author: CodeGra-de
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('AssignmentAssignedGrader',
                    sa.Column('weight', sa.Float(), nullable=False),
                    sa.Column('User_id', sa.Integer(), nullable=False),
                    sa.Column('Assignment_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['Assignment_id'], ['Assignment.id'], ondelete='CASCADE'),
                    sa.ForeignKeyConstraint(['User_id'], ['User.id'], ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('Assignment_id', 'User_id')
    )
    # ### end Alembic commands ###
Project: CodeGra.de    Author: CodeGra-de
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('GradeHistory',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('changed_at', sa.DateTime(), nullable=True),
    sa.Column('is_rubric', sa.Boolean(), nullable=True),
    sa.Column('grade', sa.Float(), nullable=True),
    sa.Column('passed_back', sa.Boolean(), nullable=True),
    sa.Column('Work_id', sa.Integer(), nullable=True),
    sa.Column('User_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['User_id'], ['User.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['Work_id'], ['Work.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
Project: incubator-airflow-old    Author: apache
def upgrade():
    # use batch_alter_table to support SQLite workaround
    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.alter_column('duration',
                              existing_type=mysql.INTEGER(display_width=11),
                              type_=sa.Float(),
                              existing_nullable=True)
Project: GenomicsSampleAPIs    Author: Intel-HLS
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        'reference_set',
        sa.Column('tiledb_reference_offset_padding_factor', sa.Float(53), server_default=sa.text(u'1.10'), nullable=False))
    op.drop_column('reference_set', 'offset_factor')
    # drop and re-create trigger
    op.execute(
        'DROP TRIGGER IF EXISTS increment_next_column_in_reference_set ON reference CASCADE')
    padded_length = get_tiledb_padded_reference_length_string('NEW.length')
    op.execute('''\
    CREATE OR REPLACE FUNCTION increment_next_column_in_reference_set_pgsql()
      RETURNS trigger AS $increment_next_column_in_reference_set_pgsql$
    DECLARE
        updated_next_tiledb_column_offset bigint;
        padded_reference_length bigint;
    BEGIN
        padded_reference_length = %s;
        UPDATE reference_set SET next_tiledb_column_offset=
            CASE
                WHEN NEW.tiledb_column_offset IS NULL THEN next_tiledb_column_offset+padded_reference_length
                WHEN NEW.tiledb_column_offset+padded_reference_length>next_tiledb_column_offset THEN NEW.tiledb_column_offset+padded_reference_length
                ELSE next_tiledb_column_offset
            END
        WHERE id = NEW.reference_set_id RETURNING next_tiledb_column_offset INTO updated_next_tiledb_column_offset;
        IF NEW.tiledb_column_offset IS NULL THEN
            NEW.tiledb_column_offset = updated_next_tiledb_column_offset-padded_reference_length;
        END IF;
        RETURN NEW;
    END;
    $increment_next_column_in_reference_set_pgsql$ LANGUAGE plpgsql;
    CREATE TRIGGER increment_next_column_in_reference_set BEFORE INSERT ON reference
    FOR EACH ROW EXECUTE PROCEDURE increment_next_column_in_reference_set_pgsql();
    ''' % (padded_length))
    ### end Alembic commands ###
Project: GenomicsSampleAPIs    Author: Intel-HLS
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Downgrade trigger by dropping the trigger and downgrading the trigger
    # function
    op.execute(
        'DROP TRIGGER IF EXISTS increment_next_column_in_reference_set ON reference CASCADE')
    padded_length = get_tiledb_padded_reference_length_string_default('NEW.length')
    op.execute('''\
    CREATE OR REPLACE FUNCTION increment_next_column_in_reference_set_pgsql()
      RETURNS trigger AS $increment_next_column_in_reference_set_pgsql$
    DECLARE
        updated_next_tiledb_column_offset bigint;
    BEGIN
        UPDATE reference_set SET next_tiledb_column_offset=
            CASE
                WHEN NEW.tiledb_column_offset IS NULL THEN next_tiledb_column_offset+%s
                WHEN NEW.tiledb_column_offset+%s>next_tiledb_column_offset THEN NEW.tiledb_column_offset+%s
                ELSE next_tiledb_column_offset
            END
        WHERE id = NEW.reference_set_id RETURNING next_tiledb_column_offset INTO updated_next_tiledb_column_offset;
        IF NEW.tiledb_column_offset IS NULL THEN
            NEW.tiledb_column_offset = updated_next_tiledb_column_offset-%s;
        END IF;
        RETURN NEW;
    END;
    $increment_next_column_in_reference_set_pgsql$ LANGUAGE plpgsql;
    CREATE TRIGGER increment_next_column_in_reference_set BEFORE INSERT ON reference
    FOR EACH ROW EXECUTE PROCEDURE increment_next_column_in_reference_set_pgsql();
    ''' % (padded_length, padded_length, padded_length, padded_length))

    op.add_column(
        'reference_set',
        sa.Column('offset_factor', sa.Float(53), autoincrement=False, nullable=True))
    op.drop_column('reference_set', 'tiledb_reference_offset_padding_factor')
    ### end Alembic commands ###
Project: GenomicsSampleAPIs    Author: Intel-HLS
def upgrade():
    op.create_table(
        'reference_set',
        sa.Column('id', sa.BigInteger, primary_key=True),
        sa.Column('guid', sa.String(36), nullable=False, unique=True),
        sa.Column('md5_checksum', sa.String(32)),
        sa.Column('description', sa.Text),
        sa.Column('source_uri', sa.Text),
        sa.Column('is_derived', sa.Boolean),
        sa.Column('ncbi_taxon_id', sa.Integer),
        sa.Column('assembly_id', sa.String(100)),
        sa.Column('offset_factor', sa.Float)
    )
Project: marvin    Author: sdss
def HybridMag(flux_parameter, band, index=None):
    """Returns a hybrid property describing an asinh magnitude.

    ``flux_parameter`` must be a column holding a flux in nanomaggies. ``band`` is
    the band name, used to determine the softening parameter. If ``flux_parameter``
    is an array, ``index`` defines the position of ``band`` within the array.

    """

    @hybrid_property
    def hybridMag(self):
        if index is not None:
            flux = getattr(self, flux_parameter)[index]
        else:
            flux = getattr(self, flux_parameter)

        flux *= 1e-9  # From nanomaggies to maggies
        bb_band = bb[band]
        asinh_mag = -2.5 / np.log(10) * (np.arcsinh(flux / (2. * bb_band)) + np.log(bb_band))
        return asinh_mag

    @hybridMag.expression
    def hybridMag(cls):
        if index is not None:
            # It needs to be index + 1 because Postgresql arrays are 1-indexed.
            flux = getattr(cls, flux_parameter)[index + 1]
        else:
            flux = getattr(cls, flux_parameter)

        flux *= 1e-9
        bb_band = bb[band]
        xx = flux / (2. * bb_band)
        asinh_mag = (-2.5 / func.log(10) *
                     (func.log(xx + func.sqrt(func.pow(xx, 2) + 1)) + func.log(bb_band)))
        return cast(asinh_mag, Float)

    return hybridMag
Project: marvin    Author: sdss
def logmass(parameter):

    @hybrid_property
    def mass(self):
        par = getattr(self, parameter)
        return math.log10(par) if par > 0. else 0.

    @mass.expression
    def mass(cls):
        par = getattr(cls, parameter)
        return cast(case([(par > 0., func.log(par)),
                          (par == 0., 0.)]), Float)

    return mass
Project: marvin    Author: sdss
def HybridRatio(line1, line2):
    '''Produce emission-line-ratio hybrid properties.'''

    @hybrid_property
    def hybridRatio(self):

        if type(line1) == tuple:
            myline1 = getattr(self, line1[0])+getattr(self, line1[1])
        else:
            myline1 = getattr(self, line1)

        if getattr(self, line2) > 0:
            return myline1/getattr(self, line2)
        else:
            return -999.

    @hybridRatio.expression
    def hybridRatio(cls):

        if type(line1) == tuple:
            myline1 = getattr(cls, line1[0])+getattr(cls, line1[1])
        else:
            myline1 = getattr(cls, line1)

        return cast(case([(getattr(cls, line2) > 0., myline1/getattr(cls, line2)),
                          (getattr(cls, line2) == 0., -999.)]), Float)

    return hybridRatio
Project: floranet    Author: Fluent-networks
def upgrade():
    op.drop_column('devices', 'snr1')
    op.drop_column('devices', 'snr2')
    op.drop_column('devices', 'snr3')
    op.drop_column('devices', 'snr4')
    op.drop_column('devices', 'snr5')
    op.drop_column('devices', 'snr6')
    op.drop_column('devices', 'snr7')
    op.drop_column('devices', 'snr8')
    op.drop_column('devices', 'snr9')
    op.drop_column('devices', 'snr10')
    op.drop_column('devices', 'snr11')
    op.add_column('devices',
        sa.Column('snr', sa.dialects.postgresql.ARRAY(sa.Float())))
Project: floranet    Author: Fluent-networks
def upgrade():
    op.add_column('devices',
        sa.Column('adr_datr', sa.String(), nullable=True))
    op.add_column('devices',
        sa.Column('snr_pointer', sa.Integer(), nullable=True))
    op.add_column('devices',
        sa.Column('snr_average', sa.Float(), nullable=True))    
    op.add_column('devices',
        sa.Column('snr1', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr2', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr3', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr4', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr5', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr6', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr7', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr8', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr9', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr10', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('snr11', sa.Float(), nullable=True))
    op.add_column('devices',
        sa.Column('fcnterror', sa.Boolean(), nullable=False, default=False))
    op.add_column('devices',
        sa.Column('created', sa.DateTime(timezone=True)))
    op.add_column('devices',
        sa.Column('updated', sa.DateTime(timezone=True)))
Project: to-vendendo    Author: anapaulagomes
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('categories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_categories_name'), 'categories', ['name'], unique=True)
    op.create_table('items',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.Column('manufacturer', sa.String(length=100), nullable=True),
    sa.Column('price', sa.Float(), nullable=False),
    sa.Column('age', sqlalchemy_utils.types.choice.ChoiceType(Item.TYPES), nullable=True),
    sa.Column('available_on', sa.DateTime(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_items_name'), 'items', ['name'], unique=True)
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.Column('email', sqlalchemy_utils.types.email.EmailType(), nullable=True),
    sa.Column('phone_number', sqlalchemy_utils.types.phone_number.PhoneNumberType(), nullable=True),
    sa.Column('_password', sqlalchemy_utils.types.password.PasswordType(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('items_categories',
    sa.Column('category_id', sa.Integer(), nullable=True),
    sa.Column('item_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
    sa.ForeignKeyConstraint(['item_id'], ['items.id'], )
    )
    op.drop_table('category')
    # ### end Alembic commands ###
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_double_precision(self):
        V = 1.23456789101112131415

        df = DataFrame({'f32': Series([V, ], dtype='float32'),
                        'f64': Series([V, ], dtype='float64'),
                        'f64_as_f32': Series([V, ], dtype='float64'),
                        'i32': Series([5, ], dtype='int32'),
                        'i64': Series([5, ], dtype='int64'),
                        })

        df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
                  dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
        res = sql.read_sql_table('test_dtypes', self.conn)

        # check precision of float64
        self.assertEqual(np.round(df['f64'].iloc[0], 14),
                         np.round(res['f64'].iloc[0], 14))

        # check sql types
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        col_dict = meta.tables['test_dtypes'].columns
        self.assertEqual(str(col_dict['f32'].type),
                         str(col_dict['f64_as_f32'].type))
        self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
        self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
        self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
        self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))