"""
https://docs.sqlalchemy.org/en/14/tutorial/index.html#unified-tutorial
https://docs.sqlalchemy.org/en/14/tutorial/data_select.html
For both Core and ORM, the select() function generates a Select construct which
is used for all SELECT queries. Passed to methods like Connection.execute() in
Core and Session.execute() in ORM, a SELECT statement is emitted in the current
transaction and the result rows available via the returned Result object.
"""
from sqlalchemy import (
JSON,
Column,
ForeignKey,
Integer,
MetaData,
String,
Table,
and_,
bindparam,
create_engine,
func,
insert,
literal_column,
or_,
select,
text,
union_all,
)
from sqlalchemy.dialects import oracle, postgresql
metadata = MetaData()
user_table = Table(
"user_account",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30)),
Column("fullname", String),
)
# When using the ForeignKey object within a Column definition, we can omit
# the datatype for that Column; it is automatically inferred from that of
# the related column.
address_table = Table(
"address",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", ForeignKey("user_account.id"), nullable=False),
Column("email_address", String, nullable=False),
)
def show_objects():
print(user_table.c.name)
print(user_table.c.keys())
print(user_table.primary_key)
def emit_database_DDL(engine):
# The DDL create process by default includes some SQLite-specific PRAGMA
# statements that test for the existence of each table before emitting a
# CREATE. The full series of steps are also included within a BEGIN/COMMIT
# pair to accommodate for transactional DDL (SQLite does actually support
# transactional DDL, however the sqlite3 database driver historically runs
# DDL in “autocommit” mode).
metadata.create_all(engine)
def insert_stmt(engine):
stmt = insert(user_table).values(
name="spongebob", fullname="Spongebob Squarepants"
)
with engine.connect() as conn:
result = conn.execute(stmt)
conn.commit()
print(f"result.inserted_primary_key: {result.inserted_primary_key}")
def insert_two_rows(engine):
with engine.connect() as conn:
result = conn.execute(
insert(user_table),
[
{"name": "sandy", "fullname": "Sandy Cheeks"},
{"name": "patrick", "fullname": "Patrick Star"},
],
)
conn.commit()
def scalar_subqueries(engine):
# see basic-insert-with-core.1-4.py
# https://docs.sqlalchemy.org/en/14/glossary.html#term-scalar-subquery
scalar_subquery = (
select(user_table.c.id)
.where(user_table.c.name == bindparam("username"))
.scalar_subquery()
)
with engine.connect() as conn:
result = conn.execute(
insert(address_table).values(user_id=scalar_subquery),
[
{
"username": "spongebob",
"email_address": "spongebob@sqlalchemy.org",
},
{"username": "sandy", "email_address": "sandy@sqlalchemy.org"},
{"username": "sandy", "email_address": "sandy@aol.com"},
{
"username": "patrick",
"email_address": "patrick@gmail.com",
},
],
)
conn.commit()
def insert_character_without_email(engine):
    # Enter a character without an email address
with engine.connect() as conn:
result = conn.execute(
insert(user_table),
[
{"name": "squidward", "fullname": "Squidward"},
],
)
conn.commit()
def select_stmt(engine):
# The select() construct builds up a statement in the same way as that of
# insert(), using a generative approach where each method builds more
# state onto the object. Like the other SQL constructs, it can be
# stringified in place.
stmt = select(user_table).where(user_table.c.name == "spongebob")
print(f"stmt: {stmt}")
# Also in the same manner as all other statement-level SQL constructs,
# to actually run the statement we pass it to an execution method. Since
# a SELECT statement returns rows we can always iterate the result object
# to get Row objects back.
with engine.connect() as conn:
for row in conn.execute(stmt):
print(row)
def select_columns_and_from(engine):
    # The select() function accepts positional elements representing any number
# of Column and/or Table expressions
stmt = select(user_table)
print(f"stmt: {stmt}")
# To SELECT from individual columns using a Core approach, Column objects
# are accessed from the Table.c accessor and can be sent directly; the FROM
# clause will be inferred as the set of all Table and other FromClause
# objects that are represented by those columns.
stmt = select(user_table.c.name, user_table.c.fullname)
print(f"stmt: {stmt}")
def select_from_labeled_sql_expressions(engine):
# The ColumnElement.label() method as well as the same-named method
# available on ORM attributes provides a SQL label of a column or
# expression, allowing it to have a specific name in a result set.
# This can be helpful when referring to arbitrary SQL expressions in
# a result row by name.
# The label names we create may also be referred towards in the
# ORDER BY or GROUP BY clause of the Select.
stmt = select(
("Username: " + user_table.c.name).label("username"),
).order_by(user_table.c.name)
print(stmt)
with engine.connect() as conn:
for row in conn.execute(stmt):
print(f"{row.username}")
def select_with_textual_expressions(engine):
# Sometimes there is also the need to manufacture arbitrary SQL blocks
# inside of statements, such as constant string expressions, or just some
# arbitrary SQL that’s quicker to write literally.
#
# Note that in both cases, when using text() or literal_column(), we are
# writing a syntactical SQL expression, and not a literal value. We
# therefore have to include whatever quoting or syntaxes are necessary for
# the SQL we want to see rendered.
stmt = select(text("'some phrase'"), user_table.c.name).order_by(
user_table.c.name
)
with engine.connect() as conn:
print(conn.execute(stmt).all())
# In this common case we can get more functionality out of our textual
# fragment using the literal_column() construct instead. This object is
# similar to text() except that instead of representing arbitrary SQL of
# any form, it explicitly represents a single “column” and can then be
# labeled and referred towards in subqueries and other expressions.
stmt = select(
literal_column("'some phrase'").label("p"), user_table.c.name
).order_by(user_table.c.name)
with engine.connect() as conn:
for row in conn.execute(stmt):
print(f"{row.p}, {row.name} ")
def where_clause(engine):
# SQLAlchemy allows us to compose SQL expressions, such as name = 'squidward'
# or user_id > 10, by making use of standard Python operators in conjunction
# with Column and similar objects. For boolean expressions, most Python
# operators such as ==, !=, <, >= etc. generate new SQL Expression objects,
# rather than plain boolean True/False values.
print(user_table.c.name == "squidward")
print(address_table.c.user_id > 10)
# We can use expressions like these to generate the WHERE clause by passing
# the resulting objects to the Select.where() method.
print(select(user_table).where(user_table.c.name == "squidward"))
# To produce multiple expressions joined by AND, the Select.where() method
# may be invoked any number of times.
print(
select(address_table.c.email_address)
.where(user_table.c.name == "squidward")
.where(address_table.c.user_id == user_table.c.id)
)
# A single call to Select.where() also accepts multiple expressions with
# the same effect.
print(
select(address_table.c.email_address).where(
user_table.c.name == "squidward",
address_table.c.user_id == user_table.c.id,
)
)
# “AND” and “OR” conjunctions are both available directly using the and_()
# and or_() functions
print(
select(address_table.c.email_address).where(
and_(
or_(
user_table.c.name == "squidward",
user_table.c.name == "sandy",
),
address_table.c.user_id == user_table.c.id,
)
)
)
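    # Sketch: executing the AND/OR statement above against the rows inserted
    # earlier; only sandy's addresses should match, since squidward has no
    # address rows.
    with engine.connect() as conn:
        print(
            conn.execute(
                select(address_table.c.email_address).where(
                    and_(
                        or_(
                            user_table.c.name == "squidward",
                            user_table.c.name == "sandy",
                        ),
                        address_table.c.user_id == user_table.c.id,
                    )
                )
            ).all()
        )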
def explicit_from_clause_and_join(engine):
# As mentioned previously, the FROM clause is usually inferred based on the
# expressions that we are setting in the columns clause as well as other
# elements of the Select.
print(select(user_table.c.name))
# If we were to put columns from two tables, then we get a comma-separated
# FROM clause.
print(select(user_table.c.name, address_table.c.email_address))
# In order to JOIN these two tables together, we typically use one of two
# methods on Select. The first is the Select.join_from() method, which
# allows us to indicate the left and right side of the JOIN explicitly.
print(
select(user_table.c.name, address_table.c.email_address).join_from(
user_table, address_table
)
)
    # The other is the Select.join() method, which indicates only the right
    # side of the JOIN; the left-hand side is inferred.
print(
select(user_table.c.name, address_table.c.email_address).join(
address_table
)
)
# We also have the option to add elements to the FROM clause explicitly, if
# it is not inferred the way we want from the columns clause. We use the
# Select.select_from() method to achieve this, as below where we establish
# user_table as the first element in the FROM clause and Select.join() to
# establish address_table as the second.
print(
select(user_table.c.name, address_table.c.email_address)
.select_from(user_table)
.join(address_table)
)
# Another example where we might want to use Select.select_from() is if
# our columns clause doesn’t have enough information to provide for a
# FROM clause.
print(select(func.count("*")).select_from(user_table))
def setting_on_clause(engine):
# If the left and right targets of the join do not have such a constraint,
# or there are multiple constraints in place, we need to specify the ON
# clause directly. Both Select.join() and Select.join_from() accept an
# additional argument for the ON clause, which is stated using the same SQL
    # Expression mechanics as we saw earlier in The WHERE clause.
print(
select(address_table.c.email_address)
.select_from(user_table)
.join(address_table, user_table.c.id == address_table.c.user_id)
)
def outer_and_full_join(engine):
# Both the Select.join() and Select.join_from() methods accept keyword
# arguments Select.join.isouter and Select.join.full which will render
# LEFT OUTER JOIN and FULL OUTER JOIN, respectively.
#
# There is also a method Select.outerjoin() that is equivalent to using
# .join(..., isouter=True).
#
# SQL also has a “RIGHT OUTER JOIN”. SQLAlchemy doesn’t render this directly;
# instead, reverse the order of the tables and use “LEFT OUTER JOIN”.
print(select(user_table).join(address_table, isouter=True))
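    # Sketch of the equivalent Select.outerjoin() spelling mentioned above;
    # it renders the same LEFT OUTER JOIN as isouter=True.
    print(select(user_table).outerjoin(address_table))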
print(select(user_table).join(address_table, full=True))
def order_by_stmt(engine):
# The SELECT SQL statement includes a clause called ORDER BY which is used to
# return the selected rows within a given ordering.
print(select(user_table).order_by(user_table.c.name))
# Ascending / descending is available from the ColumnElement.asc() and
# ColumnElement.desc() modifiers, which are present from ORM-bound attributes
# as well.
# https://docs.sqlalchemy.org/en/14/core/tutorial.html#ordering-grouping-limiting-offset-ing
print(select(user_table).order_by(user_table.c.name.desc()))
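    # Sketch: the explicit ascending form, ColumnElement.asc(), mentioned
    # above; order_by() also accepts multiple criteria.
    print(
        select(user_table).order_by(
            user_table.c.fullname.asc(), user_table.c.name.desc()
        )
    )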
def aggregate_functions(engine):
# In SQL, aggregate functions allow column expressions across multiple rows
# to be aggregated together to produce a single result. Examples include
# counting, computing averages, as well as locating the maximum or minimum
# value in a set of values.
#
# SQLAlchemy provides for SQL functions in an open-ended way using a
# namespace known as func.
count_fn = func.count(user_table.c.id)
print(count_fn)
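    # Sketch: embedding the aggregate in a SELECT and executing it; the FROM
    # clause is inferred from the column given to func.count().
    with engine.connect() as conn:
        print(conn.execute(select(count_fn)).scalar())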
def group_by_having_stmt(engine):
# The GROUP BY clause is constructed similarly to the ORDER BY clause, and has
# the purpose of sub-dividing the selected rows into specific groups upon which
# aggregate functions may be invoked. The HAVING clause is usually used with
# GROUP BY and is of a similar form to the WHERE clause, except that it’s
# applied to the aggregated functions used within groups.
# TUTORIAL ERROR : engine.connect() with ORM usage
with engine.connect() as conn:
result = conn.execute(
select(
user_table.c.name, func.count(address_table.c.id).label("count")
)
.join(address_table)
.group_by(user_table.c.name)
.having(func.count(address_table.c.id) > 1)
)
print(result.all())
def using_alias(engine):
# Now that we are selecting from multiple tables and using joins, we quickly
    # run into the case where we need to refer to the same table multiple times
# in the FROM clause of a statement. We accomplish this using SQL aliases.
user_alias_1 = user_table.alias()
user_alias_2 = user_table.alias()
# The SELECT statement below for example returns all unique pairs of user names
stmt = select(user_alias_1.c.name, user_alias_2.c.name).join_from(
user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id
)
print(f"stmt: {stmt}")
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
def using_subqueries(engine):
# A subquery in SQL is a SELECT statement that is rendered within
# parenthesis and placed within the context of an enclosing statement,
# typically a SELECT statement but not necessarily.
# SQLAlchemy uses the Subquery object to represent a subquery and the
# CTE to represent a CTE, usually obtained from the Select.subquery()
# and Select.cte() methods, respectively. Either object can be used as
# a FROM element inside of a larger select() construct.
subq = (
select(
func.count(address_table.c.id).label("count"),
address_table.c.user_id,
)
.group_by(address_table.c.user_id)
.subquery()
)
# Stringifying the subquery by itself without it being embedded inside
# of another Select or other statement produces the plain SELECT statement
# without any enclosing parenthesis.
print(f"subq: {subq}")
# The Subquery object behaves like any other FROM object such as a Table,
# notably that it includes a Subquery.c namespace of the columns which it
# selects.
print(select(subq.c.user_id, subq.c.count))
stmt = select(
user_table.c.name, user_table.c.fullname, subq.c.count
).join_from(user_table, subq)
print(f"stmt: {stmt}")
def using_cte(engine):
# Usage of the CTE construct in SQLAlchemy is virtually the same as how
# the Subquery construct is used. By changing the invocation of the
# Select.subquery() method to use Select.cte() instead, we can use the
# resulting object as a FROM element in the same way, but the SQL rendered
# is the very different common table expression syntax.
subq = (
select(
func.count(address_table.c.id).label("count"),
address_table.c.user_id,
)
.group_by(address_table.c.user_id)
.cte()
)
print(f"subq: {subq}")
# In both cases, the subquery and CTE were named at the SQL level using an
# “anonymous” name. In the Python code, we don’t need to provide these
# names at all.
# A name that will be rendered in the SQL can be provided by passing it as
# the first argument of the Select.subquery() or Select.cte() methods.
    # e.g.: subq = select( ... ).cte("new_table")
stmt = select(
user_table.c.name, user_table.c.fullname, subq.c.count
).join_from(user_table, subq)
print(f"stmt: {stmt}")
def scalar_and_correlated_subqueries(engine):
# A scalar subquery is a subquery that returns exactly zero or one row and
# exactly one column. The subquery is then used in the COLUMNS or WHERE
# clause of an enclosing SELECT statement and is different than a regular
# subquery in that it is not used in the FROM clause. A correlated subquery
# is a scalar subquery that refers to a table in the enclosing SELECT
# statement.
# Scalar subqueries are often, but not necessarily, used with aggregate
# functions.
subq = (
select(func.count(address_table.c.id))
.where(user_table.c.id == address_table.c.user_id)
.scalar_subquery()
)
print(f"subq: {subq}")
# The above subq object now falls within the ColumnElement SQL expression
# hierarchy, in that it may be used like any other column expression.
print(subq == 5)
# Although the scalar subquery by itself renders both user_account and
# address in its FROM clause when stringified by itself, when embedding
# it into an enclosing select() construct that deals with the
# user_account table, the user_account table is automatically correlated,
# meaning it does not render in the FROM clause of the subquery.
stmt = select(user_table.c.name, subq.label("address_count"))
print(f"stmt: {stmt}")
# Simple correlated subqueries will usually do the right thing that’s
# desired. However, in the case where the correlation is ambiguous,
# SQLAlchemy will let us know that more clarity is needed.
stmt = (
select(
user_table.c.name,
address_table.c.email_address,
subq.label("address_count"),
)
.join_from(user_table, address_table)
.order_by(user_table.c.id, address_table.c.id)
)
# print(f"stmt: {stmt}")
#
# sqlalchemy.exc.InvalidRequestError: Select statement
# '<sqlalchemy.sql.selectable.Select object at 0x7f5e4d313970>' returned
# no FROM clauses due to auto-correlation; specify correlate(<tables>) to
# control correlation manually.
# To specify that the user_table is the one we seek to correlate we specify
# this using the ScalarSelect.correlate() or ScalarSelect.correlate_except()
# methods.
subq = (
select(func.count(address_table.c.id))
.where(user_table.c.id == address_table.c.user_id)
.scalar_subquery()
.correlate(user_table)
)
with engine.connect() as conn:
result = conn.execute(
select(
user_table.c.name,
address_table.c.email_address,
subq.label("address_count"),
)
.join_from(user_table, address_table)
.order_by(user_table.c.id, address_table.c.id)
)
print(result.all())
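    # Sketch of the ScalarSelect.correlate_except() spelling mentioned above;
    # correlating everything except address_table should produce the same
    # statement as correlate(user_table).
    subq_except = (
        select(func.count(address_table.c.id))
        .where(user_table.c.id == address_table.c.user_id)
        .scalar_subquery()
        .correlate_except(address_table)
    )
    print(
        select(
            user_table.c.name,
            address_table.c.email_address,
            subq_except.label("address_count"),
        )
        .join_from(user_table, address_table)
        .order_by(user_table.c.id, address_table.c.id)
    )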
def union_and_other_set_operations(engine):
    # In SQL, SELECT statements can be merged together using the UNION or
# UNION ALL SQL operation, which produces the set of all rows produced by
# one or more statements together. Other set operations such as
# INTERSECT [ALL] and EXCEPT [ALL] are also possible.
# SQLAlchemy’s Select construct supports compositions of this nature using
# functions like union(), intersect() and except_(), and the “all”
# counterparts union_all(), intersect_all() and except_all().
# The construct produced by these functions is the CompoundSelect, which is
# used in the same manner as the Select construct, except that it has fewer
# methods.
stmt1 = select(user_table).where(user_table.c.name == "sandy")
stmt2 = select(user_table).where(user_table.c.name == "spongebob")
u = union_all(stmt1, stmt2)
with engine.connect() as conn:
result = conn.execute(u)
print(result.all())
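    # Sketch of the plain union() form mentioned above (duplicates removed);
    # union is imported locally here to keep the illustration self-contained.
    from sqlalchemy import union

    u_distinct = union(stmt1, stmt2)
    with engine.connect() as conn:
        print(conn.execute(u_distinct).all())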
# To use a CompoundSelect as a subquery, just like Select it provides a
# SelectBase.subquery() method which will produce a Subquery object with a
# FromClause.c collection that may be referred towards in an enclosing
# select().
u_subq = u.subquery()
stmt = (
select(u_subq.c.name, address_table.c.email_address)
.join_from(address_table, u_subq)
.order_by(u_subq.c.name, address_table.c.email_address)
)
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
def exists_subqueries(engine):
# The SQL EXISTS keyword is an operator that is used with scalar subqueries
# to return a boolean true or false depending on if the SELECT statement
# would return a row.
# Below we produce an EXISTS so that we can return user_account rows that
# have more than one related row in address.
subq = (
select(func.count(address_table.c.id))
.where(user_table.c.id == address_table.c.user_id)
.group_by(address_table.c.user_id)
.having(func.count(address_table.c.id) > 1)
).exists()
with engine.connect() as conn:
result = conn.execute(select(user_table.c.name).where(subq))
print(result.all())
# The EXISTS construct is more often than not used as a negation, e.g.
# NOT EXISTS, as it provides a SQL-efficient form of locating rows for
# which a related table has no rows. Below we select user names that have
# no email addresses; note the binary negation operator (~) used inside
# the second WHERE clause.
subq = (
select(address_table.c.id).where(
user_table.c.id == address_table.c.user_id
)
).exists()
with engine.connect() as conn:
result = conn.execute(select(user_table.c.name).where(~subq))
print(result.all())
def sql_functions(engine):
# The func object serves as a factory for creating new Function objects,
# which when used in a construct like select(), produce a SQL function
# display, typically consisting of a name, some parenthesis (although not
# always), and possibly some arguments.
print(select(func.count()).select_from(user_table))
stmt_lower = select(func.lower("A String With Much UPPERCASE"))
print(stmt_lower)
stmt_now = select(func.now())
with engine.connect() as conn:
result = conn.execute(stmt_lower)
print(result.all())
result = conn.execute(stmt_now)
print(result.all())
# As most database backends feature dozens if not hundreds of different
# SQL functions, func tries to be as liberal as possible in what it
# accepts. Any name that is accessed from this namespace is automatically
# considered to be a SQL function that will render in a generic way.
print(select(func.some_crazy_function(user_table.c.name, 17)))
# At the same time, a relatively small set of extremely common SQL
# functions such as count, now, max, concat include pre-packaged versions
# of themselves which provide for proper typing information as well as
# backend-specific SQL generation in some cases. The example below
# contrasts the SQL generation that occurs for the PostgreSQL dialect
# compared to the Oracle dialect for the now function.
print(
"postgresql dialect: ",
select(func.now()).compile(dialect=postgresql.dialect()),
)
print(
"oracle dialect: ", select(func.now()).compile(dialect=oracle.dialect())
)
def functions_return_types(engine):
# As functions are column expressions, they also have SQL datatypes that
# describe the data type of a generated SQL expression. We refer to these
# types here as “SQL return types”, in reference to the type of SQL value
# that is returned by the function in the context of a database-side SQL
# expression, as opposed to the “return type” of a Python function.
# The SQL return type of any SQL function may be accessed, typically for
    # debugging purposes, by referring to the Function.type attribute.
print(f"func.now().type: {func.now().type}")
# The SQL return type of the function may also be significant when
# executing a statement and getting rows back, for those cases where
# SQLAlchemy has to apply result-set processing. A prime example of this
# are date-related functions on SQLite, where SQLAlchemy’s DateTime and
# related datatypes take on the role of converting from string values to
# Python datetime() objects as result rows are received.
with engine.connect() as conn:
result = conn.execute(func.now())
now_val = result.all()[0][0]
print(f"func.now(), value: {now_val}, type: {type(now_val)}")
# To apply a specific type to a function we’re creating, we pass it using
# the Function.type_ parameter; the type argument may be either a
# TypeEngine class or an instance. In the example below we pass the JSON
# class to generate the PostgreSQL json_object() function, noting that the
# SQL return type will be of type JSON.
function_expr = func.json_object('{a, 1, b, "def", c, 3.5}', type_=JSON)
# By creating our JSON function with the JSON datatype, the SQL expression
# object takes on JSON-related features, such as that of accessing elements.
stmt = select(function_expr["def"])
print(f"stmt: {stmt}")
def builtin_functions_return_types(engine):
# For common aggregate functions like count, max, min as well as a very
# small number of date functions like now and string functions like concat,
# the SQL return type is set up appropriately, sometimes based on usage.
# The max function and similar aggregate filtering functions will set up
# the SQL return type based on the argument given.
m1 = func.max(Column("some_int", Integer))
print(f"m1.type: {m1.type}")
m2 = func.max(Column("some_str", String))
print(f"m2.type: {m2.type}")
print(f"func.now().type: {func.now().type}")
print(f"func.current_date().type: {func.current_date().type}")
print(f"func.concat('x', 'y').type: {func.concat('x', 'y').type}")
# However, for the vast majority of SQL functions, SQLAlchemy does not have
# them explicitly present in its very small list of known functions. For
# example, while there is typically no issue using SQL functions func.lower()
# and func.upper() to convert the casing of strings, SQLAlchemy doesn’t
# actually know about these functions, so they have a “null” SQL return type.
print(f"func.upper('lowercase').type: {func.upper('lowercase').type}")
# For simple functions like upper and lower, the issue is not usually
# significant, as string values may be received from the database without
# any special type handling on the SQLAlchemy side, and SQLAlchemy’s type
# coercion rules can often correctly guess intent as well; the
# Python + operator for example will be correctly interpreted as the string
# concatenation operator based on looking at both sides of the expression.
print(select(func.upper("lowercase") + "suffix"))
# Overall, the scenario where the Function.type_ parameter is likely
# necessary is:
# 1. The function is not already a SQLAlchemy built-in function; this can
# be evidenced by creating the function and observing the Function.type
# attribute, that is
print(f"func.count().type: {func.count().type}")
# versus
j_obj_type = func.json_object('{"a", "b"}').type
print(f"j_obj_type: {j_obj_type}")
# 2. Function-aware expression support is needed; this most typically
# refers to special operators related to datatypes such as JSON or ARRAY
# 3. Result value processing is needed, which may include types such as
# DateTime, Boolean, Enum, or again special datatypes such as JSON, ARRAY
def using_window_functions(engine):
# A window function is a special use of a SQL aggregate function which
# calculates the aggregate value over the rows being returned in a group
# as the individual result rows are processed. Whereas a function like
# MAX() will give you the highest value of a column within a set of rows,
    # using the same function as a “window function” will give you the highest
# value for each row, as of that row.
#
# In SQL, window functions allow one to specify the rows over which the
# function should be applied, a “partition” value which considers the window
# over different sub-sets of rows, and an “order by” expression which
# importantly indicates the order in which rows should be applied to the
# aggregate function.
#
# In SQLAlchemy, all SQL functions generated by the func namespace include
# a method FunctionElement.over() which grants the window function, or
# “OVER”, syntax; the construct produced is the Over construct.
#
# A common function used with window functions is the row_number() function
# which simply counts rows. We may partition this row count against user
# name to number the email addresses of individual users.
stmt = (
select(
func.row_number().over(partition_by=user_table.c.name),
user_table.c.name,
address_table.c.email_address,
)
.select_from(user_table)
.join(address_table)
)
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
# Above, the FunctionElement.over.partition_by parameter is used so that
# the PARTITION BY clause is rendered within the OVER clause. We also may
# make use of the ORDER BY clause using FunctionElement.over.order_by
stmt = (
select(
func.count().over(order_by=user_table.c.name),
user_table.c.name,
address_table.c.email_address,
)
.select_from(user_table)
.join(address_table)
)
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
# with engine.connect() as conn:
# result = conn.execute(
# select(user_table.c.name, address_table.c.email_address)
# .select_from(user_table)
# .join(address_table, isouter=True)
# )
# print(result.all())
def modifiers_within_group_filter(engine):
# The “WITHIN GROUP” SQL syntax is used in conjunction with an “ordered set”
# or a “hypothetical set” aggregate function. Common “ordered set” functions
# include percentile_cont() and rank(). SQLAlchemy includes built in
# implementations rank, dense_rank, mode, percentile_cont and percentile_disc
# which include a FunctionElement.within_group() method.
    # USAGE IS NOT DEMONSTRATED (IS IT EVEN DEMONSTRABLE?) ON THIS DATABASE OR WITH SQLITE?
stmt = func.unnest(
func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(
user_table.c.name
)
)
print(stmt)
# “FILTER” is supported by some backends to limit the range of an aggregate
# function to a particular subset of rows compared to the total range of
# rows returned, available using the FunctionElement.filter() method.
stmt = (
select(
func.count(address_table.c.email_address).filter(
user_table.c.name == "sandy"
),
func.count(address_table.c.email_address).filter(
user_table.c.name == "spongebob"
),
)
.select_from(user_table)
.join(address_table)
)
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
def table_valued_functions(engine):
# Table-valued SQL functions support a scalar representation that contains
# named sub-elements. Often used for JSON and ARRAY-oriented functions as
# well as functions like generate_series(), the table-valued function is
# specified in the FROM clause, and is then referred towards as a table,
# or sometimes even as a column. Functions of this form are prominent within
# the PostgreSQL database, however some forms of table valued functions are
# also supported by SQLite, Oracle, and SQL Server.
#
# SQLAlchemy provides the FunctionElement.table_valued() method as the basic
# “table valued function” construct, which will convert a func object into a
# FROM clause containing a series of named columns, based on string names
# passed positionally.
#
# https://www.sqlite.org/json1.html#examples_using_json_each_and_json_tree_
onetwothree = func.json_each('["one", "two", "three"]').table_valued(
"value"
)
stmt = select(onetwothree).where(onetwothree.c.value.in_(["two", "three"]))
with engine.connect() as conn:
result = conn.execute(stmt)
print(result.all())
def column_valued_functions(engine):
# A special syntax supported by PostgreSQL and Oracle is that of referring
# towards a function in the FROM clause, which then delivers itself as a
# single column in the columns clause of a SELECT statement or other column
# expression context. PostgreSQL makes great use of this syntax for such
# functions as json_array_elements(), json_object_keys(), json_each_text(),
# json_each(), etc.
stmt = select(func.json_array_elements('["one", "two"]').column_valued("x"))
print(stmt)
if __name__ == "__main__":
show_objects()
dbname = "sqlite+pysqlite:///:memory:"
engine = create_engine(dbname, echo=True, future=True)
emit_database_DDL(engine)
insert_stmt(engine)
insert_two_rows(engine)
scalar_subqueries(engine)
insert_character_without_email(engine)
"""
select_stmt(engine)
select_columns_and_from(engine)
select_from_labeled_sql_expressions(engine)
select_with_textual_expressions(engine)
where_clause(engine)
explicit_from_clause_and_join(engine)
setting_on_clause(engine)
outer_and_full_join(engine)
order_by_stmt(engine)
aggregate_functions(engine)
group_by_having_stmt(engine)
using_alias(engine)
using_subqueries(engine)
using_cte(engine)
scalar_and_correlated_subqueries(engine)
union_and_other_set_operations(engine)
exists_subqueries(engine)
sql_functions(engine)
functions_return_types(engine)
builtin_functions_return_types(engine)
using_window_functions(engine)
modifiers_within_group_filter(engine)
table_valued_functions(engine)
"""
column_valued_functions(engine)