8 changes: 6 additions & 2 deletions plugins/source/postgresql/client/cdc.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/cloudquery/plugin-sdk/v2/schema"
+	"github.com/cloudquery/plugin-sdk/v3/schema"
 	"github.com/jackc/pglogrepl"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgconn"
@@ -282,7 +282,11 @@ func (c *Client) resourceFromCDCValues(tableName string, values map[string]any)
 	table := c.Tables.Get(tableName)
 	resource := schema.NewResourceData(table, nil, values)
 	for _, col := range table.Columns {
-		if err := resource.Set(col.Name, values[col.Name]); err != nil {
+		v, err := prepareValueForResourceSet(col, values[col.Name])
+		if err != nil {
+			return nil, err
+		}
+		if err := resource.Set(col.Name, v); err != nil {
 			return nil, err
 		}
 	}
4 changes: 2 additions & 2 deletions plugins/source/postgresql/client/client.go
@@ -6,8 +6,8 @@ import (
 	"strings"
 
 	"github.com/cloudquery/plugin-pb-go/specs"
-	"github.com/cloudquery/plugin-sdk/v2/plugins/source"
-	"github.com/cloudquery/plugin-sdk/v2/schema"
+	"github.com/cloudquery/plugin-sdk/v3/plugins/source"
+	"github.com/cloudquery/plugin-sdk/v3/schema"
 	pgx_zero_log "github.com/jackc/pgx-zerolog"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgxpool"
42 changes: 24 additions & 18 deletions plugins/source/postgresql/client/list_tables.go
@@ -4,18 +4,18 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/cloudquery/plugin-sdk/v2/schema"
+	"github.com/cloudquery/plugin-sdk/v3/schema"
 )
 
 // this returns the following table in sorted manner:
-// +------------------+------------+-------------+-----------+----------------+----------+--------------------+
-// | ordinal_position | table_name | column_name | data_type | is_primary_key | not_null | pk_constraint_name |
-// +------------------+------------+-------------+-----------+----------------+----------+--------------------+
-// | 1                | users      | id          | bigint    | YES            | true     | cq_users_pk        |
-// | 2                | users      | name        | text      | NO             | false    |                    |
-// | 3                | users      | email       | text      | NO             | true     | cq_users_pk        |
-// | 1                | posts      | id          | bigint    | YES            | true     | cq_posts_pk        |
-// | 2                | posts      | title       | text      | NO             | false    |                    |
+// +------------------+------------+-------------+-----------+----------------+----------+-----------+--------------------+
+// | ordinal_position | table_name | column_name | data_type | is_primary_key | not_null | is_unique | pk_constraint_name |
+// +------------------+------------+-------------+-----------+----------------+----------+-----------+--------------------+
+// | 1                | users      | id          | bigint    | YES            | true     | true      | cq_users_pk        |
+// | 2                | users      | name        | text      | NO             | false    | false     |                    |
+// | 3                | users      | email       | text      | NO             | true     | false     | cq_users_pk        |
+// | 1                | posts      | id          | bigint    | YES            | true     | true      | cq_posts_pk        |
+// | 2                | posts      | title       | text      | NO             | false    | false     |                    |
 const selectTables = `
 SELECT
 	columns.ordinal_position AS ordinal_position,
@@ -30,6 +30,10 @@ SELECT
 		WHEN pg_attribute.attnotnull THEN true
 		ELSE false
 	END AS not_null,
+	CASE
+		WHEN pg_index.indisunique THEN true
+		ELSE false
+	END AS is_unique,
 	COALESCE(pg_constraint.conname, '') AS primary_key_constraint_name
 FROM
 	pg_catalog.pg_attribute
@@ -41,6 +45,8 @@ FROM
 	pg_catalog.pg_constraint ON pg_constraint.conrelid = pg_attribute.attrelid
 	AND conkey IS NOT NULL AND array_position(conkey, pg_attribute.attnum) > 0
 	AND contype = 'p'
+	LEFT JOIN pg_catalog.pg_index ON pg_index.indrelid = pg_attribute.attrelid
+	AND pg_index.indkey::text LIKE '%%' || pg_attribute.attnum || '%%'
 INNER JOIN
 	information_schema.columns ON columns.table_name = pg_class.relname AND columns.column_name = pg_attribute.attname AND columns.table_schema = pg_catalog.pg_namespace.nspname
 WHERE
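A note on the is_unique join added above: the doubled %% survives the fmt.Sprintf call in listTables (below) as a literal %. More importantly, matching pg_attribute.attnum inside pg_index.indkey::text with LIKE is approximate, since attnum 1 also matches an indkey containing 11. A numeric membership test avoids that: in SQL, attnum = ANY(indkey) is the usual idiom; in Go, one could parse the int2vector text, as in this hypothetical helper (not part of the PR, shown only to illustrate the caveat):

package client

import (
	"strconv"
	"strings"
)

// attnumInIndkey reports whether attnum appears in pg_index.indkey rendered
// as text (an int2vector prints as space-separated attnums, e.g. "1 3").
// Hypothetical helper, not part of this PR.
func attnumInIndkey(indkey string, attnum int) bool {
	for _, field := range strings.Fields(indkey) {
		if n, err := strconv.Atoi(field); err == nil && n == attnum {
			return true
		}
	}
	return false
}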
@@ -53,16 +59,17 @@ ORDER BY
 
 func (c *Client) listTables(ctx context.Context) (schema.Tables, error) {
 	var tables schema.Tables
-	rows, err := c.Conn.Query(ctx, fmt.Sprintf(selectTables, c.currentSchemaName))
+	q := fmt.Sprintf(selectTables, c.currentSchemaName)
+	rows, err := c.Conn.Query(ctx, q)
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	for rows.Next() {
 		var ordinalPosition int
 		var tableName, columnName, columnType, pkName string
-		var isPrimaryKey, notNull bool
-		if err := rows.Scan(&ordinalPosition, &tableName, &columnName, &columnType, &isPrimaryKey, &notNull, &pkName); err != nil {
+		var isPrimaryKey, notNull, isUnique bool
+		if err := rows.Scan(&ordinalPosition, &tableName, &columnName, &columnType, &isPrimaryKey, &notNull, &isUnique, &pkName); err != nil {
 			return nil, err
 		}
 		if ordinalPosition == 1 {
@@ -76,12 +83,11 @@ func (c *Client) listTables(ctx context.Context) (schema.Tables, error) {
 			table.PkConstraintName = pkName
 		}
 		table.Columns = append(table.Columns, schema.Column{
-			Name: columnName,
-			CreationOptions: schema.ColumnCreationOptions{
-				PrimaryKey: isPrimaryKey,
-				NotNull:    notNull,
-			},
-			Type: c.PgToSchemaType(columnType),
+			Name:       columnName,
+			PrimaryKey: isPrimaryKey,
+			NotNull:    notNull,
+			Unique:     isUnique,
+			Type:       c.PgToSchemaType(columnType),
 		})
 	}
 	return tables, nil
129 changes: 95 additions & 34 deletions plugins/source/postgresql/client/sync.go
@@ -2,17 +2,19 @@ package client
 
 import (
 	"context"
+	"database/sql/driver"
 	"errors"
 	"fmt"
 	"strings"
 
-	"github.com/cloudquery/plugin-sdk/v2/plugins/source"
-	"github.com/cloudquery/plugin-sdk/v2/schema"
+	"github.com/apache/arrow/go/v13/arrow"
+	"github.com/cloudquery/plugin-sdk/v3/plugins/source"
+	"github.com/cloudquery/plugin-sdk/v3/schema"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgtype"
 )
 
 func (c *Client) Sync(ctx context.Context, metrics *source.Metrics, res chan<- *schema.Resource) error {
-	// var conn *pgconn.PgConn
 	var err error
 	var snapshotName string
 	c.metrics = metrics
@@ -57,35 +59,6 @@ func (c *Client) Sync(ctx context.Context, metrics *source.Metrics, res chan<- *schema.Resource) error {
 	return nil
 }
 
-func (c *Client) syncTable(ctx context.Context, tx pgx.Tx, table *schema.Table, res chan<- *schema.Resource) error {
-	colNames := make([]string, len(table.Columns))
-	for i, col := range table.Columns {
-		colNames[i] = pgx.Identifier{col.Name}.Sanitize()
-	}
-	query := "SELECT " + strings.Join(colNames, ",") + " FROM " + pgx.Identifier{table.Name}.Sanitize()
-	rows, err := tx.Query(ctx, query)
-	if err != nil {
-		c.metrics.TableClient[table.Name][c.ID()].Errors++
-		return err
-	}
-	defer rows.Close()
-	for rows.Next() {
-		values, err := rows.Values()
-		if err != nil {
-			c.metrics.TableClient[table.Name][c.ID()].Errors++
-			return err
-		}
-		resource, err := c.resourceFromValues(table.Name, values)
-		if err != nil {
-			c.metrics.TableClient[table.Name][c.ID()].Errors++
-			return err
-		}
-		c.metrics.TableClient[table.Name][c.ID()].Resources++
-		res <- resource
-	}
-	return nil
-}
-
 func (c *Client) syncTables(ctx context.Context, snapshotName string, res chan<- *schema.Resource) error {
 	tx, err := c.Conn.BeginTx(ctx, pgx.TxOptions{
 		// this transaction is needed for us to take a snapshot and we need to close it only at the end of the initial sync
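For context on the comment above: creating a logical replication slot exports a snapshot name, and importing that snapshot into a separate repeatable-read transaction makes the initial table scan line up exactly with the point where CDC begins. A minimal sketch of the import step, assuming a pool and snapshot name like the ones in this plugin (hypothetical helper, not the PR's code):

package client

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

// beginSnapshotTx is a hypothetical sketch of the snapshot import described
// above. SET TRANSACTION SNAPSHOT must run before any other statement in the
// transaction, and the isolation level must be at least repeatable read.
func beginSnapshotTx(ctx context.Context, pool *pgxpool.Pool, snapshotName string) (pgx.Tx, error) {
	tx, err := pool.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.RepeatableRead})
	if err != nil {
		return nil, err
	}
	// snapshotName is generated by the server, so plain concatenation suffices here.
	if _, err := tx.Exec(ctx, "SET TRANSACTION SNAPSHOT '"+snapshotName+"'"); err != nil {
		_ = tx.Rollback(ctx)
		return nil, err
	}
	return tx, nil
}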
@@ -123,13 +96,101 @@ func (c *Client) syncTables(ctx context.Context, snapshotName string, res chan<- *schema.Resource) error {
 	return nil
 }
 
+func (c *Client) syncTable(ctx context.Context, tx pgx.Tx, table *schema.Table, res chan<- *schema.Resource) error {
+	colNames := make([]string, 0, len(table.Columns))
+	for _, col := range table.Columns {
+		colNames = append(colNames, pgx.Identifier{col.Name}.Sanitize())
+	}
+	query := "SELECT " + strings.Join(colNames, ",") + " FROM " + pgx.Identifier{table.Name}.Sanitize()
+	rows, err := tx.Query(ctx, query)
+	if err != nil {
+		c.metrics.TableClient[table.Name][c.ID()].Errors++
+		return err
+	}
+	defer rows.Close()
+	for rows.Next() {
+		values, err := rows.Values()
+		if err != nil {
+			c.metrics.TableClient[table.Name][c.ID()].Errors++
+			return err
+		}
+		resource, err := c.resourceFromValues(table.Name, values)
+		if err != nil {
+			c.metrics.TableClient[table.Name][c.ID()].Errors++
+			return err
+		}
+
+		c.metrics.TableClient[table.Name][c.ID()].Resources++
+
+		res <- resource
+	}
+	return nil
+}
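As a side note, pgx.Identifier.Sanitize quotes and escapes each name, so the query syncTable builds is safe even for identifiers that need quoting. A quick sketch with assumed example names:

package main

import (
	"fmt"
	"strings"

	"github.com/jackc/pgx/v5"
)

func main() {
	// Assumed example identifiers; "user name" requires quoting.
	cols := []string{"id", "user name"}
	for i, c := range cols {
		cols[i] = pgx.Identifier{c}.Sanitize()
	}
	// Prints: SELECT "id","user name" FROM "users"
	fmt.Println("SELECT " + strings.Join(cols, ",") + " FROM " + pgx.Identifier{"users"}.Sanitize())
}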

 func (c *Client) resourceFromValues(tableName string, values []any) (*schema.Resource, error) {
 	table := c.Tables.Get(tableName)
 	resource := schema.NewResourceData(table, nil, values)
-	for i, col := range table.Columns {
-		if err := resource.Set(col.Name, values[i]); err != nil {
+	var i int
+	for _, col := range table.Columns {
+		v, err := prepareValueForResourceSet(col, values[i])
+		if err != nil {
+			return nil, err
+		}
+		if err := resource.Set(col.Name, v); err != nil {
 			return nil, err
 		}
+		i++
 	}
 	return resource, nil
 }

+func prepareValueForResourceSet(col schema.Column, v any) (any, error) {
+	switch tp := col.Type.(type) {
+	case *arrow.StringType:
+		if value, ok := v.(driver.Valuer); ok {
+			if value == driver.Valuer(nil) {
+				v = nil
+			} else {
+				val, err := value.Value()
+				if err != nil {
+					return nil, err
+				}
+				if s, ok := val.(string); ok {
+					v = s
+				}
+			}
+		}
+	case *arrow.Time32Type:
+		t, err := v.(pgtype.Time).TimeValue()
+		if err != nil {
+			return nil, err
+		}
+		v = stringForTime(t, tp.Unit)
+	case *arrow.Time64Type:
+		t, err := v.(pgtype.Time).TimeValue()
+		if err != nil {
+			return nil, err
+		}
+		v = stringForTime(t, tp.Unit)
+	}
+	return v, nil
+}
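The *arrow.StringType branch above unwraps anything implementing database/sql/driver.Valuer, so resource.Set receives a plain string rather than a wrapper type. A minimal illustration with a made-up Valuer (hypothetical, for this note only):

package client

import (
	"database/sql/driver"
	"strings"
)

// upper is a made-up driver.Valuer used only to illustrate the unwrapping.
type upper string

// Value implements driver.Valuer.
func (u upper) Value() (driver.Value, error) {
	return strings.ToUpper(string(u)), nil
}

// prepareValueForResourceSet(schema.Column{Type: arrow.BinaryTypes.String}, upper("abc"))
// then returns "ABC", nil: the Valuer is resolved before resource.Set sees the value.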

+func stringForTime(t pgtype.Time, unit arrow.TimeUnit) string {
+	extra := ""
+	hour := t.Microseconds / 1e6 / 60 / 60
+	minute := t.Microseconds / 1e6 / 60 % 60
+	second := t.Microseconds / 1e6 % 60
+	micros := t.Microseconds % 1e6
+	switch unit {
+	case arrow.Millisecond:
+		extra = fmt.Sprintf(".%03d", micros/1e3)
+	case arrow.Microsecond:
+		extra = fmt.Sprintf(".%06d", micros)
+	case arrow.Nanosecond:
+		// postgres doesn't support nanosecond precision
+		extra = fmt.Sprintf(".%06d", micros)
+	}
+
+	return fmt.Sprintf("%02d:%02d:%02d"+extra, hour, minute, second)
+}
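A usage sketch of the two helpers above, with assumed input values: a pgtype.Time carrying 13:45:07.123456 set on a Time32 millisecond column comes back as a string truncated to milliseconds.

package client

import (
	"fmt"

	"github.com/apache/arrow/go/v13/arrow"
	"github.com/cloudquery/plugin-sdk/v3/schema"
	"github.com/jackc/pgx/v5/pgtype"
)

// exampleTimePreparation is a hypothetical walkthrough, not part of the PR.
func exampleTimePreparation() {
	// 13:45:07.123456 expressed as microseconds since midnight.
	in := pgtype.Time{Microseconds: ((13*60+45)*60+7)*1e6 + 123456, Valid: true}
	col := schema.Column{Name: "start_time", Type: arrow.FixedWidthTypes.Time32ms}
	v, err := prepareValueForResourceSet(col, in)
	fmt.Println(v, err) // "13:45:07.123" <nil>
}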