import json
from dataclasses import dataclass, field
from typing import Any, Dict, Generator, List, Optional

import pyarrow as pa

from cloudquery.sdk import message
from cloudquery.sdk import plugin
from cloudquery.sdk import schema
from cloudquery.sdk.scheduler import Scheduler, TableResolver
from cloudquery.sdk.schema.arrow import METADATA_TABLE_NAME
from cloudquery.sdk.schema.table import Table
from cloudquery.sdk.types import JSONType
11 | 13 |
|
# Plugin identity: the name and version this source plugin reports.
NAME = "memdb"
VERSION = "development"
|
class Client:
    """Minimal client handle for the in-memory source; carries no state."""

    def __init__(self) -> None:
        pass

    def id(self) -> str:
        """Return a stable identifier for this client."""
        return "memdb"
| 25 | + |
class MemDBResolver(TableResolver):
    """Table resolver that yields a fixed, preloaded list of records."""

    def __init__(
        self,
        table: Table,
        records: List[Dict[str, Any]],
        # Fixed annotation: default is None, so the type must be Optional.
        child_resolvers: Optional[List[TableResolver]] = None,
    ) -> None:
        """
        :param table: table definition whose rows this resolver produces
        :param records: rows to emit, one dict per row keyed by column name
        :param child_resolvers: resolvers for relation (child) tables, if any
        """
        super().__init__(table=table, child_resolvers=child_resolvers)
        self._records = records

    def resolve(self, client: None, parent_resource) -> Generator[Any, None, None]:
        # client and parent_resource are unused: the rows are static in memory.
        yield from self._records
| 36 | + |
| 37 | + |
class Table1Relation1(Table):
    """Child (relation) table of table_1 carrying a JSON payload column."""

    def __init__(self) -> None:
        columns = [
            schema.Column(
                name="name",
                type=pa.string(),
                primary_key=True,
                not_null=True,
                unique=True,
            ),
            schema.Column(name="data", type=JSONType()),
        ]
        super().__init__(
            name="table_1_relation_1",
            columns=columns,
            title="Table 1 Relation 1",
            description="Test Table 1 Relation 1",
        )

    @property
    def resolver(self):
        """Build a fresh resolver over this table's static sample rows."""
        sample_rows = [
            {"name": "a", "data": {"a": 1}},
            {"name": "b", "data": {"b": 2}},
            {"name": "c", "data": {"c": 3}},
        ]
        return MemDBResolver(self, records=sample_rows)
| 66 | + |
| 67 | + |
class Table1(Table):
    """Incremental parent table with one relation (table_1_relation_1)."""

    def __init__(self) -> None:
        name_col = schema.Column(
            name="name",
            type=pa.string(),
            primary_key=True,
            not_null=True,
            unique=True,
        )
        id_col = schema.Column(
            name="id",
            type=pa.int64(),
            primary_key=True,
            not_null=True,
            unique=True,
            incremental_key=True,
        )
        super().__init__(
            name="table_1",
            columns=[name_col, id_col],
            title="Table 1",
            description="Test Table 1",
            is_incremental=True,
            relations=[Table1Relation1()],
        )

    @property
    def resolver(self):
        """Build a resolver over static rows, wiring in each relation's resolver."""
        child_resolvers = [rel.resolver for rel in self.relations]
        sample_rows = [
            {"name": "a", "id": 1},
            {"name": "b", "id": 2},
            {"name": "c", "id": 3},
        ]
        return MemDBResolver(
            self, records=sample_rows, child_resolvers=child_resolvers
        )
| 110 | + |
| 111 | + |
class Table2(Table):
    """Simple non-incremental table with a string key and an integer id."""

    def __init__(self) -> None:
        columns = [
            schema.Column(
                name="name",
                type=pa.string(),
                primary_key=True,
                not_null=True,
                unique=True,
            ),
            schema.Column(name="id", type=pa.int64()),
        ]
        super().__init__(
            name="table_2",
            columns=columns,
            title="Table 2",
            description="Test Table 2",
        )

    @property
    def resolver(self):
        """Build a fresh resolver over this table's static sample rows."""
        sample_rows = [
            {"name": "a", "id": 1},
            {"name": "b", "id": 2},
            {"name": "c", "id": 3},
        ]
        return MemDBResolver(self, records=sample_rows)
| 140 | + |
| 141 | + |
@dataclass
class Spec:
    """User-supplied plugin spec: tuning knobs forwarded to the Scheduler."""

    # Maximum number of table resolvers the scheduler runs concurrently.
    concurrency: int = 1000
    # Upper bound on the scheduler's internal work queue.
    queue_size: int = 1000
19 | 146 |
|
20 | 147 |
|
class MemDB(plugin.Plugin):
    """In-memory CloudQuery source plugin.

    Serves two static tables (plus one relation) on sync, and stores record
    batches received via write() in a plain list so read() can return them.
    """

    def __init__(self) -> None:
        super().__init__(
            NAME, VERSION, opts=plugin.plugin.Options(team="cloudquery", kind="source")
        )
        table1 = Table1()
        table2 = Table2()
        self._tables: Dict[str, schema.Table] = {
            table1.name: table1,
            table2.name: table2,
        }
        # Record batches appended by write(); filtered and returned by read().
        self._db: List[pa.RecordBatch] = []
        self._client = Client()
        # Normally replaced via set_logger() before init(); the None default
        # avoids an AttributeError if init() runs without a logger attached.
        self._logger = None

    def set_logger(self, logger) -> None:
        """Attach the logger handed down by the plugin server."""
        self._logger = logger

    def init(self, spec, no_connection: bool = False):
        """Parse the JSON spec and construct the scheduler.

        :param spec: raw JSON (bytes/str) with optional concurrency/queue_size
        :param no_connection: when True, skip all setup (schema-only mode)
        """
        if no_connection:
            return
        self._spec_json = json.loads(spec)
        self._spec = Spec(**self._spec_json)
        self._scheduler = Scheduler(
            concurrency=self._spec.concurrency,
            queue_size=self._spec.queue_size,
            logger=self._logger,
        )

    def get_tables(self, options: plugin.TableOptions = None) -> List[plugin.Table]:
        """Return the table definitions filtered by the given options.

        NOTE(review): options is dereferenced unconditionally, so the default
        None would raise AttributeError — callers must pass TableOptions.
        """
        tables = list(self._tables.values())

        # Wire up parent links so relation tables know their parent table.
        for table in tables:
            for relation in table.relations:
                relation.parent = table

        return schema.filter_dfs(tables, options.tables, options.skip_tables)

    def sync(
        self, options: plugin.SyncOptions
    ) -> Generator[message.SyncMessage, None, None]:
        """Run the scheduler over the resolvers of every selected table."""
        resolvers: List[TableResolver] = []
        for table in self.get_tables(
            plugin.TableOptions(
                tables=options.tables,
                skip_tables=options.skip_tables,
                skip_dependent_tables=options.skip_dependent_tables,
            )
        ):
            resolvers.append(table.resolver)

        return self._scheduler.sync(
            self._client, resolvers, options.deterministic_cq_id
        )

    def write(self, writer: Generator[message.WriteMessage, None, None]) -> None:
        """Consume write messages, storing inserted record batches in memory.

        :raises NotImplementedError: for message types other than migrate/insert
        """
        for msg in writer:
            if isinstance(msg, message.WriteMigrateTableMessage):
                # Nothing to migrate: storage is just a list of record batches.
                pass
            elif isinstance(msg, message.WriteInsertMessage):
                self._db.append(msg.record)
            else:
                raise NotImplementedError(f"Unknown message type {type(msg)}")

    def read(self, table: Table) -> Generator[message.ReadMessage, None, None]:
        """Yield stored record batches whose embedded table name matches."""
        for record in self._db:
            # Batches carry their table name in the Arrow schema metadata.
            # Guard against batches with no metadata or a missing name key
            # instead of crashing on .get()/.decode() of None.
            metadata = record.schema.metadata or {}
            table_name = metadata.get(METADATA_TABLE_NAME)
            if table_name is not None and table_name.decode("utf-8") == table.name:
                yield message.ReadMessage(record)

    def close(self) -> None:
        """Drop all stored record batches."""
        # Bug fix: _db is a list of record batches, so reset it to [] (was {}).
        self._db = []
|
0 commit comments