@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 #coding: utf-8

+import shutil
 import os
 import contextlib
 import sys
@@ -9,7 +10,9 @@
 import subprocess
 import difflib

-repo_dir = os.path.abspath(os.path.join('../..', os.path.dirname(__file__)))
+my_dir = os.path.dirname(os.path.abspath(__file__))
+repo_dir = os.path.abspath(os.path.join(my_dir, '../../'))
+print(repo_dir)

 compilation = '''
 make USE_PGXS=1 clean
@@ -31,7 +34,7 @@

 CREATE TABLE range_rel (
     id SERIAL PRIMARY KEY,
-    dt TIMESTAMP,
+    dt TIMESTAMP NOT NULL,
     txt TEXT);
 CREATE INDEX ON range_rel (dt);
 INSERT INTO range_rel (dt, txt)
@@ -49,7 +52,7 @@
 INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g;
 SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10);
 INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */
-ALTER TABLE improved_dummy_1 ADD CHECK (name != 'ib'); /* make improved_dummy_1 disappear */
+ALTER TABLE improved_dummy_test1 ADD CHECK (name != 'ib');

 CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL);
 SELECT create_range_partitions('test_improved_dummy_test2', 'val',
@@ -61,7 +64,7 @@
 SELECT create_range_partitions('insert_into_select', 'val', 1, 20);
 CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... */

-# just a lot of actions
+-- just a lot of actions

 SELECT split_range_partition('num_range_rel_1', 500);
 SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE);
@@ -81,48 +84,13 @@
 SELECT append_range_partition('range_rel');
 SELECT prepend_range_partition('range_rel');
 SELECT drop_range_partition('range_rel_7');
-SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE);
 SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE);

-CREATE TABLE range_rel_archive (LIKE range_rel INCLUDING ALL);
-SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE);
-SELECT attach_range_partition('range_rel', 'range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE);
-SELECT detach_range_partition('range_rel_archive');
-
-CREATE TABLE range_rel_test1 (
-    id SERIAL PRIMARY KEY,
-    dt TIMESTAMP,
-    txt TEXT,
-    abc INTEGER);
-SELECT attach_range_partition('range_rel', 'range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE);
-CREATE TABLE range_rel_test2 (
-    id SERIAL PRIMARY KEY,
-    dt TIMESTAMP);
-SELECT attach_range_partition('range_rel', 'range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE);
-
-/* Half open ranges */
-SELECT add_range_partition('range_rel', NULL, '2014-12-01'::DATE, 'range_rel_minus_infinity');
-SELECT add_range_partition('range_rel', '2015-06-01'::DATE, NULL, 'range_rel_plus_infinity');
-SELECT append_range_partition('range_rel');
-SELECT prepend_range_partition('range_rel');
-
 CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL);
 SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE);
 INSERT INTO range_rel (dt) VALUES ('2012-06-15');
 INSERT INTO range_rel (dt) VALUES ('2015-12-15');

-CREATE TABLE zero(
-    id SERIAL PRIMARY KEY,
-    value INT NOT NULL);
-INSERT INTO zero SELECT g, g FROM generate_series(1, 100) as g;
-SELECT create_range_partitions('zero', 'value', 50, 10, 0);
-SELECT append_range_partition('zero', 'zero_0');
-SELECT prepend_range_partition('zero', 'zero_1');
-SELECT add_range_partition('zero', 50, 70, 'zero_50');
-SELECT append_range_partition('zero', 'zero_appended');
-SELECT prepend_range_partition('zero', 'zero_prepended');
-SELECT split_range_partition('zero_50', 60, 'zero_60');
-
 CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL);
 SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern');
@@ -140,7 +108,6 @@

 /* CaMeL cAsE table names and attributes */
 CREATE TABLE "TeSt" (a INT NOT NULL, b INT);
-SELECT create_hash_partitions('TeSt', 'a', 3);
 SELECT create_hash_partitions('"TeSt"', 'a', 3);
 INSERT INTO "TeSt" VALUES (1, 1);
 INSERT INTO "TeSt" VALUES (2, 2);
@@ -163,17 +130,6 @@
     value INTEGER NOT NULL);
 INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g;
 SELECT create_hash_partitions('hash_rel_next1', 'value', 3);
-
-CREATE TABLE range_rel_next1 (
-    id SERIAL PRIMARY KEY,
-    dt TIMESTAMP NOT NULL,
-    value INTEGER);
-INSERT INTO range_rel_next1 (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g;
-SELECT create_range_partitions('range_rel_next1', 'dt', '2010-01-01'::date, '1 month'::interval, 12);
-SELECT merge_range_partitions('range_rel_1', 'range_rel_2');
-SELECT split_range_partition('range_rel_1', '2010-02-15'::date);
-SELECT append_range_partition('range_rel_next1');
-SELECT prepend_range_partition('range_rel_next1');
 '''

 @contextlib.contextmanager
@@ -188,6 +144,10 @@ def cwd(path):
         print("cwd:", curdir)
         os.chdir(curdir)

+def shell(cmd):
+    print(cmd)
+    subprocess.check_output(cmd, shell=True)
+
 dump1_file = '/tmp/dump1.sql'
 dump2_file = '/tmp/dump2.sql'

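For readers skimming the diff: cwd() is a context manager that temporarily switches the working directory, and the new shell() helper echoes a command before running it through the shell. A minimal self-contained sketch of how the two combine (the body of cwd() is reconstructed from the hunk context; '/tmp' is just an example path):

import os
import contextlib
import subprocess

@contextlib.contextmanager
def cwd(path):
    # Switch the working directory, restoring it on exit.
    curdir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(curdir)

def shell(cmd):
    # Echo the command, then run it through the shell; raises on failure.
    print(cmd)
    subprocess.check_output(cmd, shell=True)

with cwd('/tmp'):   # example path only
    shell('pwd')    # runs inside /tmp; directory is restored afterwards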
@@ -198,12 +158,17 @@ def cwd(path):

 args = parser.parse_args()

-with open('dump_pathman_objects.sql') as f:
+with open(os.path.join(my_dir, 'dump_pathman_objects.sql'), 'r') as f:
     dump_sql = f.read()

-with cwd(repo_dir):
-    subprocess.check_output("git checkout %s" % args.branches[0], shell=True)
-    subprocess.check_output(compilation, shell=True)
+shutil.rmtree('/tmp/pg_pathman')
+shutil.copytree(repo_dir, '/tmp/pg_pathman')
+
+with cwd('/tmp/pg_pathman'):
+    shell("git clean -fdx")
+    shell("git reset --hard")
+    shell("git checkout %s" % args.branches[0])
+    shell(compilation)

     with testgres.get_new_node('updated') as node:
         node.init()
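One caveat in the copy step above: shutil.rmtree('/tmp/pg_pathman') raises an error if the directory does not exist yet, e.g. on a first run. A defensive variant (my suggestion, not part of the patch) passes ignore_errors=True:

import shutil

repo_dir = '/path/to/pg_pathman'  # placeholder; the script computes this
shutil.rmtree('/tmp/pg_pathman', ignore_errors=True)  # tolerate a missing dir
shutil.copytree(repo_dir, '/tmp/pg_pathman')          # dest must not exist yet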
@@ -214,22 +179,24 @@ def cwd(path):
         node.dump(dump1_file, 'postgres')
         node.stop()

-        subprocess.check_output("git checkout %s" % args.branches[1], shell=True)
-        subprocess.check_output(compilation, shell=True)
+        shell("git clean -fdx")
+        shell("git checkout %s" % args.branches[1])
+        shell(compilation)

         version = None
         with open('pg_pathman.control') as f:
             for line in f.readlines():
                 if line.startswith('default_version'):
-                    version = line.split('=').strip()
+                    version = line.split('=')[1].strip()

         if version is None:
             print("could not find version in second branch")
             exit(1)

         node.start()
-        node.safe_psql("postgres", "alter extension pg_pathman update to %s" % version)
-        dumped_objects_old = node.safe_psql("postgres", dump_sql)
+        p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE)
+        dumped_objects_old = p.communicate(input=dump_sql.encode())[0].decode()
         node.stop()

         # now make clean install
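The corrected parsing line matters: str.split('=') returns a list, so the old code's line.split('=').strip() raised AttributeError before any version was found. The fixed expression in isolation (sample .control line made up for illustration):

# A sample pg_pathman.control line; contents assumed for illustration.
line = "default_version = '1.5'"
version = line.split('=')[1].strip()
print(version)  # prints '1.5' (quotes included)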
@@ -238,7 +205,9 @@ def cwd(path):
         node.append_conf("shared_preload_libraries='pg_pathman'\n")
         node.start()
         node.safe_psql('postgres', run_sql)
-        dumped_objects_new = node.safe_psql("postgres", dump_sql)
+        p = subprocess.Popen(["psql", "postgres"], stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE)
+        dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode()
         node.dump(dump2_file, 'postgres')

         # check dumps
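Both dump steps now feed the SQL to a psql child process instead of node.safe_psql, so the whole script runs through psql in one session. A self-contained sketch of that subprocess pattern (it assumes psql is on PATH and a 'postgres' database is reachable with default connection settings; the SQL is a placeholder for dump_sql):

import subprocess

sql = "SELECT version();"  # placeholder for the real dump script
p = subprocess.Popen(["psql", "postgres"],
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = p.communicate(input=sql.encode())  # send SQL, collect stdout
print(out.decode())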
@@ -249,5 +218,9 @@ def cwd(path):
         node.restore(dump2_file, 'd2')
         node.stop()

-        if dumped_objects != dumped_objects_new:
-            pass
+        if dumped_objects_old != dumped_objects_new:
+            print("\nDIFF:")
+            for line in difflib.context_diff(dumped_objects_old.split('\n'), dumped_objects_new.split('\n')):
+                print(line)
+        else:
+            print("\nUPDATE CHECK: ALL GOOD")
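For reference, difflib.context_diff takes two sequences of lines and yields the diff lazily, which is why the final hunk simply iterates and prints. A self-contained example with made-up inputs:

import difflib

old = "a\nb\nc".split('\n')
new = "a\nB\nc".split('\n')
for line in difflib.context_diff(old, new):
    print(line)  # context-format diff, one line at a time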