Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 6f0cf7e

Browse files
committed
Consolidate a bunch of CVS or RCS logs read from stdin.
1 parent c4d6c4d commit 6f0cf7e

1 file changed

Lines changed: 124 additions & 0 deletions

File tree

Tools/scripts/logmerge.py

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
#! /usr/bin/env python

"""Consolidate a bunch of CVS or RCS logs read from stdin.

Input should be the output of a CVS or RCS logging command, e.g.

    cvs log -rrelease14

which dumps all log messages from release1.4 upwards (assuming that
release 1.4 was tagged with tag 'release14').

This collects all the revision records and outputs them sorted by date
rather than by file, collapsing duplicate revision records, i.e.,
records with the same message for different files.

The -t option causes it to truncate (discard) the last revision log
entry; this is useful when using something like the above cvs log
command, which shows the revisions including the given tag, while you
probably want everything *since* that tag.

XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7.

"""
24+
25+
import os, sys, getopt, string, re

# Separator lines exactly as emitted by "cvs log" / "rlog".
sep1 = '='*77 + '\n'                    # file separator (between files)
sep2 = '-'*28 + '\n'                    # revision separator (within a file)
29+
30+
def main():
    """Main program.

    Parses the command line (-t discards the last revision record of
    each file), reads per-file chunks from stdin, digests them into
    revision records, and prints the merged result newest-first.
    """
    truncate_last = 0
    # Option string is "t", not "-t": a '-' inside the short-option
    # string would register '-' itself as a bogus option character.
    opts, args = getopt.getopt(sys.argv[1:], "t")
    for o, a in opts:
        if o == '-t':
            truncate_last = 1
    database = []
    while 1:
        chunk = read_chunk(sys.stdin)
        if not chunk:
            break
        records = digest_chunk(chunk)
        if truncate_last and records:
            # Guard against an empty record list; an unconditional
            # ``del records[-1]`` would raise IndexError.
            del records[-1]
        database.extend(records)
    # Records are (date, working_file, rev, text) tuples, so a plain
    # sort orders by date; reverse to show the newest first.
    database.sort()
    database.reverse()
    format_output(database)
49+
50+
def read_chunk(fp):
    """Read a chunk -- data for one file, terminated by sep1.

    Returns a list of line-lists, one per section, where sections
    inside the chunk are delimited by sep2.  An empty list means
    end of input.
    """
    sections = []
    current = []
    for line in iter(fp.readline, ''):
        if line == sep1:
            # End of this file's chunk; keep any pending section.
            if current:
                sections.append(current)
            return sections
        if line == sep2:
            if current:
                sections.append(current)
            current = []
        else:
            current.append(line)
    # EOF without a sep1 terminator: pending unterminated lines are
    # discarded, matching the original behavior.
    return sections
73+
74+
def digest_chunk(chunk):
    """Digest a chunk -- extract working file name and revisions.

    chunk is a list of line-lists as produced by read_chunk(): the
    first holds the per-file header, each subsequent one holds one
    revision.  Returns a list of (date, working_file, rev, text)
    tuples; date, working_file and rev may be None when they could
    not be parsed from the input.
    """
    lines = chunk[0]
    key = 'Working file:'
    keylen = len(key)
    for line in lines:
        if line[:keylen] == key:
            working_file = line[keylen:].strip()
            break
    else:
        working_file = None
    records = []
    for lines in chunk[1:]:
        revline = lines[0]
        dateline = lines[1]
        text = lines[2:]
        words = dateline.split()
        if len(words) >= 3 and words[0] == 'date:':
            dateword = words[1]
            timeword = words[2]
            if timeword[-1:] == ';':
                timeword = timeword[:-1]
            date = dateword + ' ' + timeword
        else:
            date = None
        words = revline.split()
        if len(words) >= 2 and words[0] == 'revision':
            rev = words[1]
        else:
            rev = None
        # Insert the revision line at the top of the text exactly once
        # (the original inserted it both before and after parsing the
        # revision number, duplicating it in the output).
        text.insert(0, revline)
        records.append((date, working_file, rev, text))
    return records
108+
109+
def format_output(database):
110+
prevtext = None
111+
prev = []
112+
database.append((None, None, None, None)) # Sentinel
113+
for (date, working_file, rev, text) in database:
114+
if text != prevtext:
115+
if prev:
116+
print sep2,
117+
for (date, working_file, rev) in prev:
118+
print date, working_file
119+
sys.stdout.writelines(prevtext)
120+
prev = []
121+
prev.append((date, working_file, rev))
122+
prevtext = text
123+
124+
if __name__ == '__main__':
    # Only run when invoked as a script, not when imported as a module.
    main()

0 commit comments

Comments
 (0)