update windows build to Python 3.7

commit ddc59ab92d, parent 73105fa71e
5761 changed files with 750298 additions and 213405 deletions
Tools/demo/beer.py (new file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env python3

"""
A Python version of the classic "bottles of beer on the wall" programming
example.

By Guido van Rossum, demystified after a version by Fredrik Lundh.
"""

import sys

n = 100
if sys.argv[1:]:
    n = int(sys.argv[1])

def bottle(n):
    if n == 0: return "no more bottles of beer"
    if n == 1: return "one bottle of beer"
    return str(n) + " bottles of beer"

for i in range(n, 0, -1):
    print(bottle(i), "on the wall,")
    print(bottle(i) + ".")
    print("Take one down, pass it around,")
    print(bottle(i-1), "on the wall.")

Tools/demo/eiffel.py (new file, 146 lines)
@@ -0,0 +1,146 @@
#!/usr/bin/env python3

"""
Support Eiffel-style preconditions and postconditions for functions.

An example for Python metaclasses.
"""

import unittest
from types import FunctionType as function

class EiffelBaseMetaClass(type):

    def __new__(meta, name, bases, dict):
        meta.convert_methods(dict)
        return super(EiffelBaseMetaClass, meta).__new__(
            meta, name, bases, dict)

    @classmethod
    def convert_methods(cls, dict):
        """Replace functions in dict with EiffelMethod wrappers.

        The dict is modified in place.

        If a method ends in _pre or _post, it is removed from the dict
        regardless of whether there is a corresponding method.
        """
        # find methods with pre or post conditions
        methods = []
        for k, v in dict.items():
            if k.endswith('_pre') or k.endswith('_post'):
                assert isinstance(v, function)
            elif isinstance(v, function):
                methods.append(k)
        for m in methods:
            pre = dict.get("%s_pre" % m)
            post = dict.get("%s_post" % m)
            if pre or post:
                dict[m] = cls.make_eiffel_method(dict[m], pre, post)


class EiffelMetaClass1(EiffelBaseMetaClass):
    # an implementation of the "eiffel" meta class that uses nested functions

    @staticmethod
    def make_eiffel_method(func, pre, post):
        def method(self, *args, **kwargs):
            if pre:
                pre(self, *args, **kwargs)
            rv = func(self, *args, **kwargs)
            if post:
                post(self, rv, *args, **kwargs)
            return rv

        if func.__doc__:
            method.__doc__ = func.__doc__

        return method


class EiffelMethodWrapper:

    def __init__(self, inst, descr):
        self._inst = inst
        self._descr = descr

    def __call__(self, *args, **kwargs):
        return self._descr.callmethod(self._inst, args, kwargs)


class EiffelDescriptor:

    def __init__(self, func, pre, post):
        self._func = func
        self._pre = pre
        self._post = post

        self.__name__ = func.__name__
        self.__doc__ = func.__doc__

    def __get__(self, obj, cls):
        return EiffelMethodWrapper(obj, self)

    def callmethod(self, inst, args, kwargs):
        if self._pre:
            self._pre(inst, *args, **kwargs)
        x = self._func(inst, *args, **kwargs)
        if self._post:
            self._post(inst, x, *args, **kwargs)
        return x


class EiffelMetaClass2(EiffelBaseMetaClass):
    # an implementation of the "eiffel" meta class that uses descriptors

    make_eiffel_method = EiffelDescriptor


class Tests(unittest.TestCase):

    def testEiffelMetaClass1(self):
        self._test(EiffelMetaClass1)

    def testEiffelMetaClass2(self):
        self._test(EiffelMetaClass2)

    def _test(self, metaclass):
        class Eiffel(metaclass=metaclass):
            pass

        class Test(Eiffel):
            def m(self, arg):
                """Make it a little larger"""
                return arg + 1

            def m2(self, arg):
                """Make it a little larger"""
                return arg + 1

            def m2_pre(self, arg):
                assert arg > 0

            def m2_post(self, result, arg):
                assert result > arg

        class Sub(Test):
            def m2(self, arg):
                return arg**2

            def m2_post(self, Result, arg):
                super(Sub, self).m2_post(Result, arg)
                assert Result < 100

        t = Test()
        self.assertEqual(t.m(1), 2)
        self.assertEqual(t.m2(1), 2)
        self.assertRaises(AssertionError, t.m2, 0)

        s = Sub()
        self.assertRaises(AssertionError, s.m2, 1)
        self.assertRaises(AssertionError, s.m2, 10)
        self.assertEqual(s.m2(5), 25)


if __name__ == "__main__":
    unittest.main()

Tools/demo/hanoi.py (new file, 154 lines)
@@ -0,0 +1,154 @@
#!/usr/bin/env python3

"""
Animated Towers of Hanoi using Tk with optional bitmap file in background.

Usage: hanoi.py [n [bitmapfile]]

n is the number of pieces to animate; default is 4, maximum 15.

The bitmap file can be any X11 bitmap file (look in /usr/include/X11/bitmaps for
samples); it is displayed as the background of the animation.  Default is no
bitmap.
"""

from tkinter import Tk, Canvas

# Basic Towers-of-Hanoi algorithm: move n pieces from a to b, using c
# as temporary.  For each move, call report()
def hanoi(n, a, b, c, report):
    if n <= 0: return
    hanoi(n-1, a, c, b, report)
    report(n, a, b)
    hanoi(n-1, c, b, a, report)


# The graphical interface
class Tkhanoi:

    # Create our objects
    def __init__(self, n, bitmap = None):
        self.n = n
        self.tk = tk = Tk()
        self.canvas = c = Canvas(tk)
        c.pack()
        width, height = tk.getint(c['width']), tk.getint(c['height'])

        # Add background bitmap
        if bitmap:
            self.bitmap = c.create_bitmap(width//2, height//2,
                                          bitmap=bitmap,
                                          foreground='blue')

        # Generate pegs
        pegwidth = 10
        pegheight = height//2
        pegdist = width//3
        x1, y1 = (pegdist-pegwidth)//2, height*1//3
        x2, y2 = x1+pegwidth, y1+pegheight
        self.pegs = []
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        self.tk.update()

        # Generate pieces
        pieceheight = pegheight//16
        maxpiecewidth = pegdist*2//3
        minpiecewidth = 2*pegwidth
        self.pegstate = [[], [], []]
        self.pieces = {}
        x1, y1 = (pegdist-maxpiecewidth)//2, y2-pieceheight-2
        x2, y2 = x1+maxpiecewidth, y1+pieceheight
        dx = (maxpiecewidth-minpiecewidth) // (2*max(1, n-1))
        for i in range(n, 0, -1):
            p = c.create_rectangle(x1, y1, x2, y2, fill='red')
            self.pieces[i] = p
            self.pegstate[0].append(i)
            x1, x2 = x1 + dx, x2-dx
            y1, y2 = y1 - pieceheight-2, y2-pieceheight-2
            self.tk.update()
            self.tk.after(25)

    # Run -- never returns
    def run(self):
        while 1:
            hanoi(self.n, 0, 1, 2, self.report)
            hanoi(self.n, 1, 2, 0, self.report)
            hanoi(self.n, 2, 0, 1, self.report)
            hanoi(self.n, 0, 2, 1, self.report)
            hanoi(self.n, 2, 1, 0, self.report)
            hanoi(self.n, 1, 0, 2, self.report)

    # Reporting callback for the actual hanoi function
    def report(self, i, a, b):
        if self.pegstate[a][-1] != i: raise RuntimeError # Assertion
        del self.pegstate[a][-1]
        p = self.pieces[i]
        c = self.canvas

        # Lift the piece above peg a
        ax1, ay1, ax2, ay2 = c.bbox(self.pegs[a])
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 < ay1: break
            c.move(p, 0, -1)
            self.tk.update()

        # Move it towards peg b
        bx1, by1, bx2, by2 = c.bbox(self.pegs[b])
        newcenter = (bx1+bx2)//2
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            center = (x1+x2)//2
            if center == newcenter: break
            if center > newcenter: c.move(p, -1, 0)
            else: c.move(p, 1, 0)
            self.tk.update()

        # Move it down on top of the previous piece
        pieceheight = y2-y1
        newbottom = by2 - pieceheight*len(self.pegstate[b]) - 2
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 >= newbottom: break
            c.move(p, 0, 1)
            self.tk.update()

        # Update peg state
        self.pegstate[b].append(i)


def main():
    import sys

    # First argument is number of pegs, default 4
    if sys.argv[1:]:
        n = int(sys.argv[1])
    else:
        n = 4

    # Second argument is bitmap file, default none
    if sys.argv[2:]:
        bitmap = sys.argv[2]
        # Reverse meaning of leading '@' compared to Tk
        if bitmap[0] == '@': bitmap = bitmap[1:]
        else: bitmap = '@' + bitmap
    else:
        bitmap = None

    # Create the graphical objects...
    h = Tkhanoi(n, bitmap)

    # ...and run!
    h.run()


# Call main when run as script
if __name__ == '__main__':
    main()

Tools/demo/life.py (new file, 262 lines)
@@ -0,0 +1,262 @@
#!/usr/bin/env python3

"""
A curses-based version of Conway's Game of Life.

An empty board will be displayed, and the following commands are available:
 E : Erase the board
 R : Fill the board randomly
 S : Step for a single generation
 C : Update continuously until a key is struck
 Q : Quit
 Cursor keys :  Move the cursor around the board
 Space or Enter : Toggle the contents of the cursor's position

Contributed by Andrew Kuchling, Mouse support and color by Dafydd Crosby.
"""

import curses
import random


class LifeBoard:
    """Encapsulates a Life board

    Attributes:
    X,Y : horizontal and vertical size of the board
    state : dictionary mapping (x,y) to 0 or 1

    Methods:
    display(update_board) -- If update_board is true, compute the
                             next generation.  Then display the state
                             of the board and refresh the screen.
    erase() -- clear the entire board
    make_random() -- fill the board randomly
    set(y,x) -- set the given cell to Live; doesn't refresh the screen
    toggle(y,x) -- change the given cell from live to dead, or vice
                   versa, and refresh the screen display

    """
    def __init__(self, scr, char=ord('*')):
        """Create a new LifeBoard instance.

        scr -- curses screen object to use for display
        char -- character used to render live cells (default: '*')
        """
        self.state = {}
        self.scr = scr
        Y, X = self.scr.getmaxyx()
        self.X, self.Y = X - 2, Y - 2 - 1
        self.char = char
        self.scr.clear()

        # Draw a border around the board
        border_line = '+' + (self.X * '-') + '+'
        self.scr.addstr(0, 0, border_line)
        self.scr.addstr(self.Y + 1, 0, border_line)
        for y in range(0, self.Y):
            self.scr.addstr(1 + y, 0, '|')
            self.scr.addstr(1 + y, self.X + 1, '|')
        self.scr.refresh()

    def set(self, y, x):
        """Set a cell to the live state"""
        if x < 0 or self.X <= x or y < 0 or self.Y <= y:
            raise ValueError("Coordinates out of range %i,%i" % (y, x))
        self.state[x, y] = 1

    def toggle(self, y, x):
        """Toggle a cell's state between live and dead"""
        if x < 0 or self.X <= x or y < 0 or self.Y <= y:
            raise ValueError("Coordinates out of range %i,%i" % (y, x))
        if (x, y) in self.state:
            del self.state[x, y]
            self.scr.addch(y + 1, x + 1, ' ')
        else:
            self.state[x, y] = 1
            if curses.has_colors():
                # Let's pick a random color!
                self.scr.attrset(curses.color_pair(random.randrange(1, 7)))
            self.scr.addch(y + 1, x + 1, self.char)
            self.scr.attrset(0)
        self.scr.refresh()

    def erase(self):
        """Clear the entire board and update the board display"""
        self.state = {}
        self.display(update_board=False)

    def display(self, update_board=True):
        """Display the whole board, optionally computing one generation"""
        M, N = self.X, self.Y
        if not update_board:
            for i in range(0, M):
                for j in range(0, N):
                    if (i, j) in self.state:
                        self.scr.addch(j + 1, i + 1, self.char)
                    else:
                        self.scr.addch(j + 1, i + 1, ' ')
            self.scr.refresh()
            return

        d = {}
        self.boring = 1
        for i in range(0, M):
            L = range(max(0, i - 1), min(M, i + 2))
            for j in range(0, N):
                s = 0
                live = (i, j) in self.state
                for k in range(max(0, j - 1), min(N, j + 2)):
                    for l in L:
                        if (l, k) in self.state:
                            s += 1
                s -= live
                if s == 3:
                    # Birth
                    d[i, j] = 1
                    if curses.has_colors():
                        # Let's pick a random color!
                        self.scr.attrset(curses.color_pair(
                            random.randrange(1, 7)))
                    self.scr.addch(j + 1, i + 1, self.char)
                    self.scr.attrset(0)
                    if not live:
                        self.boring = 0
                elif s == 2 and live:
                    # Survival
                    d[i, j] = 1
                elif live:
                    # Death
                    self.scr.addch(j + 1, i + 1, ' ')
                    self.boring = 0
        self.state = d
        self.scr.refresh()

    def make_random(self):
        "Fill the board with a random pattern"
        self.state = {}
        for i in range(0, self.X):
            for j in range(0, self.Y):
                if random.random() > 0.5:
                    self.set(j, i)


def erase_menu(stdscr, menu_y):
    "Clear the space where the menu resides"
    stdscr.move(menu_y, 0)
    stdscr.clrtoeol()
    stdscr.move(menu_y + 1, 0)
    stdscr.clrtoeol()


def display_menu(stdscr, menu_y):
    "Display the menu of possible keystroke commands"
    erase_menu(stdscr, menu_y)

    # If color, then light the menu up :-)
    if curses.has_colors():
        stdscr.attrset(curses.color_pair(1))
    stdscr.addstr(menu_y, 4,
        'Use the cursor keys to move, and space or Enter to toggle a cell.')
    stdscr.addstr(menu_y + 1, 4,
        'E)rase the board, R)andom fill, S)tep once or C)ontinuously, Q)uit')
    stdscr.attrset(0)


def keyloop(stdscr):
    # Clear the screen and display the menu of keys
    stdscr.clear()
    stdscr_y, stdscr_x = stdscr.getmaxyx()
    menu_y = (stdscr_y - 3) - 1
    display_menu(stdscr, menu_y)

    # If color, then initialize the color pairs
    if curses.has_colors():
        curses.init_pair(1, curses.COLOR_BLUE, 0)
        curses.init_pair(2, curses.COLOR_CYAN, 0)
        curses.init_pair(3, curses.COLOR_GREEN, 0)
        curses.init_pair(4, curses.COLOR_MAGENTA, 0)
        curses.init_pair(5, curses.COLOR_RED, 0)
        curses.init_pair(6, curses.COLOR_YELLOW, 0)
        curses.init_pair(7, curses.COLOR_WHITE, 0)

    # Set up the mask to listen for mouse events
    curses.mousemask(curses.BUTTON1_CLICKED)

    # Allocate a subwindow for the Life board and create the board object
    subwin = stdscr.subwin(stdscr_y - 3, stdscr_x, 0, 0)
    board = LifeBoard(subwin, char=ord('*'))
    board.display(update_board=False)

    # xpos, ypos are the cursor's position
    xpos, ypos = board.X // 2, board.Y // 2

    # Main loop:
    while True:
        stdscr.move(1 + ypos, 1 + xpos)     # Move the cursor
        c = stdscr.getch()                  # Get a keystroke
        if 0 < c < 256:
            c = chr(c)
            if c in ' \n':
                board.toggle(ypos, xpos)
            elif c in 'Cc':
                erase_menu(stdscr, menu_y)
                stdscr.addstr(menu_y, 6, ' Hit any key to stop continuously '
                              'updating the screen.')
                stdscr.refresh()
                # Activate nodelay mode; getch() will return -1
                # if no keystroke is available, instead of waiting.
                stdscr.nodelay(1)
                while True:
                    c = stdscr.getch()
                    if c != -1:
                        break
                    stdscr.addstr(0, 0, '/')
                    stdscr.refresh()
                    board.display()
                    stdscr.addstr(0, 0, '+')
                    stdscr.refresh()

                stdscr.nodelay(0)       # Disable nodelay mode
                display_menu(stdscr, menu_y)

            elif c in 'Ee':
                board.erase()
            elif c in 'Qq':
                break
            elif c in 'Rr':
                board.make_random()
                board.display(update_board=False)
            elif c in 'Ss':
                board.display()
            else:
                # Ignore incorrect keys
                pass
        elif c == curses.KEY_UP and ypos > 0:
            ypos -= 1
        elif c == curses.KEY_DOWN and ypos + 1 < board.Y:
            ypos += 1
        elif c == curses.KEY_LEFT and xpos > 0:
            xpos -= 1
        elif c == curses.KEY_RIGHT and xpos + 1 < board.X:
            xpos += 1
        elif c == curses.KEY_MOUSE:
            mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
            if (mouse_x > 0 and mouse_x < board.X + 1 and
                mouse_y > 0 and mouse_y < board.Y + 1):
                xpos = mouse_x - 1
                ypos = mouse_y - 1
                board.toggle(ypos, xpos)
            else:
                # They've clicked outside the board
                curses.flash()
        else:
            # Ignore incorrect keys
            pass


def main(stdscr):
    keyloop(stdscr)                 # Enter the main loop

if __name__ == '__main__':
    curses.wrapper(main)

Tools/demo/markov.py (new file, 125 lines)
@@ -0,0 +1,125 @@
#!/usr/bin/env python3

"""
Markov chain simulation of words or characters.
"""

class Markov:
    def __init__(self, histsize, choice):
        self.histsize = histsize
        self.choice = choice
        self.trans = {}

    def add(self, state, next):
        self.trans.setdefault(state, []).append(next)

    def put(self, seq):
        n = self.histsize
        add = self.add
        add(None, seq[:0])
        for i in range(len(seq)):
            add(seq[max(0, i-n):i], seq[i:i+1])
        add(seq[len(seq)-n:], None)

    def get(self):
        choice = self.choice
        trans = self.trans
        n = self.histsize
        seq = choice(trans[None])
        while True:
            subseq = seq[max(0, len(seq)-n):]
            options = trans[subseq]
            next = choice(options)
            if not next:
                break
            seq += next
        return seq


def test():
    import sys, random, getopt
    args = sys.argv[1:]
    try:
        opts, args = getopt.getopt(args, '0123456789cdwq')
    except getopt.error:
        print('Usage: %s [-#] [-cddqw] [file] ...' % sys.argv[0])
        print('Options:')
        print('-#: 1-digit history size (default 2)')
        print('-c: characters (default)')
        print('-w: words')
        print('-d: more debugging output')
        print('-q: no debugging output')
        print('Input files (default stdin) are split in paragraphs')
        print('separated blank lines and each paragraph is split')
        print('in words by whitespace, then reconcatenated with')
        print('exactly one space separating words.')
        print('Output consists of paragraphs separated by blank')
        print('lines, where lines are no longer than 72 characters.')
        sys.exit(2)
    histsize = 2
    do_words = False
    debug = 1
    for o, a in opts:
        if '-0' <= o <= '-9': histsize = int(o[1:])
        if o == '-c': do_words = False
        if o == '-d': debug += 1
        if o == '-q': debug = 0
        if o == '-w': do_words = True
    if not args:
        args = ['-']

    m = Markov(histsize, random.choice)
    try:
        for filename in args:
            if filename == '-':
                f = sys.stdin
                if f.isatty():
                    print('Sorry, need stdin from file')
                    continue
            else:
                f = open(filename, 'r')
            if debug: print('processing', filename, '...')
            text = f.read()
            f.close()
            paralist = text.split('\n\n')
            for para in paralist:
                if debug > 1: print('feeding ...')
                words = para.split()
                if words:
                    if do_words:
                        data = tuple(words)
                    else:
                        data = ' '.join(words)
                    m.put(data)
    except KeyboardInterrupt:
        print('Interrupted -- continue with data read so far')
    if not m.trans:
        print('No valid input files')
        return
    if debug: print('done.')

    if debug > 1:
        for key in m.trans.keys():
            if key is None or len(key) < histsize:
                print(repr(key), m.trans[key])
        if histsize == 0: print(repr(''), m.trans[''])
    print()
    while True:
        data = m.get()
        if do_words:
            words = data
        else:
            words = data.split()
        n = 0
        limit = 72
        for w in words:
            if n + len(w) > limit:
                print()
                n = 0
            print(w, end=' ')
            n += len(w) + 1
        print()
        print()

if __name__ == "__main__":
    test()

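Aside (not part of the commit): a minimal sketch of driving the Markov class above programmatically instead of through its test() command-line driver. The training string is made up for illustration, and the import assumes Tools/demo is on sys.path.

    # Hypothetical usage sketch for Tools/demo/markov.py; not part of the diff.
    import random
    from markov import Markov   # assumes Tools/demo is importable

    m = Markov(histsize=2, choice=random.choice)
    # Feed one training sequence; put() records 2-character histories.
    m.put("the quick brown fox jumps over the lazy dog")
    # Generate a new sequence by walking the recorded transitions.
    print(m.get())
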
Tools/demo/mcast.py (new file, 82 lines)
@@ -0,0 +1,82 @@
#!/usr/bin/env python3

"""
Send/receive UDP multicast packets.
Requires that your OS kernel supports IP multicast.

Usage:
  mcast -s (sender, IPv4)
  mcast -s -6 (sender, IPv6)
  mcast (receivers, IPv4)
  mcast -6 (receivers, IPv6)
"""

MYPORT = 8123
MYGROUP_4 = '225.0.0.250'
MYGROUP_6 = 'ff15:7079:7468:6f6e:6465:6d6f:6d63:6173'
MYTTL = 1 # Increase to reach other networks

import time
import struct
import socket
import sys

def main():
    group = MYGROUP_6 if "-6" in sys.argv[1:] else MYGROUP_4

    if "-s" in sys.argv[1:]:
        sender(group)
    else:
        receiver(group)


def sender(group):
    addrinfo = socket.getaddrinfo(group, None)[0]

    s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)

    # Set Time-to-live (optional)
    ttl_bin = struct.pack('@i', MYTTL)
    if addrinfo[0] == socket.AF_INET: # IPv4
        s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl_bin)
    else:
        s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)

    while True:
        data = repr(time.time()).encode('utf-8') + b'\0'
        s.sendto(data, (addrinfo[4][0], MYPORT))
        time.sleep(1)


def receiver(group):
    # Look up multicast group address in name server and find out IP version
    addrinfo = socket.getaddrinfo(group, None)[0]

    # Create a socket
    s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)

    # Allow multiple copies of this program on one machine
    # (not strictly needed)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # Bind it to the port
    s.bind(('', MYPORT))

    group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
    # Join group
    if addrinfo[0] == socket.AF_INET: # IPv4
        mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
        s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    else:
        mreq = group_bin + struct.pack('@I', 0)
        s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)

    # Loop, printing any data we receive
    while True:
        data, sender = s.recvfrom(1500)
        while data[-1:] == '\0': data = data[:-1] # Strip trailing \0's
        print(str(sender) + ' ' + repr(data))


if __name__ == '__main__':
    main()

Tools/demo/queens.py (new file, 85 lines)
@@ -0,0 +1,85 @@
#!/usr/bin/env python3

"""
N queens problem.

The (well-known) problem is due to Niklaus Wirth.

This solution is inspired by Dijkstra (Structured Programming).  It is
a classic recursive backtracking approach.
"""

N = 8                                    # Default; command line overrides

class Queens:

    def __init__(self, n=N):
        self.n = n
        self.reset()

    def reset(self):
        n = self.n
        self.y = [None] * n             # Where is the queen in column x
        self.row = [0] * n              # Is row[y] safe?
        self.up = [0] * (2*n-1)         # Is upward diagonal[x-y] safe?
        self.down = [0] * (2*n-1)       # Is downward diagonal[x+y] safe?
        self.nfound = 0                 # Instrumentation

    def solve(self, x=0):               # Recursive solver
        for y in range(self.n):
            if self.safe(x, y):
                self.place(x, y)
                if x+1 == self.n:
                    self.display()
                else:
                    self.solve(x+1)
                self.remove(x, y)

    def safe(self, x, y):
        return not self.row[y] and not self.up[x-y] and not self.down[x+y]

    def place(self, x, y):
        self.y[x] = y
        self.row[y] = 1
        self.up[x-y] = 1
        self.down[x+y] = 1

    def remove(self, x, y):
        self.y[x] = None
        self.row[y] = 0
        self.up[x-y] = 0
        self.down[x+y] = 0

    silent = 0                          # If true, count solutions only

    def display(self):
        self.nfound = self.nfound + 1
        if self.silent:
            return
        print('+-' + '--'*self.n + '+')
        for y in range(self.n-1, -1, -1):
            print('|', end=' ')
            for x in range(self.n):
                if self.y[x] == y:
                    print("Q", end=' ')
                else:
                    print(".", end=' ')
            print('|')
        print('+-' + '--'*self.n + '+')

def main():
    import sys
    silent = 0
    n = N
    if sys.argv[1:2] == ['-n']:
        silent = 1
        del sys.argv[1]
    if sys.argv[1:]:
        n = int(sys.argv[1])
    q = Queens(n)
    q.silent = silent
    q.solve()
    print("Found", q.nfound, "solutions.")

if __name__ == "__main__":
    main()

Tools/demo/redemo.py (new file, 172 lines)
@@ -0,0 +1,172 @@
#!/usr/bin/env python3

"""Basic regular expression demonstration facility (Perl style syntax)."""

from tkinter import *
import re

class ReDemo:

    def __init__(self, master):
        self.master = master

        self.promptdisplay = Label(self.master, anchor=W,
                text="Enter a Perl-style regular expression:")
        self.promptdisplay.pack(side=TOP, fill=X)

        self.regexdisplay = Entry(self.master)
        self.regexdisplay.pack(fill=X)
        self.regexdisplay.focus_set()

        self.addoptions()

        self.statusdisplay = Label(self.master, text="", anchor=W)
        self.statusdisplay.pack(side=TOP, fill=X)

        self.labeldisplay = Label(self.master, anchor=W,
                text="Enter a string to search:")
        self.labeldisplay.pack(fill=X)
        self.labeldisplay.pack(fill=X)

        self.showframe = Frame(master)
        self.showframe.pack(fill=X, anchor=W)

        self.showvar = StringVar(master)
        self.showvar.set("first")

        self.showfirstradio = Radiobutton(self.showframe,
                                          text="Highlight first match",
                                          variable=self.showvar,
                                          value="first",
                                          command=self.recompile)
        self.showfirstradio.pack(side=LEFT)

        self.showallradio = Radiobutton(self.showframe,
                                        text="Highlight all matches",
                                        variable=self.showvar,
                                        value="all",
                                        command=self.recompile)
        self.showallradio.pack(side=LEFT)

        self.stringdisplay = Text(self.master, width=60, height=4)
        self.stringdisplay.pack(fill=BOTH, expand=1)
        self.stringdisplay.tag_configure("hit", background="yellow")

        self.grouplabel = Label(self.master, text="Groups:", anchor=W)
        self.grouplabel.pack(fill=X)

        self.grouplist = Listbox(self.master)
        self.grouplist.pack(expand=1, fill=BOTH)

        self.regexdisplay.bind('<Key>', self.recompile)
        self.stringdisplay.bind('<Key>', self.reevaluate)

        self.compiled = None
        self.recompile()

        btags = self.regexdisplay.bindtags()
        self.regexdisplay.bindtags(btags[1:] + btags[:1])

        btags = self.stringdisplay.bindtags()
        self.stringdisplay.bindtags(btags[1:] + btags[:1])

    def addoptions(self):
        self.frames = []
        self.boxes = []
        self.vars = []
        for name in ('IGNORECASE',
                     'MULTILINE',
                     'DOTALL',
                     'VERBOSE'):
            if len(self.boxes) % 3 == 0:
                frame = Frame(self.master)
                frame.pack(fill=X)
                self.frames.append(frame)
            val = getattr(re, name).value
            var = IntVar()
            box = Checkbutton(frame,
                              variable=var, text=name,
                              offvalue=0, onvalue=val,
                              command=self.recompile)
            box.pack(side=LEFT)
            self.boxes.append(box)
            self.vars.append(var)

    def getflags(self):
        flags = 0
        for var in self.vars:
            flags = flags | var.get()
        flags = flags
        return flags

    def recompile(self, event=None):
        try:
            self.compiled = re.compile(self.regexdisplay.get(),
                                       self.getflags())
            bg = self.promptdisplay['background']
            self.statusdisplay.config(text="", background=bg)
        except re.error as msg:
            self.compiled = None
            self.statusdisplay.config(
                    text="re.error: %s" % str(msg),
                    background="red")
        self.reevaluate()

    def reevaluate(self, event=None):
        try:
            self.stringdisplay.tag_remove("hit", "1.0", END)
        except TclError:
            pass
        try:
            self.stringdisplay.tag_remove("hit0", "1.0", END)
        except TclError:
            pass
        self.grouplist.delete(0, END)
        if not self.compiled:
            return
        self.stringdisplay.tag_configure("hit", background="yellow")
        self.stringdisplay.tag_configure("hit0", background="orange")
        text = self.stringdisplay.get("1.0", END)
        last = 0
        nmatches = 0
        while last <= len(text):
            m = self.compiled.search(text, last)
            if m is None:
                break
            first, last = m.span()
            if last == first:
                last = first+1
                tag = "hit0"
            else:
                tag = "hit"
            pfirst = "1.0 + %d chars" % first
            plast = "1.0 + %d chars" % last
            self.stringdisplay.tag_add(tag, pfirst, plast)
            if nmatches == 0:
                self.stringdisplay.yview_pickplace(pfirst)
            groups = list(m.groups())
            groups.insert(0, m.group())
            for i in range(len(groups)):
                g = "%2d: %r" % (i, groups[i])
                self.grouplist.insert(END, g)
            nmatches = nmatches + 1
            if self.showvar.get() == "first":
                break

        if nmatches == 0:
            self.statusdisplay.config(text="(no match)",
                                      background="yellow")
        else:
            self.statusdisplay.config(text="")


# Main function, run when invoked as a stand-alone Python program.

def main():
    root = Tk()
    demo = ReDemo(root)
    root.protocol('WM_DELETE_WINDOW', root.quit)
    root.mainloop()

if __name__ == '__main__':
    main()

Tools/demo/rpython.py (new file, 38 lines)
@@ -0,0 +1,38 @@
#!/usr/bin/env python3

"""
Remote python client.
Execute Python commands remotely and send output back.
"""

import sys
from socket import socket, AF_INET, SOCK_STREAM, SHUT_WR

PORT = 4127
BUFSIZE = 1024

def main():
    if len(sys.argv) < 3:
        print("usage: rpython host command")
        sys.exit(2)
    host = sys.argv[1]
    port = PORT
    i = host.find(':')
    if i >= 0:
        port = int(port[i+1:])
        host = host[:i]
    command = ' '.join(sys.argv[2:])
    s = socket(AF_INET, SOCK_STREAM)
    s.connect((host, port))
    s.send(command.encode())
    s.shutdown(SHUT_WR)
    reply = b''
    while True:
        data = s.recv(BUFSIZE)
        if not data:
            break
        reply += data
    print(reply.decode(), end=' ')
    s.close()

main()

Tools/demo/rpythond.py (new file, 58 lines)
@@ -0,0 +1,58 @@
#!/usr/bin/env python3

"""
Remote python server.
Execute Python commands remotely and send output back.

WARNING: This version has a gaping security hole -- it accepts requests
from any host on the Internet!
"""

import sys
from socket import socket, AF_INET, SOCK_STREAM
import io
import traceback

PORT = 4127
BUFSIZE = 1024

def main():
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = PORT
    s = socket(AF_INET, SOCK_STREAM)
    s.bind(('', port))
    s.listen(1)
    while True:
        conn, (remotehost, remoteport) = s.accept()
        print('connection from', remotehost, remoteport)
        request = b''
        while 1:
            data = conn.recv(BUFSIZE)
            if not data:
                break
            request += data
        reply = execute(request.decode())
        conn.send(reply.encode())
        conn.close()

def execute(request):
    stdout = sys.stdout
    stderr = sys.stderr
    sys.stdout = sys.stderr = fakefile = io.StringIO()
    try:
        try:
            exec(request, {}, {})
        except:
            print()
            traceback.print_exc(100)
    finally:
        sys.stderr = stderr
        sys.stdout = stdout
    return fakefile.getvalue()

try:
    main()
except KeyboardInterrupt:
    pass

Tools/demo/sortvisu.py (new file, 635 lines)
@@ -0,0 +1,635 @@
#!/usr/bin/env python3

"""
Sorting algorithms visualizer using Tkinter.

This module is comprised of three ``components'':

- an array visualizer with methods that implement basic sorting
operations (compare, swap) as well as methods for ``annotating'' the
sorting algorithm (e.g. to show the pivot element);

- a number of sorting algorithms (currently quicksort, insertion sort,
selection sort and bubble sort, as well as a randomization function),
all using the array visualizer for its basic operations and with calls
to its annotation methods;

- and a ``driver'' class which can be used as a Grail applet or as a
stand-alone application.
"""

from tkinter import *
import random

XGRID = 10
YGRID = 10
WIDTH = 6


class Array:

    class Cancelled(BaseException):
        pass

    def __init__(self, master, data=None):
        self.master = master
        self.frame = Frame(self.master)
        self.frame.pack(fill=X)
        self.label = Label(self.frame)
        self.label.pack()
        self.canvas = Canvas(self.frame)
        self.canvas.pack()
        self.report = Label(self.frame)
        self.report.pack()
        self.left = self.canvas.create_line(0, 0, 0, 0)
        self.right = self.canvas.create_line(0, 0, 0, 0)
        self.pivot = self.canvas.create_line(0, 0, 0, 0)
        self.items = []
        self.size = self.maxvalue = 0
        if data:
            self.setdata(data)

    def setdata(self, data):
        olditems = self.items
        self.items = []
        for item in olditems:
            item.delete()
        self.size = len(data)
        self.maxvalue = max(data)
        self.canvas.config(width=(self.size+1)*XGRID,
                           height=(self.maxvalue+1)*YGRID)
        for i in range(self.size):
            self.items.append(ArrayItem(self, i, data[i]))
        self.reset("Sort demo, size %d" % self.size)

    speed = "normal"

    def setspeed(self, speed):
        self.speed = speed

    def destroy(self):
        self.frame.destroy()

    in_mainloop = 0
    stop_mainloop = 0

    def cancel(self):
        self.stop_mainloop = 1
        if self.in_mainloop:
            self.master.quit()

    def step(self):
        if self.in_mainloop:
            self.master.quit()

    def wait(self, msecs):
        if self.speed == "fastest":
            msecs = 0
        elif self.speed == "fast":
            msecs = msecs//10
        elif self.speed == "single-step":
            msecs = 1000000000
        if not self.stop_mainloop:
            self.master.update()
            id = self.master.after(msecs, self.master.quit)
            self.in_mainloop = 1
            self.master.mainloop()
            self.master.after_cancel(id)
            self.in_mainloop = 0
        if self.stop_mainloop:
            self.stop_mainloop = 0
            self.message("Cancelled")
            raise Array.Cancelled

    def getsize(self):
        return self.size

    def show_partition(self, first, last):
        for i in range(self.size):
            item = self.items[i]
            if first <= i < last:
                self.canvas.itemconfig(item, fill='red')
            else:
                self.canvas.itemconfig(item, fill='orange')
        self.hide_left_right_pivot()

    def hide_partition(self):
        for i in range(self.size):
            item = self.items[i]
            self.canvas.itemconfig(item, fill='red')
        self.hide_left_right_pivot()

    def show_left(self, left):
        if not 0 <= left < self.size:
            self.hide_left()
            return
        x1, y1, x2, y2 = self.items[left].position()
##        top, bot = HIRO
        self.canvas.coords(self.left, (x1 - 2, 0, x1 - 2, 9999))
        self.master.update()

    def show_right(self, right):
        if not 0 <= right < self.size:
            self.hide_right()
            return
        x1, y1, x2, y2 = self.items[right].position()
        self.canvas.coords(self.right, (x2 + 2, 0, x2 + 2, 9999))
        self.master.update()

    def hide_left_right_pivot(self):
        self.hide_left()
        self.hide_right()
        self.hide_pivot()

    def hide_left(self):
        self.canvas.coords(self.left, (0, 0, 0, 0))

    def hide_right(self):
        self.canvas.coords(self.right, (0, 0, 0, 0))

    def show_pivot(self, pivot):
        x1, y1, x2, y2 = self.items[pivot].position()
        self.canvas.coords(self.pivot, (0, y1 - 2, 9999, y1 - 2))

    def hide_pivot(self):
        self.canvas.coords(self.pivot, (0, 0, 0, 0))

    def swap(self, i, j):
        if i == j: return
        self.countswap()
        item = self.items[i]
        other = self.items[j]
        self.items[i], self.items[j] = other, item
        item.swapwith(other)

    def compare(self, i, j):
        self.countcompare()
        item = self.items[i]
        other = self.items[j]
        return item.compareto(other)

    def reset(self, msg):
        self.ncompares = 0
        self.nswaps = 0
        self.message(msg)
        self.updatereport()
        self.hide_partition()

    def message(self, msg):
        self.label.config(text=msg)

    def countswap(self):
        self.nswaps = self.nswaps + 1
        self.updatereport()

    def countcompare(self):
        self.ncompares = self.ncompares + 1
        self.updatereport()

    def updatereport(self):
        text = "%d cmps, %d swaps" % (self.ncompares, self.nswaps)
        self.report.config(text=text)


class ArrayItem:

    def __init__(self, array, index, value):
        self.array = array
        self.index = index
        self.value = value
        self.canvas = array.canvas
        x1, y1, x2, y2 = self.position()
        self.item_id = array.canvas.create_rectangle(x1, y1, x2, y2,
            fill='red', outline='black', width=1)
        self.canvas.tag_bind(self.item_id, '<Button-1>', self.mouse_down)
        self.canvas.tag_bind(self.item_id, '<Button1-Motion>', self.mouse_move)
        self.canvas.tag_bind(self.item_id, '<ButtonRelease-1>', self.mouse_up)

    def delete(self):
        item_id = self.item_id
        self.array = None
        self.item_id = None
        self.canvas.delete(item_id)

    def mouse_down(self, event):
        self.lastx = event.x
        self.lasty = event.y
        self.origx = event.x
        self.origy = event.y
        self.canvas.tag_raise(self.item_id)

    def mouse_move(self, event):
        self.canvas.move(self.item_id,
                         event.x - self.lastx, event.y - self.lasty)
        self.lastx = event.x
        self.lasty = event.y

    def mouse_up(self, event):
        i = self.nearestindex(event.x)
        if i >= self.array.getsize():
            i = self.array.getsize() - 1
        if i < 0:
            i = 0
        other = self.array.items[i]
        here = self.index
        self.array.items[here], self.array.items[i] = other, self
        self.index = i
        x1, y1, x2, y2 = self.position()
        self.canvas.coords(self.item_id, (x1, y1, x2, y2))
        other.setindex(here)

    def setindex(self, index):
        nsteps = steps(self.index, index)
        if not nsteps: return
        if self.array.speed == "fastest":
            nsteps = 0
        oldpts = self.position()
        self.index = index
        newpts = self.position()
        trajectory = interpolate(oldpts, newpts, nsteps)
        self.canvas.tag_raise(self.item_id)
        for pts in trajectory:
            self.canvas.coords(self.item_id, pts)
            self.array.wait(50)

    def swapwith(self, other):
        nsteps = steps(self.index, other.index)
        if not nsteps: return
        if self.array.speed == "fastest":
            nsteps = 0
        myoldpts = self.position()
        otheroldpts = other.position()
        self.index, other.index = other.index, self.index
        mynewpts = self.position()
        othernewpts = other.position()
        myfill = self.canvas.itemcget(self.item_id, 'fill')
        otherfill = self.canvas.itemcget(other.item_id, 'fill')
        self.canvas.itemconfig(self.item_id, fill='green')
        self.canvas.itemconfig(other.item_id, fill='yellow')
        self.array.master.update()
        if self.array.speed == "single-step":
            self.canvas.coords(self.item_id, mynewpts)
            self.canvas.coords(other.item_id, othernewpts)
            self.array.master.update()
            self.canvas.itemconfig(self.item_id, fill=myfill)
            self.canvas.itemconfig(other.item_id, fill=otherfill)
            self.array.wait(0)
            return
        mytrajectory = interpolate(myoldpts, mynewpts, nsteps)
        othertrajectory = interpolate(otheroldpts, othernewpts, nsteps)
        if self.value > other.value:
            self.canvas.tag_raise(self.item_id)
            self.canvas.tag_raise(other.item_id)
        else:
            self.canvas.tag_raise(other.item_id)
            self.canvas.tag_raise(self.item_id)
        try:
            for i in range(len(mytrajectory)):
                mypts = mytrajectory[i]
                otherpts = othertrajectory[i]
                self.canvas.coords(self.item_id, mypts)
                self.canvas.coords(other.item_id, otherpts)
                self.array.wait(50)
        finally:
            mypts = mytrajectory[-1]
            otherpts = othertrajectory[-1]
            self.canvas.coords(self.item_id, mypts)
            self.canvas.coords(other.item_id, otherpts)
            self.canvas.itemconfig(self.item_id, fill=myfill)
            self.canvas.itemconfig(other.item_id, fill=otherfill)

    def compareto(self, other):
        myfill = self.canvas.itemcget(self.item_id, 'fill')
        otherfill = self.canvas.itemcget(other.item_id, 'fill')
        if self.value < other.value:
            myflash = 'white'
            otherflash = 'black'
            outcome = -1
        elif self.value > other.value:
            myflash = 'black'
            otherflash = 'white'
            outcome = 1
        else:
            myflash = otherflash = 'grey'
            outcome = 0
        try:
            self.canvas.itemconfig(self.item_id, fill=myflash)
            self.canvas.itemconfig(other.item_id, fill=otherflash)
            self.array.wait(500)
        finally:
            self.canvas.itemconfig(self.item_id, fill=myfill)
            self.canvas.itemconfig(other.item_id, fill=otherfill)
        return outcome

    def position(self):
        x1 = (self.index+1)*XGRID - WIDTH//2
        x2 = x1+WIDTH
        y2 = (self.array.maxvalue+1)*YGRID
        y1 = y2 - (self.value)*YGRID
        return x1, y1, x2, y2

    def nearestindex(self, x):
        return int(round(float(x)/XGRID)) - 1


# Subroutines that don't need an object

def steps(here, there):
    nsteps = abs(here - there)
    if nsteps <= 3:
        nsteps = nsteps * 3
    elif nsteps <= 5:
        nsteps = nsteps * 2
    elif nsteps > 10:
        nsteps = 10
    return nsteps

def interpolate(oldpts, newpts, n):
    if len(oldpts) != len(newpts):
        raise ValueError("can't interpolate arrays of different length")
    pts = [0]*len(oldpts)
    res = [tuple(oldpts)]
    for i in range(1, n):
        for k in range(len(pts)):
            pts[k] = oldpts[k] + (newpts[k] - oldpts[k])*i//n
        res.append(tuple(pts))
    res.append(tuple(newpts))
    return res


# Various (un)sorting algorithms

def uniform(array):
    size = array.getsize()
    array.setdata([(size+1)//2] * size)
    array.reset("Uniform data, size %d" % size)

def distinct(array):
    size = array.getsize()
    array.setdata(range(1, size+1))
    array.reset("Distinct data, size %d" % size)

def randomize(array):
    array.reset("Randomizing")
    n = array.getsize()
    for i in range(n):
        j = random.randint(0, n-1)
        array.swap(i, j)
    array.message("Randomized")

def insertionsort(array):
    size = array.getsize()
    array.reset("Insertion sort")
    for i in range(1, size):
        j = i-1
        while j >= 0:
            if array.compare(j, j+1) <= 0:
                break
            array.swap(j, j+1)
            j = j-1
    array.message("Sorted")

def selectionsort(array):
    size = array.getsize()
    array.reset("Selection sort")
    try:
        for i in range(size):
            array.show_partition(i, size)
            for j in range(i+1, size):
                if array.compare(i, j) > 0:
                    array.swap(i, j)
        array.message("Sorted")
    finally:
        array.hide_partition()

def bubblesort(array):
    size = array.getsize()
    array.reset("Bubble sort")
    for i in range(size):
        for j in range(1, size):
            if array.compare(j-1, j) > 0:
                array.swap(j-1, j)
    array.message("Sorted")

def quicksort(array):
    size = array.getsize()
    array.reset("Quicksort")
    try:
        stack = [(0, size)]
        while stack:
            first, last = stack[-1]
            del stack[-1]
            array.show_partition(first, last)
            if last-first < 5:
                array.message("Insertion sort")
                for i in range(first+1, last):
                    j = i-1
                    while j >= first:
                        if array.compare(j, j+1) <= 0:
                            break
                        array.swap(j, j+1)
                        j = j-1
                continue
            array.message("Choosing pivot")
            j, i, k = first, (first+last) // 2, last-1
            if array.compare(k, i) < 0:
                array.swap(k, i)
            if array.compare(k, j) < 0:
                array.swap(k, j)
            if array.compare(j, i) < 0:
                array.swap(j, i)
            pivot = j
            array.show_pivot(pivot)
            array.message("Pivot at left of partition")
            array.wait(1000)
            left = first
            right = last
            while 1:
                array.message("Sweep right pointer")
                right = right-1
                array.show_right(right)
                while right > first and array.compare(right, pivot) >= 0:
                    right = right-1
                    array.show_right(right)
                array.message("Sweep left pointer")
                left = left+1
                array.show_left(left)
                while left < last and array.compare(left, pivot) <= 0:
                    left = left+1
                    array.show_left(left)
                if left > right:
                    array.message("End of partition")
                    break
                array.message("Swap items")
                array.swap(left, right)
            array.message("Swap pivot back")
            array.swap(pivot, right)
            n1 = right-first
            n2 = last-left
            if n1 > 1: stack.append((first, right))
            if n2 > 1: stack.append((left, last))
        array.message("Sorted")
    finally:
        array.hide_partition()

def demosort(array):
    while 1:
        for alg in [quicksort, insertionsort, selectionsort, bubblesort]:
            randomize(array)
            alg(array)


# Sort demo class -- usable as a Grail applet

class SortDemo:

    def __init__(self, master, size=15):
        self.master = master
        self.size = size
        self.busy = 0
        self.array = Array(self.master)

        self.botframe = Frame(master)
        self.botframe.pack(side=BOTTOM)
        self.botleftframe = Frame(self.botframe)
        self.botleftframe.pack(side=LEFT, fill=Y)
        self.botrightframe = Frame(self.botframe)
        self.botrightframe.pack(side=RIGHT, fill=Y)

        self.b_qsort = Button(self.botleftframe,
                              text="Quicksort", command=self.c_qsort)
        self.b_qsort.pack(fill=X)
        self.b_isort = Button(self.botleftframe,
                              text="Insertion sort", command=self.c_isort)
        self.b_isort.pack(fill=X)
        self.b_ssort = Button(self.botleftframe,
                              text="Selection sort", command=self.c_ssort)
        self.b_ssort.pack(fill=X)
        self.b_bsort = Button(self.botleftframe,
                              text="Bubble sort", command=self.c_bsort)
        self.b_bsort.pack(fill=X)

        # Terrible hack to overcome limitation of OptionMenu...
        class MyIntVar(IntVar):
            def __init__(self, master, demo):
                self.demo = demo
                IntVar.__init__(self, master)
            def set(self, value):
                IntVar.set(self, value)
                if str(value) != '0':
                    self.demo.resize(value)

        self.v_size = MyIntVar(self.master, self)
        self.v_size.set(size)
        sizes = [1, 2, 3, 4] + list(range(5, 55, 5))
        if self.size not in sizes:
            sizes.append(self.size)
            sizes.sort()
        self.m_size = OptionMenu(self.botleftframe, self.v_size, *sizes)
        self.m_size.pack(fill=X)

        self.v_speed = StringVar(self.master)
        self.v_speed.set("normal")
        self.m_speed = OptionMenu(self.botleftframe, self.v_speed,
                                  "single-step", "normal", "fast", "fastest")
        self.m_speed.pack(fill=X)

        self.b_step = Button(self.botleftframe,
                             text="Step", command=self.c_step)
        self.b_step.pack(fill=X)

        self.b_randomize = Button(self.botrightframe,
                                  text="Randomize", command=self.c_randomize)
        self.b_randomize.pack(fill=X)
        self.b_uniform = Button(self.botrightframe,
                                text="Uniform", command=self.c_uniform)
        self.b_uniform.pack(fill=X)
        self.b_distinct = Button(self.botrightframe,
                                 text="Distinct", command=self.c_distinct)
        self.b_distinct.pack(fill=X)
        self.b_demo = Button(self.botrightframe,
                             text="Demo", command=self.c_demo)
        self.b_demo.pack(fill=X)
        self.b_cancel = Button(self.botrightframe,
                               text="Cancel", command=self.c_cancel)
        self.b_cancel.pack(fill=X)
        self.b_cancel.config(state=DISABLED)
        self.b_quit = Button(self.botrightframe,
                             text="Quit", command=self.c_quit)
        self.b_quit.pack(fill=X)

    def resize(self, newsize):
        if self.busy:
            self.master.bell()
            return
        self.size = newsize
        self.array.setdata(range(1, self.size+1))

    def c_qsort(self):
        self.run(quicksort)

    def c_isort(self):
        self.run(insertionsort)

    def c_ssort(self):
        self.run(selectionsort)

    def c_bsort(self):
        self.run(bubblesort)

    def c_demo(self):
        self.run(demosort)

    def c_randomize(self):
        self.run(randomize)

    def c_uniform(self):
        self.run(uniform)

    def c_distinct(self):
        self.run(distinct)

    def run(self, func):
        if self.busy:
            self.master.bell()
            return
        self.busy = 1
        self.array.setspeed(self.v_speed.get())
        self.b_cancel.config(state=NORMAL)
        try:
            func(self.array)
        except Array.Cancelled:
            pass
        self.b_cancel.config(state=DISABLED)
        self.busy = 0

    def c_cancel(self):
        if not self.busy:
            self.master.bell()
            return
        self.array.cancel()

    def c_step(self):
        if not self.busy:
            self.master.bell()
            return
        self.v_speed.set("single-step")
        self.array.setspeed("single-step")
        self.array.step()

    def c_quit(self):
        if self.busy:
            self.array.cancel()
        self.master.after_idle(self.master.quit)


# Main program -- for stand-alone operation outside Grail

def main():
    root = Tk()
    demo = SortDemo(root)
    root.protocol('WM_DELETE_WINDOW', demo.c_quit)
    root.mainloop()

if __name__ == '__main__':
    main()

829
Tools/demo/ss1.py
Normal file
829
Tools/demo/ss1.py
Normal file
|
|
@ -0,0 +1,829 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
SS1 -- a spreadsheet-like application.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from xml.parsers import expat
|
||||
from xml.sax.saxutils import escape
|
||||
|
||||
LEFT, CENTER, RIGHT = "LEFT", "CENTER", "RIGHT"
|
||||
|
||||
def ljust(x, n):
|
||||
return x.ljust(n)
|
||||
def center(x, n):
|
||||
return x.center(n)
|
||||
def rjust(x, n):
|
||||
return x.rjust(n)
|
||||
align2action = {LEFT: ljust, CENTER: center, RIGHT: rjust}
|
||||
|
||||
align2xml = {LEFT: "left", CENTER: "center", RIGHT: "right"}
|
||||
xml2align = {"left": LEFT, "center": CENTER, "right": RIGHT}
|
||||
|
||||
align2anchor = {LEFT: "w", CENTER: "center", RIGHT: "e"}
|
||||
|
||||
def sum(seq):
|
||||
total = 0
|
||||
for x in seq:
|
||||
if x is not None:
|
||||
total += x
|
||||
return total
|
||||
|
||||
class Sheet:
|
||||
|
||||
def __init__(self):
|
||||
self.cells = {} # {(x, y): cell, ...}
|
||||
self.ns = dict(
|
||||
cell = self.cellvalue,
|
||||
cells = self.multicellvalue,
|
||||
sum = sum,
|
||||
)
|
||||
|
||||
def cellvalue(self, x, y):
|
||||
cell = self.getcell(x, y)
|
||||
if hasattr(cell, 'recalc'):
|
||||
return cell.recalc(self.ns)
|
||||
else:
|
||||
return cell
|
||||
|
||||
def multicellvalue(self, x1, y1, x2, y2):
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
seq = []
|
||||
for y in range(y1, y2+1):
|
||||
for x in range(x1, x2+1):
|
||||
seq.append(self.cellvalue(x, y))
|
||||
return seq
|
||||
|
||||
def getcell(self, x, y):
|
||||
return self.cells.get((x, y))
|
||||
|
||||
def setcell(self, x, y, cell):
|
||||
assert x > 0 and y > 0
|
||||
assert isinstance(cell, BaseCell)
|
||||
self.cells[x, y] = cell
|
||||
|
||||
def clearcell(self, x, y):
|
||||
try:
|
||||
del self.cells[x, y]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def clearcells(self, x1, y1, x2, y2):
|
||||
for xy in self.selectcells(x1, y1, x2, y2):
|
||||
del self.cells[xy]
|
||||
|
||||
def clearrows(self, y1, y2):
|
||||
self.clearcells(0, y1, sys.maxsize, y2)
|
||||
|
||||
def clearcolumns(self, x1, x2):
|
||||
self.clearcells(x1, 0, x2, sys.maxsize)
|
||||
|
||||
def selectcells(self, x1, y1, x2, y2):
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
return [(x, y) for x, y in self.cells
|
||||
if x1 <= x <= x2 and y1 <= y <= y2]
|
||||
|
||||
def movecells(self, x1, y1, x2, y2, dx, dy):
|
||||
if dx == 0 and dy == 0:
|
||||
return
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
assert x1+dx > 0 and y1+dy > 0
|
||||
new = {}
|
||||
for x, y in self.cells:
|
||||
cell = self.cells[x, y]
|
||||
if hasattr(cell, 'renumber'):
|
||||
cell = cell.renumber(x1, y1, x2, y2, dx, dy)
|
||||
if x1 <= x <= x2 and y1 <= y <= y2:
|
||||
x += dx
|
||||
y += dy
|
||||
new[x, y] = cell
|
||||
self.cells = new
|
||||
|
||||
def insertrows(self, y, n):
|
||||
assert n > 0
|
||||
self.movecells(0, y, sys.maxsize, sys.maxsize, 0, n)
|
||||
|
||||
def deleterows(self, y1, y2):
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
self.clearrows(y1, y2)
|
||||
self.movecells(0, y2+1, sys.maxsize, sys.maxsize, 0, y1-y2-1)
|
||||
|
||||
def insertcolumns(self, x, n):
|
||||
assert n > 0
|
||||
self.movecells(x, 0, sys.maxsize, sys.maxsize, n, 0)
|
||||
|
||||
def deletecolumns(self, x1, x2):
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
self.clearcolumns(x1, x2)
|
||||
self.movecells(x2+1, 0, sys.maxsize, sys.maxsize, x1-x2-1, 0)
|
||||
|
||||
def getsize(self):
|
||||
maxx = maxy = 0
|
||||
for x, y in self.cells:
|
||||
maxx = max(maxx, x)
|
||||
maxy = max(maxy, y)
|
||||
return maxx, maxy
|
||||
|
||||
def reset(self):
|
||||
for cell in self.cells.values():
|
||||
if hasattr(cell, 'reset'):
|
||||
cell.reset()
|
||||
|
||||
def recalc(self):
|
||||
self.reset()
|
||||
for cell in self.cells.values():
|
||||
if hasattr(cell, 'recalc'):
|
||||
cell.recalc(self.ns)
|
||||
|
||||
def display(self):
|
||||
maxx, maxy = self.getsize()
|
||||
width, height = maxx+1, maxy+1
|
||||
colwidth = [1] * width
|
||||
full = {}
|
||||
# Add column heading labels in row 0
|
||||
for x in range(1, width):
|
||||
full[x, 0] = text, alignment = colnum2name(x), RIGHT
|
||||
colwidth[x] = max(colwidth[x], len(text))
|
||||
# Add row labels in column 0
|
||||
for y in range(1, height):
|
||||
full[0, y] = text, alignment = str(y), RIGHT
|
||||
colwidth[0] = max(colwidth[0], len(text))
|
||||
# Add sheet cells in columns with x>0 and y>0
|
||||
for (x, y), cell in self.cells.items():
|
||||
if x <= 0 or y <= 0:
|
||||
continue
|
||||
if hasattr(cell, 'recalc'):
|
||||
cell.recalc(self.ns)
|
||||
if hasattr(cell, 'format'):
|
||||
text, alignment = cell.format()
|
||||
assert isinstance(text, str)
|
||||
assert alignment in (LEFT, CENTER, RIGHT)
|
||||
else:
|
||||
text = str(cell)
|
||||
if isinstance(cell, str):
|
||||
alignment = LEFT
|
||||
else:
|
||||
alignment = RIGHT
|
||||
full[x, y] = (text, alignment)
|
||||
colwidth[x] = max(colwidth[x], len(text))
|
||||
# Calculate the horizontal separator line (dashes and dots)
|
||||
sep = ""
|
||||
for x in range(width):
|
||||
if sep:
|
||||
sep += "+"
|
||||
sep += "-"*colwidth[x]
|
||||
# Now print the full grid
|
||||
for y in range(height):
|
||||
line = ""
|
||||
for x in range(width):
|
||||
text, alignment = full.get((x, y)) or ("", LEFT)
|
||||
text = align2action[alignment](text, colwidth[x])
|
||||
if line:
|
||||
line += '|'
|
||||
line += text
|
||||
print(line)
|
||||
if y == 0:
|
||||
print(sep)
|
||||
|
||||
def xml(self):
|
||||
out = ['<spreadsheet>']
|
||||
for (x, y), cell in self.cells.items():
|
||||
if hasattr(cell, 'xml'):
|
||||
cellxml = cell.xml()
|
||||
else:
|
||||
cellxml = '<value>%s</value>' % escape(cell)
|
||||
out.append('<cell row="%s" col="%s">\n %s\n</cell>' %
|
||||
(y, x, cellxml))
|
||||
out.append('</spreadsheet>')
|
||||
return '\n'.join(out)
|
||||
|
||||
def save(self, filename):
|
||||
text = self.xml()
|
||||
with open(filename, "w", encoding='utf-8') as f:
|
||||
f.write(text)
|
||||
if text and not text.endswith('\n'):
|
||||
f.write('\n')
|
||||
|
||||
def load(self, filename):
|
||||
with open(filename, 'rb') as f:
|
||||
SheetParser(self).parsefile(f)
|
||||
|
||||
class SheetParser:
|
||||
|
||||
def __init__(self, sheet):
|
||||
self.sheet = sheet
|
||||
|
||||
def parsefile(self, f):
|
||||
parser = expat.ParserCreate()
|
||||
parser.StartElementHandler = self.startelement
|
||||
parser.EndElementHandler = self.endelement
|
||||
parser.CharacterDataHandler = self.data
|
||||
parser.ParseFile(f)
|
||||
|
||||
def startelement(self, tag, attrs):
|
||||
method = getattr(self, 'start_'+tag, None)
|
||||
if method:
|
||||
method(attrs)
|
||||
self.texts = []
|
||||
|
||||
def data(self, text):
|
||||
self.texts.append(text)
|
||||
|
||||
def endelement(self, tag):
|
||||
method = getattr(self, 'end_'+tag, None)
|
||||
if method:
|
||||
method("".join(self.texts))
|
||||
|
||||
def start_cell(self, attrs):
|
||||
self.y = int(attrs.get("row"))
|
||||
self.x = int(attrs.get("col"))
|
||||
|
||||
def start_value(self, attrs):
|
||||
self.fmt = attrs.get('format')
|
||||
self.alignment = xml2align.get(attrs.get('align'))
|
||||
|
||||
start_formula = start_value
|
||||
|
||||
def end_int(self, text):
|
||||
try:
|
||||
self.value = int(text)
|
||||
except (TypeError, ValueError):
|
||||
self.value = None
|
||||
|
||||
end_long = end_int
|
||||
|
||||
def end_double(self, text):
|
||||
try:
|
||||
self.value = float(text)
|
||||
except (TypeError, ValueError):
|
||||
self.value = None
|
||||
|
||||
def end_complex(self, text):
|
||||
try:
|
||||
self.value = complex(text)
|
||||
except (TypeError, ValueError):
|
||||
self.value = None
|
||||
|
||||
def end_string(self, text):
|
||||
self.value = text
|
||||
|
||||
def end_value(self, text):
|
||||
if isinstance(self.value, BaseCell):
|
||||
self.cell = self.value
|
||||
elif isinstance(self.value, str):
|
||||
self.cell = StringCell(self.value,
|
||||
self.fmt or "%s",
|
||||
self.alignment or LEFT)
|
||||
else:
|
||||
self.cell = NumericCell(self.value,
|
||||
self.fmt or "%s",
|
||||
self.alignment or RIGHT)
|
||||
|
||||
def end_formula(self, text):
|
||||
self.cell = FormulaCell(text,
|
||||
self.fmt or "%s",
|
||||
self.alignment or RIGHT)
|
||||
|
||||
def end_cell(self, text):
|
||||
self.sheet.setcell(self.x, self.y, self.cell)
|
||||
|
||||
class BaseCell:
|
||||
__init__ = None # Must provide
|
||||
"""Abstract base class for sheet cells.
|
||||
|
||||
Subclasses may but needn't provide the following APIs:
|
||||
|
||||
cell.reset() -- prepare for recalculation
|
||||
cell.recalc(ns) -> value -- recalculate formula
|
||||
cell.format() -> (value, alignment) -- return formatted value
|
||||
cell.xml() -> string -- return XML
|
||||
"""
|
||||
|
||||
class NumericCell(BaseCell):
|
||||
|
||||
def __init__(self, value, fmt="%s", alignment=RIGHT):
|
||||
assert isinstance(value, (int, float, complex))
|
||||
assert alignment in (LEFT, CENTER, RIGHT)
|
||||
self.value = value
|
||||
self.fmt = fmt
|
||||
self.alignment = alignment
|
||||
|
||||
def recalc(self, ns):
|
||||
return self.value
|
||||
|
||||
def format(self):
|
||||
try:
|
||||
text = self.fmt % self.value
|
||||
except:
|
||||
text = str(self.value)
|
||||
return text, self.alignment
|
||||
|
||||
def xml(self):
|
||||
method = getattr(self, '_xml_' + type(self.value).__name__)
|
||||
return '<value align="%s" format="%s">%s</value>' % (
|
||||
align2xml[self.alignment],
|
||||
self.fmt,
|
||||
method())
|
||||
|
||||
def _xml_int(self):
|
||||
if -2**31 <= self.value < 2**31:
|
||||
return '<int>%s</int>' % self.value
|
||||
else:
|
||||
return '<long>%s</long>' % self.value
|
||||
|
||||
def _xml_float(self):
|
||||
return '<double>%r</double>' % self.value
|
||||
|
||||
def _xml_complex(self):
|
||||
return '<complex>%r</complex>' % self.value
|
||||
|
||||
class StringCell(BaseCell):
|
||||
|
||||
def __init__(self, text, fmt="%s", alignment=LEFT):
|
||||
assert isinstance(text, str)
|
||||
assert alignment in (LEFT, CENTER, RIGHT)
|
||||
self.text = text
|
||||
self.fmt = fmt
|
||||
self.alignment = alignment
|
||||
|
||||
def recalc(self, ns):
|
||||
return self.text
|
||||
|
||||
def format(self):
|
||||
return self.text, self.alignment
|
||||
|
||||
def xml(self):
|
||||
s = '<value align="%s" format="%s"><string>%s</string></value>'
|
||||
return s % (
|
||||
align2xml[self.alignment],
|
||||
self.fmt,
|
||||
escape(self.text))
|
||||
|
||||
class FormulaCell(BaseCell):
|
||||
|
||||
def __init__(self, formula, fmt="%s", alignment=RIGHT):
|
||||
assert alignment in (LEFT, CENTER, RIGHT)
|
||||
self.formula = formula
|
||||
self.translated = translate(self.formula)
|
||||
self.fmt = fmt
|
||||
self.alignment = alignment
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
self.value = None
|
||||
|
||||
def recalc(self, ns):
|
||||
if self.value is None:
|
||||
try:
|
||||
self.value = eval(self.translated, ns)
|
||||
except:
|
||||
exc = sys.exc_info()[0]
|
||||
if hasattr(exc, "__name__"):
|
||||
self.value = exc.__name__
|
||||
else:
|
||||
self.value = str(exc)
|
||||
return self.value
|
||||
|
||||
def format(self):
|
||||
try:
|
||||
text = self.fmt % self.value
|
||||
except:
|
||||
text = str(self.value)
|
||||
return text, self.alignment
|
||||
|
||||
def xml(self):
|
||||
return '<formula align="%s" format="%s">%s</formula>' % (
|
||||
align2xml[self.alignment],
|
||||
self.fmt,
|
||||
escape(self.formula))
|
||||
|
||||
def renumber(self, x1, y1, x2, y2, dx, dy):
|
||||
out = []
|
||||
for part in re.split(r'(\w+)', self.formula):
|
||||
m = re.match('^([A-Z]+)([1-9][0-9]*)$', part)
|
||||
if m is not None:
|
||||
sx, sy = m.groups()
|
||||
x = colname2num(sx)
|
||||
y = int(sy)
|
||||
if x1 <= x <= x2 and y1 <= y <= y2:
|
||||
part = cellname(x+dx, y+dy)
|
||||
out.append(part)
|
||||
return FormulaCell("".join(out), self.fmt, self.alignment)
|
||||
|
||||
def translate(formula):
|
||||
"""Translate a formula containing fancy cell names to valid Python code.
|
||||
|
||||
Examples:
|
||||
B4 -> cell(2, 4)
|
||||
B4:Z100 -> cells(2, 4, 26, 100)
|
||||
"""
|
||||
out = []
|
||||
for part in re.split(r"(\w+(?::\w+)?)", formula):
|
||||
m = re.match(r"^([A-Z]+)([1-9][0-9]*)(?::([A-Z]+)([1-9][0-9]*))?$", part)
|
||||
if m is None:
|
||||
out.append(part)
|
||||
else:
|
||||
x1, y1, x2, y2 = m.groups()
|
||||
x1 = colname2num(x1)
|
||||
if x2 is None:
|
||||
s = "cell(%s, %s)" % (x1, y1)
|
||||
else:
|
||||
x2 = colname2num(x2)
|
||||
s = "cells(%s, %s, %s, %s)" % (x1, y1, x2, y2)
|
||||
out.append(s)
|
||||
return "".join(out)
|
||||
|
||||
def cellname(x, y):
|
||||
"Translate a cell coordinate to a fancy cell name (e.g. (1, 1)->'A1')."
|
||||
assert x > 0 # Column 0 has an empty name, so can't use that
|
||||
return colnum2name(x) + str(y)
|
||||
|
||||
def colname2num(s):
|
||||
"Translate a column name to number (e.g. 'A'->1, 'Z'->26, 'AA'->27)."
|
||||
s = s.upper()
|
||||
n = 0
|
||||
for c in s:
|
||||
assert 'A' <= c <= 'Z'
|
||||
n = n*26 + ord(c) - ord('A') + 1
|
||||
return n
|
||||
|
||||
def colnum2name(n):
|
||||
"Translate a column number to name (e.g. 1->'A', etc.)."
|
||||
assert n > 0
|
||||
s = ""
|
||||
while n:
|
||||
n, m = divmod(n-1, 26)
|
||||
s = chr(m+ord('A')) + s
|
||||
return s
|
||||
|
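# Illustrative sketch (not in the original file) of how the helpers above
# behave; the formula text is a hypothetical example:
#   colname2num('AA')              -> 27    (base-26 column names, 'A' == 1)
#   colnum2name(27)                -> 'AA'
#   cellname(27, 3)                -> 'AA3'
#   translate('B4 + sum(B4:Z100)') -> 'cell(2, 4) + sum(cells(2, 4, 26, 100))'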
||||
import tkinter as Tk
|
||||
|
||||
class SheetGUI:
|
||||
|
||||
"""Beginnings of a GUI for a spreadsheet.
|
||||
|
||||
TO DO:
|
||||
- clear multiple cells
|
||||
- Insert, clear, remove rows or columns
|
||||
- Show new contents while typing
|
||||
- Scroll bars
|
||||
- Grow grid when window is grown
|
||||
- Proper menus
|
||||
- Undo, redo
|
||||
- Cut, copy and paste
|
||||
- Formatting and alignment
|
||||
"""
|
||||
|
||||
def __init__(self, filename="sheet1.xml", rows=10, columns=5):
|
||||
"""Constructor.
|
||||
|
||||
Load the sheet from the filename argument.
|
||||
Set up the Tk widget tree.
|
||||
"""
|
||||
# Create and load the sheet
|
||||
self.filename = filename
|
||||
self.sheet = Sheet()
|
||||
if os.path.isfile(filename):
|
||||
self.sheet.load(filename)
|
||||
# Calculate the needed grid size
|
||||
maxx, maxy = self.sheet.getsize()
|
||||
rows = max(rows, maxy)
|
||||
columns = max(columns, maxx)
|
||||
# Create the widgets
|
||||
self.root = Tk.Tk()
|
||||
self.root.wm_title("Spreadsheet: %s" % self.filename)
|
||||
self.beacon = Tk.Label(self.root, text="A1",
|
||||
font=('helvetica', 16, 'bold'))
|
||||
self.entry = Tk.Entry(self.root)
|
||||
self.savebutton = Tk.Button(self.root, text="Save",
|
||||
command=self.save)
|
||||
self.cellgrid = Tk.Frame(self.root)
|
||||
# Configure the widget lay-out
|
||||
self.cellgrid.pack(side="bottom", expand=1, fill="both")
|
||||
self.beacon.pack(side="left")
|
||||
self.savebutton.pack(side="right")
|
||||
self.entry.pack(side="left", expand=1, fill="x")
|
||||
# Bind some events
|
||||
self.entry.bind("<Return>", self.return_event)
|
||||
self.entry.bind("<Shift-Return>", self.shift_return_event)
|
||||
self.entry.bind("<Tab>", self.tab_event)
|
||||
self.entry.bind("<Shift-Tab>", self.shift_tab_event)
|
||||
self.entry.bind("<Delete>", self.delete_event)
|
||||
self.entry.bind("<Escape>", self.escape_event)
|
||||
# Now create the cell grid
|
||||
self.makegrid(rows, columns)
|
||||
# Select the top-left cell
|
||||
self.currentxy = None
|
||||
self.cornerxy = None
|
||||
self.setcurrent(1, 1)
|
||||
# Copy the sheet cells to the GUI cells
|
||||
self.sync()
|
||||
|
||||
def delete_event(self, event):
|
||||
if self.cornerxy != self.currentxy and self.cornerxy is not None:
|
||||
self.sheet.clearcells(*(self.currentxy + self.cornerxy))
|
||||
else:
|
||||
self.sheet.clearcell(*self.currentxy)
|
||||
self.sync()
|
||||
self.entry.delete(0, 'end')
|
||||
return "break"
|
||||
|
||||
def escape_event(self, event):
|
||||
x, y = self.currentxy
|
||||
self.load_entry(x, y)
|
||||
|
||||
def load_entry(self, x, y):
|
||||
cell = self.sheet.getcell(x, y)
|
||||
if cell is None:
|
||||
text = ""
|
||||
elif isinstance(cell, FormulaCell):
|
||||
text = '=' + cell.formula
|
||||
else:
|
||||
text, alignment = cell.format()
|
||||
self.entry.delete(0, 'end')
|
||||
self.entry.insert(0, text)
|
||||
self.entry.selection_range(0, 'end')
|
||||
|
||||
def makegrid(self, rows, columns):
|
||||
"""Helper to create the grid of GUI cells.
|
||||
|
||||
The edge (x==0 or y==0) is filled with labels; the rest is real cells.
|
||||
"""
|
||||
self.rows = rows
|
||||
self.columns = columns
|
||||
self.gridcells = {}
|
||||
# Create the top left corner cell (which selects all)
|
||||
cell = Tk.Label(self.cellgrid, relief='raised')
|
||||
cell.grid_configure(column=0, row=0, sticky='NSWE')
|
||||
cell.bind("<ButtonPress-1>", self.selectall)
|
||||
# Create the top row of labels, and configure the grid columns
|
||||
for x in range(1, columns+1):
|
||||
self.cellgrid.grid_columnconfigure(x, minsize=64)
|
||||
cell = Tk.Label(self.cellgrid, text=colnum2name(x), relief='raised')
|
||||
cell.grid_configure(column=x, row=0, sticky='WE')
|
||||
self.gridcells[x, 0] = cell
|
||||
cell.__x = x
|
||||
cell.__y = 0
|
||||
cell.bind("<ButtonPress-1>", self.selectcolumn)
|
||||
cell.bind("<B1-Motion>", self.extendcolumn)
|
||||
cell.bind("<ButtonRelease-1>", self.extendcolumn)
|
||||
cell.bind("<Shift-Button-1>", self.extendcolumn)
|
||||
# Create the leftmost column of labels
|
||||
for y in range(1, rows+1):
|
||||
cell = Tk.Label(self.cellgrid, text=str(y), relief='raised')
|
||||
cell.grid_configure(column=0, row=y, sticky='WE')
|
||||
self.gridcells[0, y] = cell
|
||||
cell.__x = 0
|
||||
cell.__y = y
|
||||
cell.bind("<ButtonPress-1>", self.selectrow)
|
||||
cell.bind("<B1-Motion>", self.extendrow)
|
||||
cell.bind("<ButtonRelease-1>", self.extendrow)
|
||||
cell.bind("<Shift-Button-1>", self.extendrow)
|
||||
# Create the real cells
|
||||
for x in range(1, columns+1):
|
||||
for y in range(1, rows+1):
|
||||
cell = Tk.Label(self.cellgrid, relief='sunken',
|
||||
bg='white', fg='black')
|
||||
cell.grid_configure(column=x, row=y, sticky='NSWE')
|
||||
self.gridcells[x, y] = cell
|
||||
cell.__x = x
|
||||
cell.__y = y
|
||||
# Bind mouse events
|
||||
cell.bind("<ButtonPress-1>", self.press)
|
||||
cell.bind("<B1-Motion>", self.motion)
|
||||
cell.bind("<ButtonRelease-1>", self.release)
|
||||
cell.bind("<Shift-Button-1>", self.release)
|
||||
|
||||
def selectall(self, event):
|
||||
self.setcurrent(1, 1)
|
||||
self.setcorner(sys.maxsize, sys.maxsize)
|
||||
|
||||
def selectcolumn(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
self.setcurrent(x, 1)
|
||||
self.setcorner(x, sys.maxsize)
|
||||
|
||||
def extendcolumn(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
if x > 0:
|
||||
self.setcurrent(self.currentxy[0], 1)
|
||||
self.setcorner(x, sys.maxsize)
|
||||
|
||||
def selectrow(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
self.setcurrent(1, y)
|
||||
self.setcorner(sys.maxsize, y)
|
||||
|
||||
def extendrow(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
if y > 0:
|
||||
self.setcurrent(1, self.currentxy[1])
|
||||
self.setcorner(sys.maxsize, y)
|
||||
|
||||
def press(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
if x > 0 and y > 0:
|
||||
self.setcurrent(x, y)
|
||||
|
||||
def motion(self, event):
|
||||
x, y = self.whichxy(event)
|
||||
if x > 0 and y > 0:
|
||||
self.setcorner(x, y)
|
||||
|
||||
release = motion
|
||||
|
||||
def whichxy(self, event):
|
||||
w = self.cellgrid.winfo_containing(event.x_root, event.y_root)
|
||||
if w is not None and isinstance(w, Tk.Label):
|
||||
try:
|
||||
return w.__x, w.__y
|
||||
except AttributeError:
|
||||
pass
|
||||
return 0, 0
|
||||
|
||||
def save(self):
|
||||
self.sheet.save(self.filename)
|
||||
|
||||
def setcurrent(self, x, y):
|
||||
"Make (x, y) the current cell."
|
||||
if self.currentxy is not None:
|
||||
self.change_cell()
|
||||
self.clearfocus()
|
||||
self.beacon['text'] = cellname(x, y)
|
||||
self.load_entry(x, y)
|
||||
self.entry.focus_set()
|
||||
self.currentxy = x, y
|
||||
self.cornerxy = None
|
||||
gridcell = self.gridcells.get(self.currentxy)
|
||||
if gridcell is not None:
|
||||
gridcell['bg'] = 'yellow'
|
||||
|
||||
def setcorner(self, x, y):
|
||||
if self.currentxy is None or self.currentxy == (x, y):
|
||||
self.setcurrent(x, y)
|
||||
return
|
||||
self.clearfocus()
|
||||
self.cornerxy = x, y
|
||||
x1, y1 = self.currentxy
|
||||
x2, y2 = self.cornerxy or self.currentxy
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
for (x, y), cell in self.gridcells.items():
|
||||
if x1 <= x <= x2 and y1 <= y <= y2:
|
||||
cell['bg'] = 'lightBlue'
|
||||
gridcell = self.gridcells.get(self.currentxy)
|
||||
if gridcell is not None:
|
||||
gridcell['bg'] = 'yellow'
|
||||
self.setbeacon(x1, y1, x2, y2)
|
||||
|
||||
def setbeacon(self, x1, y1, x2, y2):
|
||||
if x1 == y1 == 1 and x2 == y2 == sys.maxsize:
|
||||
name = ":"
|
||||
elif (x1, x2) == (1, sys.maxsize):
|
||||
if y1 == y2:
|
||||
name = "%d" % y1
|
||||
else:
|
||||
name = "%d:%d" % (y1, y2)
|
||||
elif (y1, y2) == (1, sys.maxsize):
|
||||
if x1 == x2:
|
||||
name = "%s" % colnum2name(x1)
|
||||
else:
|
||||
name = "%s:%s" % (colnum2name(x1), colnum2name(x2))
|
||||
else:
|
||||
name1 = cellname(*self.currentxy)
|
||||
name2 = cellname(*self.cornerxy)
|
||||
name = "%s:%s" % (name1, name2)
|
||||
self.beacon['text'] = name
|
||||
|
||||
|
||||
def clearfocus(self):
|
||||
if self.currentxy is not None:
|
||||
x1, y1 = self.currentxy
|
||||
x2, y2 = self.cornerxy or self.currentxy
|
||||
if x1 > x2:
|
||||
x1, x2 = x2, x1
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
for (x, y), cell in self.gridcells.items():
|
||||
if x1 <= x <= x2 and y1 <= y <= y2:
|
||||
cell['bg'] = 'white'
|
||||
|
||||
def return_event(self, event):
|
||||
"Callback for the Return key."
|
||||
self.change_cell()
|
||||
x, y = self.currentxy
|
||||
self.setcurrent(x, y+1)
|
||||
return "break"
|
||||
|
||||
def shift_return_event(self, event):
|
||||
"Callback for the Return key with Shift modifier."
|
||||
self.change_cell()
|
||||
x, y = self.currentxy
|
||||
self.setcurrent(x, max(1, y-1))
|
||||
return "break"
|
||||
|
||||
def tab_event(self, event):
|
||||
"Callback for the Tab key."
|
||||
self.change_cell()
|
||||
x, y = self.currentxy
|
||||
self.setcurrent(x+1, y)
|
||||
return "break"
|
||||
|
||||
def shift_tab_event(self, event):
|
||||
"Callback for the Tab key with Shift modifier."
|
||||
self.change_cell()
|
||||
x, y = self.currentxy
|
||||
self.setcurrent(max(1, x-1), y)
|
||||
return "break"
|
||||
|
||||
def change_cell(self):
|
||||
"Set the current cell from the entry widget."
|
||||
x, y = self.currentxy
|
||||
text = self.entry.get()
|
||||
cell = None
|
||||
if text.startswith('='):
|
||||
cell = FormulaCell(text[1:])
|
||||
else:
|
||||
for cls in int, float, complex:
|
||||
try:
|
||||
value = cls(text)
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
else:
|
||||
cell = NumericCell(value)
|
||||
break
|
||||
if cell is None and text:
|
||||
cell = StringCell(text)
|
||||
if cell is None:
|
||||
self.sheet.clearcell(x, y)
|
||||
else:
|
||||
self.sheet.setcell(x, y, cell)
|
||||
self.sync()
|
||||
|
||||
def sync(self):
|
||||
"Fill the GUI cells from the sheet cells."
|
||||
self.sheet.recalc()
|
||||
for (x, y), gridcell in self.gridcells.items():
|
||||
if x == 0 or y == 0:
|
||||
continue
|
||||
cell = self.sheet.getcell(x, y)
|
||||
if cell is None:
|
||||
gridcell['text'] = ""
|
||||
else:
|
||||
if hasattr(cell, 'format'):
|
||||
text, alignment = cell.format()
|
||||
else:
|
||||
text, alignment = str(cell), LEFT
|
||||
gridcell['text'] = text
|
||||
gridcell['anchor'] = align2anchor[alignment]
|
||||
|
||||
|
||||
def test_basic():
|
||||
"Basic non-gui self-test."
|
||||
a = Sheet()
|
||||
for x in range(1, 11):
|
||||
for y in range(1, 11):
|
||||
if x == 1:
|
||||
cell = NumericCell(y)
|
||||
elif y == 1:
|
||||
cell = NumericCell(x)
|
||||
else:
|
||||
c1 = cellname(x, 1)
|
||||
c2 = cellname(1, y)
|
||||
formula = "%s*%s" % (c1, c2)
|
||||
cell = FormulaCell(formula)
|
||||
a.setcell(x, y, cell)
|
||||
## if os.path.isfile("sheet1.xml"):
|
||||
## print "Loading from sheet1.xml"
|
||||
## a.load("sheet1.xml")
|
||||
a.display()
|
||||
a.save("sheet1.xml")
|
||||
|
||||
def test_gui():
|
||||
"GUI test."
|
||||
if sys.argv[1:]:
|
||||
filename = sys.argv[1]
|
||||
else:
|
||||
filename = "sheet1.xml"
|
||||
g = SheetGUI(filename)
|
||||
g.root.mainloop()
|
||||
|
||||
if __name__ == '__main__':
|
||||
#test_basic()
|
||||
test_gui()
|
||||
74
Tools/demo/vector.py
Normal file
74
Tools/demo/vector.py
Normal file
|
|
@@ -0,0 +1,74 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
A demonstration of classes and their special methods in Python.
|
||||
"""
|
||||
|
||||
class Vec:
|
||||
"""A simple vector class.
|
||||
|
||||
Instances of the Vec class can be constructed from numbers
|
||||
|
||||
>>> a = Vec(1, 2, 3)
|
||||
>>> b = Vec(3, 2, 1)
|
||||
|
||||
added
|
||||
>>> a + b
|
||||
Vec(4, 4, 4)
|
||||
|
||||
subtracted
|
||||
>>> a - b
|
||||
Vec(-2, 0, 2)
|
||||
|
||||
and multiplied by a scalar on the left
|
||||
>>> 3.0 * a
|
||||
Vec(3.0, 6.0, 9.0)
|
||||
|
||||
or on the right
|
||||
>>> a * 3.0
|
||||
Vec(3.0, 6.0, 9.0)
|
||||
"""
|
||||
def __init__(self, *v):
|
||||
self.v = list(v)
|
||||
|
||||
@classmethod
|
||||
def fromlist(cls, v):
|
||||
if not isinstance(v, list):
|
||||
raise TypeError
|
||||
inst = cls()
|
||||
inst.v = v
|
||||
return inst
|
||||
|
||||
def __repr__(self):
|
||||
args = ', '.join(repr(x) for x in self.v)
|
||||
return 'Vec({})'.format(args)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.v)
|
||||
|
||||
def __getitem__(self, i):
|
||||
return self.v[i]
|
||||
|
||||
def __add__(self, other):
|
||||
# Element-wise addition
|
||||
v = [x + y for x, y in zip(self.v, other.v)]
|
||||
return Vec.fromlist(v)
|
||||
|
||||
def __sub__(self, other):
|
||||
# Element-wise subtraction
|
||||
v = [x - y for x, y in zip(self.v, other.v)]
|
||||
return Vec.fromlist(v)
|
||||
|
||||
def __mul__(self, scalar):
|
||||
# Multiply by scalar
|
||||
v = [x * scalar for x in self.v]
|
||||
return Vec.fromlist(v)
|
||||
|
||||
__rmul__ = __mul__
|
||||
|
||||
|
||||
def test():
|
||||
import doctest
|
||||
doctest.testmod()
|
||||
|
||||
test()
|
||||
150
Tools/i18n/makelocalealias.py
Normal file
150
Tools/i18n/makelocalealias.py
Normal file
|
|
@@ -0,0 +1,150 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Convert the X11 locale.alias file into a mapping dictionary suitable
|
||||
for locale.py.
|
||||
|
||||
Written by Marc-Andre Lemburg <mal@genix.com>, 2004-12-10.
|
||||
|
||||
"""
|
||||
import locale
|
||||
import sys
|
||||
_locale = locale
|
||||
|
||||
# Location of the X11 alias file.
|
||||
LOCALE_ALIAS = '/usr/share/X11/locale/locale.alias'
|
||||
# Location of the glibc SUPPORTED locales file.
|
||||
SUPPORTED = '/usr/share/i18n/SUPPORTED'
|
||||
|
||||
def parse(filename):
|
||||
|
||||
with open(filename, encoding='latin1') as f:
|
||||
lines = list(f)
|
||||
data = {}
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
if line[:1] == '#':
|
||||
continue
|
||||
locale, alias = line.split()
|
||||
# Fix non-standard locale names, e.g. ks_IN@devanagari.UTF-8
|
||||
if '@' in alias:
|
||||
alias_lang, _, alias_mod = alias.partition('@')
|
||||
if '.' in alias_mod:
|
||||
alias_mod, _, alias_enc = alias_mod.partition('.')
|
||||
alias = alias_lang + '.' + alias_enc + '@' + alias_mod
|
||||
# Strip ':'
|
||||
if locale[-1] == ':':
|
||||
locale = locale[:-1]
|
||||
# Lower-case locale
|
||||
locale = locale.lower()
|
||||
# Ignore one letter locale mappings (except for 'c')
|
||||
if len(locale) == 1 and locale != 'c':
|
||||
continue
|
||||
# Normalize encoding, if given
|
||||
if '.' in locale:
|
||||
lang, encoding = locale.split('.')[:2]
|
||||
encoding = encoding.replace('-', '')
|
||||
encoding = encoding.replace('_', '')
|
||||
locale = lang + '.' + encoding
|
||||
data[locale] = alias
|
||||
return data
|
||||
|
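# Illustrative sketch (not in the original file) of the alias fix-up done above
# for a hypothetical X11 entry carrying both an @modifier and an encoding:
#   'ks_IN@devanagari.UTF-8'  ->  'ks_IN.UTF-8@devanagari'
# i.e. the encoding is moved in front of the modifier so the value matches the
# form used by locale.locale_alias.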
||||
def parse_glibc_supported(filename):
|
||||
|
||||
with open(filename, encoding='latin1') as f:
|
||||
lines = list(f)
|
||||
data = {}
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
if line[:1] == '#':
|
||||
continue
|
||||
line = line.replace('/', ' ').strip()
|
||||
line = line.rstrip('\\').rstrip()
|
||||
words = line.split()
|
||||
if len(words) != 2:
|
||||
continue
|
||||
alias, alias_encoding = words
|
||||
# Lower-case locale
|
||||
locale = alias.lower()
|
||||
# Normalize encoding, if given
|
||||
if '.' in locale:
|
||||
lang, encoding = locale.split('.')[:2]
|
||||
encoding = encoding.replace('-', '')
|
||||
encoding = encoding.replace('_', '')
|
||||
locale = lang + '.' + encoding
|
||||
# Add an encoding to alias
|
||||
alias, _, modifier = alias.partition('@')
|
||||
alias = _locale._replace_encoding(alias, alias_encoding)
|
||||
if modifier and not (modifier == 'euro' and alias_encoding == 'ISO-8859-15'):
|
||||
alias += '@' + modifier
|
||||
data[locale] = alias
|
||||
return data
|
||||
|
||||
def pprint(data):
|
||||
items = sorted(data.items())
|
||||
for k, v in items:
|
||||
print(' %-40s%a,' % ('%a:' % k, v))
|
||||
|
||||
def print_differences(data, olddata):
|
||||
items = sorted(olddata.items())
|
||||
for k, v in items:
|
||||
if k not in data:
|
||||
print('# removed %a' % k)
|
||||
elif olddata[k] != data[k]:
|
||||
print('# updated %a -> %a to %a' % \
|
||||
(k, olddata[k], data[k]))
|
||||
# Additions are not mentioned
|
||||
|
||||
def optimize(data):
|
||||
locale_alias = locale.locale_alias
|
||||
locale.locale_alias = data.copy()
|
||||
for k, v in data.items():
|
||||
del locale.locale_alias[k]
|
||||
if locale.normalize(k) != v:
|
||||
locale.locale_alias[k] = v
|
||||
newdata = locale.locale_alias
|
||||
errors = check(data)
|
||||
locale.locale_alias = locale_alias
|
||||
if errors:
|
||||
sys.exit(1)
|
||||
return newdata
|
||||
|
||||
def check(data):
|
||||
# Check that all alias definitions from the X11 file
|
||||
# are actually mapped to the correct alias locales.
|
||||
errors = 0
|
||||
for k, v in data.items():
|
||||
if locale.normalize(k) != v:
|
||||
print('ERROR: %a -> %a != %a' % (k, locale.normalize(k), v),
|
||||
file=sys.stderr)
|
||||
errors += 1
|
||||
return errors
|
||||
|
||||
if __name__ == '__main__':
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--locale-alias', default=LOCALE_ALIAS,
|
||||
help='location of the X11 alias file '
|
||||
'(default: %a)' % LOCALE_ALIAS)
|
||||
parser.add_argument('--glibc-supported', default=SUPPORTED,
|
||||
help='location of the glibc SUPPORTED locales file '
|
||||
'(default: %a)' % SUPPORTED)
|
||||
args = parser.parse_args()
|
||||
|
||||
data = locale.locale_alias.copy()
|
||||
data.update(parse_glibc_supported(args.glibc_supported))
|
||||
data.update(parse(args.locale_alias))
|
||||
while True:
|
||||
# Repeat optimization while the size is decreased.
|
||||
n = len(data)
|
||||
data = optimize(data)
|
||||
if len(data) == n:
|
||||
break
|
||||
print_differences(data, locale.locale_alias)
|
||||
print()
|
||||
print('locale_alias = {')
|
||||
pprint(data)
|
||||
print('}')
|
||||
238
Tools/i18n/msgfmt.py
Normal file
238
Tools/i18n/msgfmt.py
Normal file
|
|
@@ -0,0 +1,238 @@
|
|||
#! /usr/bin/env python3
|
||||
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
|
||||
|
||||
"""Generate binary message catalog from textual translation description.
|
||||
|
||||
This program converts a textual Uniforum-style message catalog (.po file) into
|
||||
a binary GNU catalog (.mo file). This is essentially the same function as the
|
||||
GNU msgfmt program; however, it is a simpler implementation.
|
||||
|
||||
Usage: msgfmt.py [OPTIONS] filename.po
|
||||
|
||||
Options:
|
||||
-o file
|
||||
--output-file=file
|
||||
Specify the output file to write to. If omitted, output will go to a
|
||||
file named filename.mo (based on the input file name).
|
||||
|
||||
-h
|
||||
--help
|
||||
Print this message and exit.
|
||||
|
||||
-V
|
||||
--version
|
||||
Display version information and exit.
|
||||
"""
|
||||
|
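# Illustrative usage sketch (not part of the original script); the -o option is
# documented in the docstring above, the .po/.mo file names are hypothetical:
#   python msgfmt.py app.po                    # writes app.mo next to the input
#   python msgfmt.py -o locale/app.mo app.po   # explicit output file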
||||
import os
|
||||
import sys
|
||||
import ast
|
||||
import getopt
|
||||
import struct
|
||||
import array
|
||||
from email.parser import HeaderParser
|
||||
|
||||
__version__ = "1.1"
|
||||
|
||||
MESSAGES = {}
|
||||
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print(__doc__, file=sys.stderr)
|
||||
if msg:
|
||||
print(msg, file=sys.stderr)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
|
||||
def add(id, str, fuzzy):
|
||||
"Add a non-fuzzy translation to the dictionary."
|
||||
global MESSAGES
|
||||
if not fuzzy and str:
|
||||
MESSAGES[id] = str
|
||||
|
||||
|
||||
|
||||
def generate():
|
||||
"Return the generated output."
|
||||
global MESSAGES
|
||||
# the keys are sorted in the .mo file
|
||||
keys = sorted(MESSAGES.keys())
|
||||
offsets = []
|
||||
ids = strs = b''
|
||||
for id in keys:
|
||||
# For each string, we need size and file offset. Each string is NUL
|
||||
# terminated; the NUL does not count into the size.
|
||||
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
|
||||
ids += id + b'\0'
|
||||
strs += MESSAGES[id] + b'\0'
|
||||
output = ''
|
||||
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
|
||||
# the keys start right after the index tables; each index entry gives the
|
||||
# length and file offset of the corresponding original or translated string.
|
||||
keystart = 7*4+16*len(keys)
|
||||
# and the values start after the keys
|
||||
valuestart = keystart + len(ids)
|
||||
koffsets = []
|
||||
voffsets = []
|
||||
# The string table first has the list of keys, then the list of values.
|
||||
# Each entry has first the size of the string, then the file offset.
|
||||
for o1, l1, o2, l2 in offsets:
|
||||
koffsets += [l1, o1+keystart]
|
||||
voffsets += [l2, o2+valuestart]
|
||||
offsets = koffsets + voffsets
|
||||
output = struct.pack("Iiiiiii",
|
||||
0x950412de, # Magic
|
||||
0, # Version
|
||||
len(keys), # # of entries
|
||||
7*4, # start of key index
|
||||
7*4+len(keys)*8, # start of value index
|
||||
0, 0) # size and offset of hash table
|
||||
output += array.array("i", offsets).tobytes()
|
||||
output += ids
|
||||
output += strs
|
||||
return output
|
||||
|
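# For reference (not part of the original file): layout of the buffer returned
# by generate(), where N is the number of message keys:
#   offset 0          header of 7 uint32s: magic 0x950412de, version 0, N,
#                     key-index offset (28), value-index offset (28 + 8*N),
#                     hash table size and offset (both 0)
#   offset 28         N pairs of (key length, key offset)
#   offset 28 + 8*N   N pairs of (value length, value offset)
#   offset 28 + 16*N  NUL-terminated msgids, followed by the msgstrs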
||||
|
||||
|
||||
def make(filename, outfile):
|
||||
ID = 1
|
||||
STR = 2
|
||||
|
||||
# Compute .mo name from .po name and arguments
|
||||
if filename.endswith('.po'):
|
||||
infile = filename
|
||||
else:
|
||||
infile = filename + '.po'
|
||||
if outfile is None:
|
||||
outfile = os.path.splitext(infile)[0] + '.mo'
|
||||
|
||||
try:
|
||||
with open(infile, 'rb') as f:
|
||||
lines = f.readlines()
|
||||
except IOError as msg:
|
||||
print(msg, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
section = None
|
||||
fuzzy = 0
|
||||
|
||||
# Start off assuming Latin-1, so everything decodes without failure,
|
||||
# until we know the exact encoding
|
||||
encoding = 'latin-1'
|
||||
|
||||
# Parse the catalog
|
||||
lno = 0
|
||||
for l in lines:
|
||||
l = l.decode(encoding)
|
||||
lno += 1
|
||||
# If we get a comment line after a msgstr, this is a new entry
|
||||
if l[0] == '#' and section == STR:
|
||||
add(msgid, msgstr, fuzzy)
|
||||
section = None
|
||||
fuzzy = 0
|
||||
# Record a fuzzy mark
|
||||
if l[:2] == '#,' and 'fuzzy' in l:
|
||||
fuzzy = 1
|
||||
# Skip comments
|
||||
if l[0] == '#':
|
||||
continue
|
||||
# Now we are in a msgid section, output previous section
|
||||
if l.startswith('msgid') and not l.startswith('msgid_plural'):
|
||||
if section == STR:
|
||||
add(msgid, msgstr, fuzzy)
|
||||
if not msgid:
|
||||
# See whether there is an encoding declaration
|
||||
p = HeaderParser()
|
||||
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
|
||||
if charset:
|
||||
encoding = charset
|
||||
section = ID
|
||||
l = l[5:]
|
||||
msgid = msgstr = b''
|
||||
is_plural = False
|
||||
# This is a message with plural forms
|
||||
elif l.startswith('msgid_plural'):
|
||||
if section != ID:
|
||||
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
|
||||
file=sys.stderr)
|
||||
sys.exit(1)
|
||||
l = l[12:]
|
||||
msgid += b'\0' # separator of singular and plural
|
||||
is_plural = True
|
||||
# Now we are in a msgstr section
|
||||
elif l.startswith('msgstr'):
|
||||
section = STR
|
||||
if l.startswith('msgstr['):
|
||||
if not is_plural:
|
||||
print('plural without msgid_plural on %s:%d' % (infile, lno),
|
||||
file=sys.stderr)
|
||||
sys.exit(1)
|
||||
l = l.split(']', 1)[1]
|
||||
if msgstr:
|
||||
msgstr += b'\0' # Separator of the various plural forms
|
||||
else:
|
||||
if is_plural:
|
||||
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
|
||||
file=sys.stderr)
|
||||
sys.exit(1)
|
||||
l = l[6:]
|
||||
# Skip empty lines
|
||||
l = l.strip()
|
||||
if not l:
|
||||
continue
|
||||
l = ast.literal_eval(l)
|
||||
if section == ID:
|
||||
msgid += l.encode(encoding)
|
||||
elif section == STR:
|
||||
msgstr += l.encode(encoding)
|
||||
else:
|
||||
print('Syntax error on %s:%d' % (infile, lno), \
|
||||
'before:', file=sys.stderr)
|
||||
print(l, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
# Add last entry
|
||||
if section == STR:
|
||||
add(msgid, msgstr, fuzzy)
|
||||
|
||||
# Compute output
|
||||
output = generate()
|
||||
|
||||
try:
|
||||
with open(outfile,"wb") as f:
|
||||
f.write(output)
|
||||
except IOError as msg:
|
||||
print(msg, file=sys.stderr)
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
|
||||
['help', 'version', 'output-file='])
|
||||
except getopt.error as msg:
|
||||
usage(1, msg)
|
||||
|
||||
outfile = None
|
||||
# parse options
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-V', '--version'):
|
||||
print("msgfmt.py", __version__)
|
||||
sys.exit(0)
|
||||
elif opt in ('-o', '--output-file'):
|
||||
outfile = arg
|
||||
# do it
|
||||
if not args:
|
||||
print('No input file given', file=sys.stderr)
|
||||
print("Try `msgfmt --help' for more information.", file=sys.stderr)
|
||||
return
|
||||
|
||||
for filename in args:
|
||||
make(filename, outfile)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
631
Tools/i18n/pygettext.py
Normal file
631
Tools/i18n/pygettext.py
Normal file
|
|
@@ -0,0 +1,631 @@
|
|||
#! /usr/bin/env python3
|
||||
# -*- coding: iso-8859-1 -*-
|
||||
# Originally written by Barry Warsaw <barry@python.org>
|
||||
#
|
||||
# Minimally patched to make it even more xgettext compatible
|
||||
# by Peter Funk <pf@artcom-gmbh.de>
|
||||
#
|
||||
# 2002-11-22 Jürgen Hermann <jh@web.de>
|
||||
# Added checks that _() only contains string literals, and
|
||||
# command line args are resolved to module lists, i.e. you
|
||||
# can now pass a filename, a module or package name, or a
|
||||
# directory (including globbing chars, important for Win32).
|
||||
# Made docstring fit in 80 chars wide displays using pydoc.
|
||||
#
|
||||
|
||||
# for selftesting
|
||||
try:
|
||||
import fintl
|
||||
_ = fintl.gettext
|
||||
except ImportError:
|
||||
_ = lambda s: s
|
||||
|
||||
__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
|
||||
|
||||
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
|
||||
internationalization of C programs. Most of these tools are independent of
|
||||
the programming language and can be used from within Python programs.
|
||||
Martin von Loewis' work[1] helps considerably in this regard.
|
||||
|
||||
There's one problem though; xgettext is the program that scans source code
|
||||
looking for message strings, but it groks only C (or C++). Python
|
||||
introduces a few wrinkles, such as dual quoting characters, triple quoted
|
||||
strings, and raw strings. xgettext understands none of this.
|
||||
|
||||
Enter pygettext, which uses Python's standard tokenize module to scan
|
||||
Python source code, generating .pot files identical to what GNU xgettext[2]
|
||||
generates for C and C++ code. From there, the standard GNU tools can be
|
||||
used.
|
||||
|
||||
A word about marking Python strings as candidates for translation. GNU
|
||||
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
|
||||
and gettext_noop. But those can be a lot of text to include all over your
|
||||
code. C and C++ have a trick: they use the C preprocessor. Most
|
||||
internationalized C source includes a #define for gettext() to _() so that
|
||||
what has to be written in the source is much less. Thus these are both
|
||||
translatable strings:
|
||||
|
||||
gettext("Translatable String")
|
||||
_("Translatable String")
|
||||
|
||||
Python of course has no preprocessor so this doesn't work so well. Thus,
|
||||
pygettext searches only for _() by default, but see the -k/--keyword flag
|
||||
below for how to augment this.
|
||||
|
||||
[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
|
||||
[2] http://www.gnu.org/software/gettext/gettext.html
|
||||
|
||||
NOTE: pygettext attempts to be option and feature compatible with GNU
|
||||
xgettext wherever possible. However, some options are still missing or are
|
||||
not fully implemented. Also, xgettext's use of command line switches with
|
||||
option arguments is broken, and in these cases, pygettext just defines
|
||||
additional switches.
|
||||
|
||||
Usage: pygettext [options] inputfile ...
|
||||
|
||||
Options:
|
||||
|
||||
-a
|
||||
--extract-all
|
||||
Extract all strings.
|
||||
|
||||
-d name
|
||||
--default-domain=name
|
||||
Rename the default output file from messages.pot to name.pot.
|
||||
|
||||
-E
|
||||
--escape
|
||||
Replace non-ASCII characters with octal escape sequences.
|
||||
|
||||
-D
|
||||
--docstrings
|
||||
Extract module, class, method, and function docstrings. These do
|
||||
not need to be wrapped in _() markers, and in fact cannot be for
|
||||
Python to consider them docstrings. (See also the -X option).
|
||||
|
||||
-h
|
||||
--help
|
||||
Print this help message and exit.
|
||||
|
||||
-k word
|
||||
--keyword=word
|
||||
Keywords to look for in addition to the default set, which are:
|
||||
%(DEFAULTKEYWORDS)s
|
||||
|
||||
You can have multiple -k flags on the command line.
|
||||
|
||||
-K
|
||||
--no-default-keywords
|
||||
Disable the default set of keywords (see above). Any keywords
|
||||
explicitly added with the -k/--keyword option are still recognized.
|
||||
|
||||
--no-location
|
||||
Do not write filename/lineno location comments.
|
||||
|
||||
-n
|
||||
--add-location
|
||||
Write filename/lineno location comments indicating where each
|
||||
extracted string is found in the source. These lines appear before
|
||||
each msgid. The style of comments is controlled by the -S/--style
|
||||
option. This is the default.
|
||||
|
||||
-o filename
|
||||
--output=filename
|
||||
Rename the default output file from messages.pot to filename. If
|
||||
filename is `-' then the output is sent to standard out.
|
||||
|
||||
-p dir
|
||||
--output-dir=dir
|
||||
Output files will be placed in directory dir.
|
||||
|
||||
-S stylename
|
||||
--style stylename
|
||||
Specify which style to use for location comments. Two styles are
|
||||
supported:
|
||||
|
||||
Solaris # File: filename, line: line-number
|
||||
GNU #: filename:line
|
||||
|
||||
The style name is case insensitive. GNU style is the default.
|
||||
|
||||
-v
|
||||
--verbose
|
||||
Print the names of the files being processed.
|
||||
|
||||
-V
|
||||
--version
|
||||
Print the version of pygettext and exit.
|
||||
|
||||
-w columns
|
||||
--width=columns
|
||||
Set width of output to columns.
|
||||
|
||||
-x filename
|
||||
--exclude-file=filename
|
||||
Specify a file that contains a list of strings that are not to be
|
||||
extracted from the input files. Each string to be excluded must
|
||||
appear on a line by itself in the file.
|
||||
|
||||
-X filename
|
||||
--no-docstrings=filename
|
||||
Specify a file that contains a list of files (one per line) that
|
||||
should not have their docstrings extracted. This is only useful in
|
||||
conjunction with the -D option above.
|
||||
|
||||
If `inputfile' is -, standard input is read.
|
||||
""")
|
||||
|
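# Illustrative usage sketch (not part of the original script); the options are
# documented in the docstring above, the keyword, file and directory names are
# hypothetical:
#   python pygettext.py -d myapp -k N_ src/*.py      # writes myapp.pot
#   python pygettext.py --docstrings -o - mypackage/ # include docstrings, to stdout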
||||
import os
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
import sys
|
||||
import glob
|
||||
import time
|
||||
import getopt
|
||||
import token
|
||||
import tokenize
|
||||
|
||||
__version__ = '1.5'
|
||||
|
||||
default_keywords = ['_']
|
||||
DEFAULTKEYWORDS = ', '.join(default_keywords)
|
||||
|
||||
EMPTYSTRING = ''
|
||||
|
||||
|
||||
|
||||
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
|
||||
# there.
|
||||
pot_header = _('''\
|
||||
# SOME DESCRIPTIVE TITLE.
|
||||
# Copyright (C) YEAR ORGANIZATION
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PACKAGE VERSION\\n"
|
||||
"POT-Creation-Date: %(time)s\\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\\n"
|
||||
"MIME-Version: 1.0\\n"
|
||||
"Content-Type: text/plain; charset=%(charset)s\\n"
|
||||
"Content-Transfer-Encoding: %(encoding)s\\n"
|
||||
"Generated-By: pygettext.py %(version)s\\n"
|
||||
|
||||
''')
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print(__doc__ % globals(), file=sys.stderr)
|
||||
if msg:
|
||||
print(msg, file=sys.stderr)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
|
||||
def make_escapes(pass_nonascii):
|
||||
global escapes, escape
|
||||
if pass_nonascii:
|
||||
# Allow non-ascii characters to pass through so that e.g. 'msgid
|
||||
# "Höhe"' would result not result in 'msgid "H\366he"'. Otherwise we
|
||||
# escape any character outside the 32..126 range.
|
||||
mod = 128
|
||||
escape = escape_ascii
|
||||
else:
|
||||
mod = 256
|
||||
escape = escape_nonascii
|
||||
escapes = [r"\%03o" % i for i in range(mod)]
|
||||
for i in range(32, 127):
|
||||
escapes[i] = chr(i)
|
||||
escapes[ord('\\')] = r'\\'
|
||||
escapes[ord('\t')] = r'\t'
|
||||
escapes[ord('\r')] = r'\r'
|
||||
escapes[ord('\n')] = r'\n'
|
||||
escapes[ord('\"')] = r'\"'
|
||||
|
||||
|
||||
def escape_ascii(s, encoding):
|
||||
return ''.join(escapes[ord(c)] if ord(c) < 128 else c for c in s)
|
||||
|
||||
def escape_nonascii(s, encoding):
|
||||
return ''.join(escapes[b] for b in s.encode(encoding))
|
||||
|
||||
|
||||
def is_literal_string(s):
|
||||
return s[0] in '\'"' or (s[0] in 'rRuU' and s[1] in '\'"')
|
||||
|
||||
|
||||
def safe_eval(s):
|
||||
# unwrap quotes, safely
|
||||
return eval(s, {'__builtins__':{}}, {})
|
||||
|
||||
|
||||
def normalize(s, encoding):
|
||||
# This converts the various Python string types into a format that is
|
||||
# appropriate for .po files, namely much closer to C style.
|
||||
lines = s.split('\n')
|
||||
if len(lines) == 1:
|
||||
s = '"' + escape(s, encoding) + '"'
|
||||
else:
|
||||
if not lines[-1]:
|
||||
del lines[-1]
|
||||
lines[-1] = lines[-1] + '\n'
|
||||
for i in range(len(lines)):
|
||||
lines[i] = escape(lines[i], encoding)
|
||||
lineterm = '\\n"\n"'
|
||||
s = '""\n"' + lineterm.join(lines) + '"'
|
||||
return s
|
||||
|
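# Illustrative sketch (not in the original file) of normalize()'s output,
# after make_escapes() has set up the escape table:
#   normalize('hello', 'utf-8')   -> '"hello"'
#   normalize('a\nb\n', 'utf-8')  -> '""\n"a\\n"\n"b\\n"'
# i.e. a multi-line entry becomes an empty '""' line followed by one quoted,
# \n-terminated line per source line, as is conventional in .po files.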
||||
|
||||
def containsAny(str, set):
|
||||
"""Check whether 'str' contains ANY of the chars in 'set'"""
|
||||
return 1 in [c in str for c in set]
|
||||
|
||||
|
||||
def getFilesForName(name):
|
||||
"""Get a list of module files for a filename, a module or package name,
|
||||
or a directory.
|
||||
"""
|
||||
if not os.path.exists(name):
|
||||
# check for glob chars
|
||||
if containsAny(name, "*?[]"):
|
||||
files = glob.glob(name)
|
||||
list = []
|
||||
for file in files:
|
||||
list.extend(getFilesForName(file))
|
||||
return list
|
||||
|
||||
# try to find module or package
|
||||
try:
|
||||
spec = importlib.util.find_spec(name)
|
||||
name = spec.origin
|
||||
except ImportError:
|
||||
name = None
|
||||
if not name:
|
||||
return []
|
||||
|
||||
if os.path.isdir(name):
|
||||
# find all python files in directory
|
||||
list = []
|
||||
# get extension for python source files
|
||||
_py_ext = importlib.machinery.SOURCE_SUFFIXES[0]
|
||||
for root, dirs, files in os.walk(name):
|
||||
# don't recurse into CVS directories
|
||||
if 'CVS' in dirs:
|
||||
dirs.remove('CVS')
|
||||
# add all *.py files to list
|
||||
list.extend(
|
||||
[os.path.join(root, file) for file in files
|
||||
if os.path.splitext(file)[1] == _py_ext]
|
||||
)
|
||||
return list
|
||||
elif os.path.exists(name):
|
||||
# a single file
|
||||
return [name]
|
||||
|
||||
return []
|
||||
|
||||
|
||||
class TokenEater:
|
||||
def __init__(self, options):
|
||||
self.__options = options
|
||||
self.__messages = {}
|
||||
self.__state = self.__waiting
|
||||
self.__data = []
|
||||
self.__lineno = -1
|
||||
self.__freshmodule = 1
|
||||
self.__curfile = None
|
||||
self.__enclosurecount = 0
|
||||
|
||||
def __call__(self, ttype, tstring, stup, etup, line):
|
||||
# dispatch
|
||||
## import token
|
||||
## print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
|
||||
## file=sys.stderr)
|
||||
self.__state(ttype, tstring, stup[0])
|
||||
|
||||
def __waiting(self, ttype, tstring, lineno):
|
||||
opts = self.__options
|
||||
# Do docstring extractions, if enabled
|
||||
if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
|
||||
# module docstring?
|
||||
if self.__freshmodule:
|
||||
if ttype == tokenize.STRING and is_literal_string(tstring):
|
||||
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
|
||||
self.__freshmodule = 0
|
||||
elif ttype not in (tokenize.COMMENT, tokenize.NL):
|
||||
self.__freshmodule = 0
|
||||
return
|
||||
# class or func/method docstring?
|
||||
if ttype == tokenize.NAME and tstring in ('class', 'def'):
|
||||
self.__state = self.__suiteseen
|
||||
return
|
||||
if ttype == tokenize.NAME and tstring in opts.keywords:
|
||||
self.__state = self.__keywordseen
|
||||
|
||||
def __suiteseen(self, ttype, tstring, lineno):
|
||||
# skip over any enclosure pairs until we see the colon
|
||||
if ttype == tokenize.OP:
|
||||
if tstring == ':' and self.__enclosurecount == 0:
|
||||
# we see a colon and we're not in an enclosure: end of def
|
||||
self.__state = self.__suitedocstring
|
||||
elif tstring in '([{':
|
||||
self.__enclosurecount += 1
|
||||
elif tstring in ')]}':
|
||||
self.__enclosurecount -= 1
|
||||
|
||||
def __suitedocstring(self, ttype, tstring, lineno):
|
||||
# ignore any intervening noise
|
||||
if ttype == tokenize.STRING and is_literal_string(tstring):
|
||||
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
|
||||
self.__state = self.__waiting
|
||||
elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
|
||||
tokenize.COMMENT):
|
||||
# there was no class docstring
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __keywordseen(self, ttype, tstring, lineno):
|
||||
if ttype == tokenize.OP and tstring == '(':
|
||||
self.__data = []
|
||||
self.__lineno = lineno
|
||||
self.__state = self.__openseen
|
||||
else:
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __openseen(self, ttype, tstring, lineno):
|
||||
if ttype == tokenize.OP and tstring == ')':
|
||||
# We've seen the last of the translatable strings. Record the
|
||||
# line number of the first line of the strings and update the list
|
||||
# of messages seen. Reset state for the next batch. If there
|
||||
# were no strings inside _(), then just ignore this entry.
|
||||
if self.__data:
|
||||
self.__addentry(EMPTYSTRING.join(self.__data))
|
||||
self.__state = self.__waiting
|
||||
elif ttype == tokenize.STRING and is_literal_string(tstring):
|
||||
self.__data.append(safe_eval(tstring))
|
||||
elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
|
||||
token.NEWLINE, tokenize.NL]:
|
||||
# warn if we see anything other than STRING or whitespace
|
||||
print(_(
|
||||
'*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
|
||||
) % {
|
||||
'token': tstring,
|
||||
'file': self.__curfile,
|
||||
'lineno': self.__lineno
|
||||
}, file=sys.stderr)
|
||||
self.__state = self.__waiting
|
||||
|
||||
def __addentry(self, msg, lineno=None, isdocstring=0):
|
||||
if lineno is None:
|
||||
lineno = self.__lineno
|
||||
if not msg in self.__options.toexclude:
|
||||
entry = (self.__curfile, lineno)
|
||||
self.__messages.setdefault(msg, {})[entry] = isdocstring
|
||||
|
||||
def set_filename(self, filename):
|
||||
self.__curfile = filename
|
||||
self.__freshmodule = 1
|
||||
|
||||
def write(self, fp):
|
||||
options = self.__options
|
||||
timestamp = time.strftime('%Y-%m-%d %H:%M%z')
|
||||
encoding = fp.encoding if fp.encoding else 'UTF-8'
|
||||
print(pot_header % {'time': timestamp, 'version': __version__,
|
||||
'charset': encoding,
|
||||
'encoding': '8bit'}, file=fp)
|
||||
# Sort the entries. First sort each particular entry's keys, then
|
||||
# sort all the entries by their first item.
|
||||
reverse = {}
|
||||
for k, v in self.__messages.items():
|
||||
keys = sorted(v.keys())
|
||||
reverse.setdefault(tuple(keys), []).append((k, v))
|
||||
rkeys = sorted(reverse.keys())
|
||||
for rkey in rkeys:
|
||||
rentries = reverse[rkey]
|
||||
rentries.sort()
|
||||
for k, v in rentries:
|
||||
# If the entry was gleaned out of a docstring, then add a
|
||||
# comment stating so. This is to aid translators who may wish
|
||||
# to skip translating some unimportant docstrings.
|
||||
isdocstring = any(v.values())
|
||||
# k is the message string, v is a dictionary-set of (filename,
|
||||
# lineno) tuples. We want to sort the entries in v first by
|
||||
# file name and then by line number.
|
||||
v = sorted(v.keys())
|
||||
if not options.writelocations:
|
||||
pass
|
||||
# location comments are different b/w Solaris and GNU:
|
||||
elif options.locationstyle == options.SOLARIS:
|
||||
for filename, lineno in v:
|
||||
d = {'filename': filename, 'lineno': lineno}
|
||||
print(_(
|
||||
'# File: %(filename)s, line: %(lineno)d') % d, file=fp)
|
||||
elif options.locationstyle == options.GNU:
|
||||
# fit as many locations on one line, as long as the
|
||||
# resulting line length doesn't exceed 'options.width'
|
||||
locline = '#:'
|
||||
for filename, lineno in v:
|
||||
d = {'filename': filename, 'lineno': lineno}
|
||||
s = _(' %(filename)s:%(lineno)d') % d
|
||||
if len(locline) + len(s) <= options.width:
|
||||
locline = locline + s
|
||||
else:
|
||||
print(locline, file=fp)
|
||||
locline = "#:" + s
|
||||
if len(locline) > 2:
|
||||
print(locline, file=fp)
|
||||
if isdocstring:
|
||||
print('#, docstring', file=fp)
|
||||
print('msgid', normalize(k, encoding), file=fp)
|
||||
print('msgstr ""\n', file=fp)
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
global default_keywords
|
||||
try:
|
||||
opts, args = getopt.getopt(
|
||||
sys.argv[1:],
|
||||
'ad:DEhk:Kno:p:S:Vvw:x:X:',
|
||||
['extract-all', 'default-domain=', 'escape', 'help',
|
||||
'keyword=', 'no-default-keywords',
|
||||
'add-location', 'no-location', 'output=', 'output-dir=',
|
||||
'style=', 'verbose', 'version', 'width=', 'exclude-file=',
|
||||
'docstrings', 'no-docstrings',
|
||||
])
|
||||
except getopt.error as msg:
|
||||
usage(1, msg)
|
||||
|
||||
# for holding option values
|
||||
class Options:
|
||||
# constants
|
||||
GNU = 1
|
||||
SOLARIS = 2
|
||||
# defaults
|
||||
extractall = 0 # FIXME: currently this option has no effect at all.
|
||||
escape = 0
|
||||
keywords = []
|
||||
outpath = ''
|
||||
outfile = 'messages.pot'
|
||||
writelocations = 1
|
||||
locationstyle = GNU
|
||||
verbose = 0
|
||||
width = 78
|
||||
excludefilename = ''
|
||||
docstrings = 0
|
||||
nodocstrings = {}
|
||||
|
||||
options = Options()
|
||||
locations = {'gnu' : options.GNU,
|
||||
'solaris' : options.SOLARIS,
|
||||
}
|
||||
|
||||
# parse options
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-a', '--extract-all'):
|
||||
options.extractall = 1
|
||||
elif opt in ('-d', '--default-domain'):
|
||||
options.outfile = arg + '.pot'
|
||||
elif opt in ('-E', '--escape'):
|
||||
options.escape = 1
|
||||
elif opt in ('-D', '--docstrings'):
|
||||
options.docstrings = 1
|
||||
elif opt in ('-k', '--keyword'):
|
||||
options.keywords.append(arg)
|
||||
elif opt in ('-K', '--no-default-keywords'):
|
||||
default_keywords = []
|
||||
elif opt in ('-n', '--add-location'):
|
||||
options.writelocations = 1
|
||||
elif opt in ('--no-location',):
|
||||
options.writelocations = 0
|
||||
elif opt in ('-S', '--style'):
|
||||
options.locationstyle = locations.get(arg.lower())
|
||||
if options.locationstyle is None:
|
||||
usage(1, _('Invalid value for --style: %s') % arg)
|
||||
elif opt in ('-o', '--output'):
|
||||
options.outfile = arg
|
||||
elif opt in ('-p', '--output-dir'):
|
||||
options.outpath = arg
|
||||
elif opt in ('-v', '--verbose'):
|
||||
options.verbose = 1
|
||||
elif opt in ('-V', '--version'):
|
||||
print(_('pygettext.py (xgettext for Python) %s') % __version__)
|
||||
sys.exit(0)
|
||||
elif opt in ('-w', '--width'):
|
||||
try:
|
||||
options.width = int(arg)
|
||||
except ValueError:
|
||||
usage(1, _('--width argument must be an integer: %s') % arg)
|
||||
elif opt in ('-x', '--exclude-file'):
|
||||
options.excludefilename = arg
|
||||
elif opt in ('-X', '--no-docstrings'):
|
||||
fp = open(arg)
|
||||
try:
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
break
|
||||
options.nodocstrings[line[:-1]] = 1
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
# calculate escapes
|
||||
make_escapes(not options.escape)
|
||||
|
||||
# calculate all keywords
|
||||
options.keywords.extend(default_keywords)
|
||||
|
||||
# initialize list of strings to exclude
|
||||
if options.excludefilename:
|
||||
try:
|
||||
fp = open(options.excludefilename)
|
||||
options.toexclude = fp.readlines()
|
||||
fp.close()
|
||||
except IOError:
|
||||
print(_(
|
||||
"Can't read --exclude-file: %s") % options.excludefilename, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
else:
|
||||
options.toexclude = []
|
||||
|
||||
# resolve args to module lists
|
||||
expanded = []
|
||||
for arg in args:
|
||||
if arg == '-':
|
||||
expanded.append(arg)
|
||||
else:
|
||||
expanded.extend(getFilesForName(arg))
|
||||
args = expanded
|
||||
|
||||
# slurp through all the files
|
||||
eater = TokenEater(options)
|
||||
for filename in args:
|
||||
if filename == '-':
|
||||
if options.verbose:
|
||||
print(_('Reading standard input'))
|
||||
fp = sys.stdin.buffer
|
||||
closep = 0
|
||||
else:
|
||||
if options.verbose:
|
||||
print(_('Working on %s') % filename)
|
||||
fp = open(filename, 'rb')
|
||||
closep = 1
|
||||
try:
|
||||
eater.set_filename(filename)
|
||||
try:
|
||||
tokens = tokenize.tokenize(fp.readline)
|
||||
for _token in tokens:
|
||||
eater(*_token)
|
||||
except tokenize.TokenError as e:
|
||||
print('%s: %s, line %d, column %d' % (
|
||||
e.args[0], filename, e.args[1][0], e.args[1][1]),
|
||||
file=sys.stderr)
|
||||
finally:
|
||||
if closep:
|
||||
fp.close()
|
||||
|
||||
# write the output
|
||||
if options.outfile == '-':
|
||||
fp = sys.stdout
|
||||
closep = 0
|
||||
else:
|
||||
if options.outpath:
|
||||
options.outfile = os.path.join(options.outpath, options.outfile)
|
||||
fp = open(options.outfile, 'w')
|
||||
closep = 1
|
||||
try:
|
||||
eater.write(fp)
|
||||
finally:
|
||||
if closep:
|
||||
fp.close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
# some more test strings
|
||||
# this one creates a warning
|
||||
_('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
|
||||
_('more' 'than' 'one' 'string')
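
The file-slurping loop above simply opens each source file in binary mode and feeds every token to the TokenEater via eater(*_token). A minimal sketch of that same pattern, with a plain function standing in for the eater and a hypothetical input file name:

import tokenize

def show_token(ttype, tstring, start, end, line):
    # same five positional arguments the eater receives via eater(*_token)
    print(tokenize.tok_name[ttype], repr(tstring), start)

with open('some_module.py', 'rb') as fp:        # hypothetical file
    for tok in tokenize.tokenize(fp.readline):  # yields 5-tuples
        show_token(*tok)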
|
||||
706
Tools/parser/unparse.py
Normal file
@@ -0,0 +1,706 @@
"Usage: unparse.py <path to source file>"
|
||||
import sys
|
||||
import ast
|
||||
import tokenize
|
||||
import io
|
||||
import os
|
||||
|
||||
# Large float and imaginary literals get turned into infinities in the AST.
|
||||
# We unparse those infinities to INFSTR.
|
||||
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
|
||||
|
||||
def interleave(inter, f, seq):
|
||||
"""Call f on each item in seq, calling inter() in between.
|
||||
"""
|
||||
seq = iter(seq)
|
||||
try:
|
||||
f(next(seq))
|
||||
except StopIteration:
|
||||
pass
|
||||
else:
|
||||
for x in seq:
|
||||
inter()
|
||||
f(x)
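
A minimal usage sketch of interleave(), assuming an io.StringIO sink; the separator callback runs only between items, never after the last one:

import io

out = io.StringIO()
interleave(lambda: out.write(", "), out.write, ["a", "b", "c"])
print(out.getvalue())   # prints: a, b, c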
|
||||
|
||||
class Unparser:
|
||||
"""Methods in this class recursively traverse an AST and
|
||||
output source code for the abstract syntax; original formatting
|
||||
is disregarded. """
|
||||
|
||||
def __init__(self, tree, file = sys.stdout):
|
||||
"""Unparser(tree, file=sys.stdout) -> None.
|
||||
Print the source for tree to file."""
|
||||
self.f = file
|
||||
self._indent = 0
|
||||
self.dispatch(tree)
|
||||
print("", file=self.f)
|
||||
self.f.flush()
|
||||
|
||||
def fill(self, text = ""):
|
||||
"Indent a piece of text, according to the current indentation level"
|
||||
self.f.write("\n"+" "*self._indent + text)
|
||||
|
||||
def write(self, text):
|
||||
"Append a piece of text to the current line."
|
||||
self.f.write(text)
|
||||
|
||||
def enter(self):
|
||||
"Print ':', and increase the indentation."
|
||||
self.write(":")
|
||||
self._indent += 1
|
||||
|
||||
def leave(self):
|
||||
"Decrease the indentation level."
|
||||
self._indent -= 1
|
||||
|
||||
def dispatch(self, tree):
|
||||
"Dispatcher function, dispatching tree type T to method _T."
|
||||
if isinstance(tree, list):
|
||||
for t in tree:
|
||||
self.dispatch(t)
|
||||
return
|
||||
meth = getattr(self, "_"+tree.__class__.__name__)
|
||||
meth(tree)
|
||||
|
||||
|
||||
############### Unparsing methods ######################
|
||||
# There should be one method per concrete grammar type #
|
||||
# Constructors should be grouped by sum type. Ideally, #
|
||||
# this would follow the order in the grammar, but #
|
||||
# currently doesn't. #
|
||||
########################################################
|
||||
|
||||
def _Module(self, tree):
|
||||
for stmt in tree.body:
|
||||
self.dispatch(stmt)
|
||||
|
||||
# stmt
|
||||
def _Expr(self, tree):
|
||||
self.fill()
|
||||
self.dispatch(tree.value)
|
||||
|
||||
def _Import(self, t):
|
||||
self.fill("import ")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.names)
|
||||
|
||||
def _ImportFrom(self, t):
|
||||
self.fill("from ")
|
||||
self.write("." * t.level)
|
||||
if t.module:
|
||||
self.write(t.module)
|
||||
self.write(" import ")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.names)
|
||||
|
||||
def _Assign(self, t):
|
||||
self.fill()
|
||||
for target in t.targets:
|
||||
self.dispatch(target)
|
||||
self.write(" = ")
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _AugAssign(self, t):
|
||||
self.fill()
|
||||
self.dispatch(t.target)
|
||||
self.write(" "+self.binop[t.op.__class__.__name__]+"= ")
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _AnnAssign(self, t):
|
||||
self.fill()
|
||||
if not t.simple and isinstance(t.target, ast.Name):
|
||||
self.write('(')
|
||||
self.dispatch(t.target)
|
||||
if not t.simple and isinstance(t.target, ast.Name):
|
||||
self.write(')')
|
||||
self.write(": ")
|
||||
self.dispatch(t.annotation)
|
||||
if t.value:
|
||||
self.write(" = ")
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _Return(self, t):
|
||||
self.fill("return")
|
||||
if t.value:
|
||||
self.write(" ")
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _Pass(self, t):
|
||||
self.fill("pass")
|
||||
|
||||
def _Break(self, t):
|
||||
self.fill("break")
|
||||
|
||||
def _Continue(self, t):
|
||||
self.fill("continue")
|
||||
|
||||
def _Delete(self, t):
|
||||
self.fill("del ")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.targets)
|
||||
|
||||
def _Assert(self, t):
|
||||
self.fill("assert ")
|
||||
self.dispatch(t.test)
|
||||
if t.msg:
|
||||
self.write(", ")
|
||||
self.dispatch(t.msg)
|
||||
|
||||
def _Global(self, t):
|
||||
self.fill("global ")
|
||||
interleave(lambda: self.write(", "), self.write, t.names)
|
||||
|
||||
def _Nonlocal(self, t):
|
||||
self.fill("nonlocal ")
|
||||
interleave(lambda: self.write(", "), self.write, t.names)
|
||||
|
||||
def _Await(self, t):
|
||||
self.write("(")
|
||||
self.write("await")
|
||||
if t.value:
|
||||
self.write(" ")
|
||||
self.dispatch(t.value)
|
||||
self.write(")")
|
||||
|
||||
def _Yield(self, t):
|
||||
self.write("(")
|
||||
self.write("yield")
|
||||
if t.value:
|
||||
self.write(" ")
|
||||
self.dispatch(t.value)
|
||||
self.write(")")
|
||||
|
||||
def _YieldFrom(self, t):
|
||||
self.write("(")
|
||||
self.write("yield from")
|
||||
if t.value:
|
||||
self.write(" ")
|
||||
self.dispatch(t.value)
|
||||
self.write(")")
|
||||
|
||||
def _Raise(self, t):
|
||||
self.fill("raise")
|
||||
if not t.exc:
|
||||
assert not t.cause
|
||||
return
|
||||
self.write(" ")
|
||||
self.dispatch(t.exc)
|
||||
if t.cause:
|
||||
self.write(" from ")
|
||||
self.dispatch(t.cause)
|
||||
|
||||
def _Try(self, t):
|
||||
self.fill("try")
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
for ex in t.handlers:
|
||||
self.dispatch(ex)
|
||||
if t.orelse:
|
||||
self.fill("else")
|
||||
self.enter()
|
||||
self.dispatch(t.orelse)
|
||||
self.leave()
|
||||
if t.finalbody:
|
||||
self.fill("finally")
|
||||
self.enter()
|
||||
self.dispatch(t.finalbody)
|
||||
self.leave()
|
||||
|
||||
def _ExceptHandler(self, t):
|
||||
self.fill("except")
|
||||
if t.type:
|
||||
self.write(" ")
|
||||
self.dispatch(t.type)
|
||||
if t.name:
|
||||
self.write(" as ")
|
||||
self.write(t.name)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
|
||||
def _ClassDef(self, t):
|
||||
self.write("\n")
|
||||
for deco in t.decorator_list:
|
||||
self.fill("@")
|
||||
self.dispatch(deco)
|
||||
self.fill("class "+t.name)
|
||||
self.write("(")
|
||||
comma = False
|
||||
for e in t.bases:
|
||||
if comma: self.write(", ")
|
||||
else: comma = True
|
||||
self.dispatch(e)
|
||||
for e in t.keywords:
|
||||
if comma: self.write(", ")
|
||||
else: comma = True
|
||||
self.dispatch(e)
|
||||
self.write(")")
|
||||
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
|
||||
def _FunctionDef(self, t):
|
||||
self.__FunctionDef_helper(t, "def")
|
||||
|
||||
def _AsyncFunctionDef(self, t):
|
||||
self.__FunctionDef_helper(t, "async def")
|
||||
|
||||
def __FunctionDef_helper(self, t, fill_suffix):
|
||||
self.write("\n")
|
||||
for deco in t.decorator_list:
|
||||
self.fill("@")
|
||||
self.dispatch(deco)
|
||||
def_str = fill_suffix+" "+t.name + "("
|
||||
self.fill(def_str)
|
||||
self.dispatch(t.args)
|
||||
self.write(")")
|
||||
if t.returns:
|
||||
self.write(" -> ")
|
||||
self.dispatch(t.returns)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
|
||||
def _For(self, t):
|
||||
self.__For_helper("for ", t)
|
||||
|
||||
def _AsyncFor(self, t):
|
||||
self.__For_helper("async for ", t)
|
||||
|
||||
def __For_helper(self, fill, t):
|
||||
self.fill(fill)
|
||||
self.dispatch(t.target)
|
||||
self.write(" in ")
|
||||
self.dispatch(t.iter)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
if t.orelse:
|
||||
self.fill("else")
|
||||
self.enter()
|
||||
self.dispatch(t.orelse)
|
||||
self.leave()
|
||||
|
||||
def _If(self, t):
|
||||
self.fill("if ")
|
||||
self.dispatch(t.test)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
# collapse nested ifs into equivalent elifs.
|
||||
while (t.orelse and len(t.orelse) == 1 and
|
||||
isinstance(t.orelse[0], ast.If)):
|
||||
t = t.orelse[0]
|
||||
self.fill("elif ")
|
||||
self.dispatch(t.test)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
# final else
|
||||
if t.orelse:
|
||||
self.fill("else")
|
||||
self.enter()
|
||||
self.dispatch(t.orelse)
|
||||
self.leave()
|
||||
|
||||
def _While(self, t):
|
||||
self.fill("while ")
|
||||
self.dispatch(t.test)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
if t.orelse:
|
||||
self.fill("else")
|
||||
self.enter()
|
||||
self.dispatch(t.orelse)
|
||||
self.leave()
|
||||
|
||||
def _With(self, t):
|
||||
self.fill("with ")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.items)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
|
||||
def _AsyncWith(self, t):
|
||||
self.fill("async with ")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.items)
|
||||
self.enter()
|
||||
self.dispatch(t.body)
|
||||
self.leave()
|
||||
|
||||
# expr
|
||||
def _Bytes(self, t):
|
||||
self.write(repr(t.s))
|
||||
|
||||
def _Str(self, tree):
|
||||
self.write(repr(tree.s))
|
||||
|
||||
def _JoinedStr(self, t):
|
||||
self.write("f")
|
||||
string = io.StringIO()
|
||||
self._fstring_JoinedStr(t, string.write)
|
||||
self.write(repr(string.getvalue()))
|
||||
|
||||
def _FormattedValue(self, t):
|
||||
self.write("f")
|
||||
string = io.StringIO()
|
||||
self._fstring_FormattedValue(t, string.write)
|
||||
self.write(repr(string.getvalue()))
|
||||
|
||||
def _fstring_JoinedStr(self, t, write):
|
||||
for value in t.values:
|
||||
meth = getattr(self, "_fstring_" + type(value).__name__)
|
||||
meth(value, write)
|
||||
|
||||
def _fstring_Str(self, t, write):
|
||||
value = t.s.replace("{", "{{").replace("}", "}}")
|
||||
write(value)
|
||||
|
||||
def _fstring_Constant(self, t, write):
|
||||
assert isinstance(t.value, str)
|
||||
value = t.value.replace("{", "{{").replace("}", "}}")
|
||||
write(value)
|
||||
|
||||
def _fstring_FormattedValue(self, t, write):
|
||||
write("{")
|
||||
expr = io.StringIO()
|
||||
Unparser(t.value, expr)
|
||||
expr = expr.getvalue().rstrip("\n")
|
||||
if expr.startswith("{"):
|
||||
write(" ") # Separate pair of opening brackets as "{ {"
|
||||
write(expr)
|
||||
if t.conversion != -1:
|
||||
conversion = chr(t.conversion)
|
||||
assert conversion in "sra"
|
||||
write(f"!{conversion}")
|
||||
if t.format_spec:
|
||||
write(":")
|
||||
meth = getattr(self, "_fstring_" + type(t.format_spec).__name__)
|
||||
meth(t.format_spec, write)
|
||||
write("}")
|
||||
|
||||
def _Name(self, t):
|
||||
self.write(t.id)
|
||||
|
||||
def _write_constant(self, value):
|
||||
if isinstance(value, (float, complex)):
|
||||
self.write(repr(value).replace("inf", INFSTR))
|
||||
else:
|
||||
self.write(repr(value))
|
||||
|
||||
def _Constant(self, t):
|
||||
value = t.value
|
||||
if isinstance(value, tuple):
|
||||
self.write("(")
|
||||
if len(value) == 1:
|
||||
self._write_constant(value[0])
|
||||
self.write(",")
|
||||
else:
|
||||
interleave(lambda: self.write(", "), self._write_constant, value)
|
||||
self.write(")")
|
||||
else:
|
||||
self._write_constant(t.value)
|
||||
|
||||
def _NameConstant(self, t):
|
||||
self.write(repr(t.value))
|
||||
|
||||
def _Num(self, t):
|
||||
# Substitute overflowing decimal literal for AST infinities.
|
||||
self.write(repr(t.n).replace("inf", INFSTR))
|
||||
|
||||
def _List(self, t):
|
||||
self.write("[")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.elts)
|
||||
self.write("]")
|
||||
|
||||
def _ListComp(self, t):
|
||||
self.write("[")
|
||||
self.dispatch(t.elt)
|
||||
for gen in t.generators:
|
||||
self.dispatch(gen)
|
||||
self.write("]")
|
||||
|
||||
def _GeneratorExp(self, t):
|
||||
self.write("(")
|
||||
self.dispatch(t.elt)
|
||||
for gen in t.generators:
|
||||
self.dispatch(gen)
|
||||
self.write(")")
|
||||
|
||||
def _SetComp(self, t):
|
||||
self.write("{")
|
||||
self.dispatch(t.elt)
|
||||
for gen in t.generators:
|
||||
self.dispatch(gen)
|
||||
self.write("}")
|
||||
|
||||
def _DictComp(self, t):
|
||||
self.write("{")
|
||||
self.dispatch(t.key)
|
||||
self.write(": ")
|
||||
self.dispatch(t.value)
|
||||
for gen in t.generators:
|
||||
self.dispatch(gen)
|
||||
self.write("}")
|
||||
|
||||
def _comprehension(self, t):
|
||||
if t.is_async:
|
||||
self.write(" async for ")
|
||||
else:
|
||||
self.write(" for ")
|
||||
self.dispatch(t.target)
|
||||
self.write(" in ")
|
||||
self.dispatch(t.iter)
|
||||
for if_clause in t.ifs:
|
||||
self.write(" if ")
|
||||
self.dispatch(if_clause)
|
||||
|
||||
def _IfExp(self, t):
|
||||
self.write("(")
|
||||
self.dispatch(t.body)
|
||||
self.write(" if ")
|
||||
self.dispatch(t.test)
|
||||
self.write(" else ")
|
||||
self.dispatch(t.orelse)
|
||||
self.write(")")
|
||||
|
||||
def _Set(self, t):
|
||||
assert(t.elts) # should be at least one element
|
||||
self.write("{")
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.elts)
|
||||
self.write("}")
|
||||
|
||||
def _Dict(self, t):
|
||||
self.write("{")
|
||||
def write_key_value_pair(k, v):
|
||||
self.dispatch(k)
|
||||
self.write(": ")
|
||||
self.dispatch(v)
|
||||
|
||||
def write_item(item):
|
||||
k, v = item
|
||||
if k is None:
|
||||
# for dictionary unpacking operator in dicts {**{'y': 2}}
|
||||
# see PEP 448 for details
|
||||
self.write("**")
|
||||
self.dispatch(v)
|
||||
else:
|
||||
write_key_value_pair(k, v)
|
||||
interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values))
|
||||
self.write("}")
|
||||
|
||||
def _Tuple(self, t):
|
||||
self.write("(")
|
||||
if len(t.elts) == 1:
|
||||
elt = t.elts[0]
|
||||
self.dispatch(elt)
|
||||
self.write(",")
|
||||
else:
|
||||
interleave(lambda: self.write(", "), self.dispatch, t.elts)
|
||||
self.write(")")
|
||||
|
||||
unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
|
||||
def _UnaryOp(self, t):
|
||||
self.write("(")
|
||||
self.write(self.unop[t.op.__class__.__name__])
|
||||
self.write(" ")
|
||||
self.dispatch(t.operand)
|
||||
self.write(")")
|
||||
|
||||
binop = { "Add":"+", "Sub":"-", "Mult":"*", "MatMult":"@", "Div":"/", "Mod":"%",
|
||||
"LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
|
||||
"FloorDiv":"//", "Pow": "**"}
|
||||
def _BinOp(self, t):
|
||||
self.write("(")
|
||||
self.dispatch(t.left)
|
||||
self.write(" " + self.binop[t.op.__class__.__name__] + " ")
|
||||
self.dispatch(t.right)
|
||||
self.write(")")
|
||||
|
||||
cmpops = {"Eq":"==", "NotEq":"!=", "Lt":"<", "LtE":"<=", "Gt":">", "GtE":">=",
|
||||
"Is":"is", "IsNot":"is not", "In":"in", "NotIn":"not in"}
|
||||
def _Compare(self, t):
|
||||
self.write("(")
|
||||
self.dispatch(t.left)
|
||||
for o, e in zip(t.ops, t.comparators):
|
||||
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
|
||||
self.dispatch(e)
|
||||
self.write(")")
|
||||
|
||||
boolops = {ast.And: 'and', ast.Or: 'or'}
|
||||
def _BoolOp(self, t):
|
||||
self.write("(")
|
||||
s = " %s " % self.boolops[t.op.__class__]
|
||||
interleave(lambda: self.write(s), self.dispatch, t.values)
|
||||
self.write(")")
|
||||
|
||||
def _Attribute(self,t):
|
||||
self.dispatch(t.value)
|
||||
# Special case: 3.__abs__() is a syntax error, so if t.value
|
||||
# is an integer literal then we need to either parenthesize
|
||||
# it or add an extra space to get 3 .__abs__().
|
||||
if ((isinstance(t.value, ast.Num) and isinstance(t.value.n, int))
|
||||
or (isinstance(t.value, ast.Constant) and isinstance(t.value.value, int))):
|
||||
self.write(" ")
|
||||
self.write(".")
|
||||
self.write(t.attr)
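
The extra space matters because a dot written straight after an integer literal is read as part of a float. A quick check, outside the unparser itself:

import ast

ast.parse("3 .bit_length()")     # parses fine: attribute access on the int literal
# ast.parse("3.bit_length()")    # SyntaxError: "3." is tokenized as a float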
|
||||
|
||||
def _Call(self, t):
|
||||
self.dispatch(t.func)
|
||||
self.write("(")
|
||||
comma = False
|
||||
for e in t.args:
|
||||
if comma: self.write(", ")
|
||||
else: comma = True
|
||||
self.dispatch(e)
|
||||
for e in t.keywords:
|
||||
if comma: self.write(", ")
|
||||
else: comma = True
|
||||
self.dispatch(e)
|
||||
self.write(")")
|
||||
|
||||
def _Subscript(self, t):
|
||||
self.dispatch(t.value)
|
||||
self.write("[")
|
||||
self.dispatch(t.slice)
|
||||
self.write("]")
|
||||
|
||||
def _Starred(self, t):
|
||||
self.write("*")
|
||||
self.dispatch(t.value)
|
||||
|
||||
# slice
|
||||
def _Ellipsis(self, t):
|
||||
self.write("...")
|
||||
|
||||
def _Index(self, t):
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _Slice(self, t):
|
||||
if t.lower:
|
||||
self.dispatch(t.lower)
|
||||
self.write(":")
|
||||
if t.upper:
|
||||
self.dispatch(t.upper)
|
||||
if t.step:
|
||||
self.write(":")
|
||||
self.dispatch(t.step)
|
||||
|
||||
def _ExtSlice(self, t):
|
||||
interleave(lambda: self.write(', '), self.dispatch, t.dims)
|
||||
|
||||
# argument
|
||||
def _arg(self, t):
|
||||
self.write(t.arg)
|
||||
if t.annotation:
|
||||
self.write(": ")
|
||||
self.dispatch(t.annotation)
|
||||
|
||||
# others
|
||||
def _arguments(self, t):
|
||||
first = True
|
||||
# normal arguments
|
||||
defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults
|
||||
for a, d in zip(t.args, defaults):
|
||||
if first: first = False
|
||||
else: self.write(", ")
|
||||
self.dispatch(a)
|
||||
if d:
|
||||
self.write("=")
|
||||
self.dispatch(d)
|
||||
|
||||
# varargs, or bare '*' if no varargs but keyword-only arguments present
|
||||
if t.vararg or t.kwonlyargs:
|
||||
if first: first = False
|
||||
else: self.write(", ")
|
||||
self.write("*")
|
||||
if t.vararg:
|
||||
self.write(t.vararg.arg)
|
||||
if t.vararg.annotation:
|
||||
self.write(": ")
|
||||
self.dispatch(t.vararg.annotation)
|
||||
|
||||
# keyword-only arguments
|
||||
if t.kwonlyargs:
|
||||
for a, d in zip(t.kwonlyargs, t.kw_defaults):
|
||||
if first: first = False
|
||||
else: self.write(", ")
|
||||
self.dispatch(a)
|
||||
if d:
|
||||
self.write("=")
|
||||
self.dispatch(d)
|
||||
|
||||
# kwargs
|
||||
if t.kwarg:
|
||||
if first: first = False
|
||||
else: self.write(", ")
|
||||
self.write("**"+t.kwarg.arg)
|
||||
if t.kwarg.annotation:
|
||||
self.write(": ")
|
||||
self.dispatch(t.kwarg.annotation)
|
||||
|
||||
def _keyword(self, t):
|
||||
if t.arg is None:
|
||||
self.write("**")
|
||||
else:
|
||||
self.write(t.arg)
|
||||
self.write("=")
|
||||
self.dispatch(t.value)
|
||||
|
||||
def _Lambda(self, t):
|
||||
self.write("(")
|
||||
self.write("lambda ")
|
||||
self.dispatch(t.args)
|
||||
self.write(": ")
|
||||
self.dispatch(t.body)
|
||||
self.write(")")
|
||||
|
||||
def _alias(self, t):
|
||||
self.write(t.name)
|
||||
if t.asname:
|
||||
self.write(" as "+t.asname)
|
||||
|
||||
def _withitem(self, t):
|
||||
self.dispatch(t.context_expr)
|
||||
if t.optional_vars:
|
||||
self.write(" as ")
|
||||
self.dispatch(t.optional_vars)
|
||||
|
||||
def roundtrip(filename, output=sys.stdout):
|
||||
with open(filename, "rb") as pyfile:
|
||||
encoding = tokenize.detect_encoding(pyfile.readline)[0]
|
||||
with open(filename, "r", encoding=encoding) as pyfile:
|
||||
source = pyfile.read()
|
||||
tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST)
|
||||
Unparser(tree, output)
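
roundtrip() works on files; driving Unparser from an in-memory string is the same idea. A small sketch, using the same compile(..., ast.PyCF_ONLY_AST) call as above and an io.StringIO sink:

import ast
import io

source = "x = 1\nif x:\n    print(x)\n"
tree = compile(source, "<string>", "exec", ast.PyCF_ONLY_AST)
buf = io.StringIO()
Unparser(tree, buf)
print(buf.getvalue())   # regenerated source; original formatting is not preserved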
|
||||
|
||||
|
||||
|
||||
def testdir(a):
|
||||
try:
|
||||
names = [n for n in os.listdir(a) if n.endswith('.py')]
|
||||
except OSError:
|
||||
print("Directory not readable: %s" % a, file=sys.stderr)
|
||||
else:
|
||||
for n in names:
|
||||
fullname = os.path.join(a, n)
|
||||
if os.path.isfile(fullname):
|
||||
output = io.StringIO()
|
||||
print('Testing %s' % fullname)
|
||||
try:
|
||||
roundtrip(fullname, output)
|
||||
except Exception as e:
|
||||
print(' Failed to compile, exception is %s' % repr(e))
|
||||
elif os.path.isdir(fullname):
|
||||
testdir(fullname)
|
||||
|
||||
def main(args):
|
||||
if args[0] == '--testdir':
|
||||
for a in args[1:]:
|
||||
testdir(a)
|
||||
else:
|
||||
for a in args:
|
||||
roundtrip(a)
|
||||
|
||||
if __name__=='__main__':
|
||||
main(sys.argv[1:])
|
||||
130
Tools/pynche/ChipViewer.py
Normal file
@@ -0,0 +1,130 @@
"""Chip viewer and widget.
|
||||
|
||||
In the lower left corner of the main Pynche window, you will see two
|
||||
ChipWidgets, one for the selected color and one for the nearest color. The
|
||||
selected color is the actual RGB value expressed as an X11 #COLOR name. The
|
||||
nearest color is the named color from the X11 database that is closest to the
|
||||
selected color in 3D space. There may be other colors equally close, but the
|
||||
nearest one is the first one found.
|
||||
|
||||
Clicking on the nearest color chip selects that named color.
|
||||
|
||||
The ChipViewer class includes the entire lower left quadrant; i.e. both the
|
||||
selected and nearest ChipWidgets.
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
import ColorDB
|
||||
|
||||
|
||||
class ChipWidget:
|
||||
_WIDTH = 150
|
||||
_HEIGHT = 80
|
||||
|
||||
def __init__(self,
|
||||
master = None,
|
||||
width = _WIDTH,
|
||||
height = _HEIGHT,
|
||||
text = 'Color',
|
||||
initialcolor = 'blue',
|
||||
presscmd = None,
|
||||
releasecmd = None):
|
||||
# create the text label
|
||||
self.__label = Label(master, text=text)
|
||||
self.__label.grid(row=0, column=0)
|
||||
# create the color chip, implemented as a frame
|
||||
self.__chip = Frame(master, relief=RAISED, borderwidth=2,
|
||||
width=width,
|
||||
height=height,
|
||||
background=initialcolor)
|
||||
self.__chip.grid(row=1, column=0)
|
||||
# create the color name
|
||||
self.__namevar = StringVar()
|
||||
self.__namevar.set(initialcolor)
|
||||
self.__name = Entry(master, textvariable=self.__namevar,
|
||||
relief=FLAT, justify=CENTER, state=DISABLED,
|
||||
font=self.__label['font'])
|
||||
self.__name.grid(row=2, column=0)
|
||||
# create the message area
|
||||
self.__msgvar = StringVar()
|
||||
self.__name = Entry(master, textvariable=self.__msgvar,
|
||||
relief=FLAT, justify=CENTER, state=DISABLED,
|
||||
font=self.__label['font'])
|
||||
self.__name.grid(row=3, column=0)
|
||||
# set bindings
|
||||
if presscmd:
|
||||
self.__chip.bind('<ButtonPress-1>', presscmd)
|
||||
if releasecmd:
|
||||
self.__chip.bind('<ButtonRelease-1>', releasecmd)
|
||||
|
||||
def set_color(self, color):
|
||||
self.__chip.config(background=color)
|
||||
|
||||
def get_color(self):
|
||||
return self.__chip['background']
|
||||
|
||||
def set_name(self, colorname):
|
||||
self.__namevar.set(colorname)
|
||||
|
||||
def set_message(self, message):
|
||||
self.__msgvar.set(message)
|
||||
|
||||
def press(self):
|
||||
self.__chip.configure(relief=SUNKEN)
|
||||
|
||||
def release(self):
|
||||
self.__chip.configure(relief=RAISED)
|
||||
|
||||
|
||||
|
||||
class ChipViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
self.__sb = switchboard
|
||||
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
|
||||
self.__frame.grid(row=3, column=0, ipadx=5, sticky='NSEW')
|
||||
# create the chip that will display the currently selected color
|
||||
# exactly
|
||||
self.__sframe = Frame(self.__frame)
|
||||
self.__sframe.grid(row=0, column=0)
|
||||
self.__selected = ChipWidget(self.__sframe, text='Selected')
|
||||
# create the chip that will display the nearest real X11 color
|
||||
# database color name
|
||||
self.__nframe = Frame(self.__frame)
|
||||
self.__nframe.grid(row=0, column=1)
|
||||
self.__nearest = ChipWidget(self.__nframe, text='Nearest',
|
||||
presscmd = self.__buttonpress,
|
||||
releasecmd = self.__buttonrelease)
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
# Selected always shows the #rrggbb name of the color, nearest always
|
||||
# shows the name of the nearest color in the database. BAW: should
|
||||
# an exact match be indicated in some way?
|
||||
#
|
||||
# Always use the #rrggbb style to actually set the color, since we may
|
||||
# not be using X color names (e.g. "web-safe" names)
|
||||
colordb = self.__sb.colordb()
|
||||
rgbtuple = (red, green, blue)
|
||||
rrggbb = ColorDB.triplet_to_rrggbb(rgbtuple)
|
||||
# find the nearest
|
||||
nearest = colordb.nearest(red, green, blue)
|
||||
nearest_tuple = colordb.find_byname(nearest)
|
||||
nearest_rrggbb = ColorDB.triplet_to_rrggbb(nearest_tuple)
|
||||
self.__selected.set_color(rrggbb)
|
||||
self.__nearest.set_color(nearest_rrggbb)
|
||||
# set the name and messages areas
|
||||
self.__selected.set_name(rrggbb)
|
||||
if rrggbb == nearest_rrggbb:
|
||||
self.__selected.set_message(nearest)
|
||||
else:
|
||||
self.__selected.set_message('')
|
||||
self.__nearest.set_name(nearest_rrggbb)
|
||||
self.__nearest.set_message(nearest)
|
||||
|
||||
def __buttonpress(self, event=None):
|
||||
self.__nearest.press()
|
||||
|
||||
def __buttonrelease(self, event=None):
|
||||
self.__nearest.release()
|
||||
rrggbb = self.__nearest.get_color()
|
||||
red, green, blue = ColorDB.rrggbb_to_triplet(rrggbb)
|
||||
self.__sb.update_views(red, green, blue)
|
||||
271
Tools/pynche/ColorDB.py
Normal file
@@ -0,0 +1,271 @@
"""Color Database.
|
||||
|
||||
This file contains one class, called ColorDB, and several utility functions.
|
||||
The class must be instantiated by the get_colordb() function in this file,
|
||||
passing it a filename to read a database out of.
|
||||
|
||||
The get_colordb() function will try to examine the file to figure out what the
|
||||
format of the file is. If it can't figure out the file format, or it has
|
||||
trouble reading the file, None is returned. You can pass get_colordb() an
|
||||
optional filetype argument.
|
||||
|
||||
Supported file types are:
|
||||
|
||||
X_RGB_TXT -- X Consortium rgb.txt format files. Three columns of numbers
|
||||
from 0 .. 255 separated by whitespace. Arbitrary trailing
|
||||
columns used as the color name.
|
||||
|
||||
The utility functions are useful for converting between the various expected
|
||||
color formats, and for calculating other color values.
|
||||
|
||||
"""
|
||||
|
||||
import sys
|
||||
import re
|
||||
from types import *
|
||||
|
||||
class BadColor(Exception):
|
||||
pass
|
||||
|
||||
DEFAULT_DB = None
|
||||
SPACE = ' '
|
||||
COMMASPACE = ', '
|
||||
|
||||
|
||||
|
||||
# generic class
|
||||
class ColorDB:
|
||||
def __init__(self, fp):
|
||||
lineno = 2
|
||||
self.__name = fp.name
|
||||
# Maintain several dictionaries for indexing into the color database.
|
||||
# Note that while Tk supports RGB intensities of 4, 8, 12, or 16 bits,
|
||||
# for now we only support 8 bit intensities. At least on OpenWindows,
|
||||
# all intensities in the /usr/openwin/lib/rgb.txt file are 8-bit
|
||||
#
|
||||
# key is (red, green, blue) tuple, value is (name, [aliases])
|
||||
self.__byrgb = {}
|
||||
# key is name, value is (red, green, blue)
|
||||
self.__byname = {}
|
||||
# all unique names (non-aliases), built on demand
|
||||
self.__allnames = None
|
||||
for line in fp:
|
||||
# get this compiled regular expression from derived class
|
||||
mo = self._re.match(line)
|
||||
if not mo:
|
||||
print('Error in', fp.name, ' line', lineno, file=sys.stderr)
|
||||
lineno += 1
|
||||
continue
|
||||
# extract the red, green, blue, and name
|
||||
red, green, blue = self._extractrgb(mo)
|
||||
name = self._extractname(mo)
|
||||
keyname = name.lower()
|
||||
# BAW: for now the `name' is just the first named color with the
|
||||
# rgb values we find. Later, we might want to make the two word
|
||||
# version the `name', or the CapitalizedVersion, etc.
|
||||
key = (red, green, blue)
|
||||
foundname, aliases = self.__byrgb.get(key, (name, []))
|
||||
if foundname != name and foundname not in aliases:
|
||||
aliases.append(name)
|
||||
self.__byrgb[key] = (foundname, aliases)
|
||||
# add to byname lookup
|
||||
self.__byname[keyname] = key
|
||||
lineno = lineno + 1
|
||||
|
||||
# override in derived classes
|
||||
def _extractrgb(self, mo):
|
||||
return [int(x) for x in mo.group('red', 'green', 'blue')]
|
||||
|
||||
def _extractname(self, mo):
|
||||
return mo.group('name')
|
||||
|
||||
def filename(self):
|
||||
return self.__name
|
||||
|
||||
def find_byrgb(self, rgbtuple):
|
||||
"""Return name for rgbtuple"""
|
||||
try:
|
||||
return self.__byrgb[rgbtuple]
|
||||
except KeyError:
|
||||
raise BadColor(rgbtuple) from None
|
||||
|
||||
def find_byname(self, name):
|
||||
"""Return (red, green, blue) for name"""
|
||||
name = name.lower()
|
||||
try:
|
||||
return self.__byname[name]
|
||||
except KeyError:
|
||||
raise BadColor(name) from None
|
||||
|
||||
def nearest(self, red, green, blue):
|
||||
"""Return the name of color nearest (red, green, blue)"""
|
||||
# BAW: should we use Voronoi diagrams, Delaunay triangulation, or
|
||||
# octree for speeding up the locating of nearest point? Exhaustive
|
||||
# search is inefficient, but seems fast enough.
|
||||
nearest = -1
|
||||
nearest_name = ''
|
||||
for name, aliases in self.__byrgb.values():
|
||||
r, g, b = self.__byname[name.lower()]
|
||||
rdelta = red - r
|
||||
gdelta = green - g
|
||||
bdelta = blue - b
|
||||
distance = rdelta * rdelta + gdelta * gdelta + bdelta * bdelta
|
||||
if nearest == -1 or distance < nearest:
|
||||
nearest = distance
|
||||
nearest_name = name
|
||||
return nearest_name
|
||||
|
||||
def unique_names(self):
|
||||
# sorted
|
||||
if not self.__allnames:
|
||||
self.__allnames = []
|
||||
for name, aliases in self.__byrgb.values():
|
||||
self.__allnames.append(name)
|
||||
self.__allnames.sort(key=str.lower)
|
||||
return self.__allnames
|
||||
|
||||
def aliases_of(self, red, green, blue):
|
||||
try:
|
||||
name, aliases = self.__byrgb[(red, green, blue)]
|
||||
except KeyError:
|
||||
raise BadColor((red, green, blue)) from None
|
||||
return [name] + aliases
|
||||
|
||||
|
||||
class RGBColorDB(ColorDB):
|
||||
_re = re.compile(
|
||||
r'\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
|
||||
|
||||
|
||||
class HTML40DB(ColorDB):
|
||||
_re = re.compile(r'(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
||||
|
||||
def _extractrgb(self, mo):
|
||||
return rrggbb_to_triplet(mo.group('hexrgb'))
|
||||
|
||||
class LightlinkDB(HTML40DB):
|
||||
_re = re.compile(r'(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
|
||||
|
||||
def _extractname(self, mo):
|
||||
return mo.group('name').strip()
|
||||
|
||||
class WebsafeDB(ColorDB):
|
||||
_re = re.compile('(?P<hexrgb>#[0-9a-fA-F]{6})')
|
||||
|
||||
def _extractrgb(self, mo):
|
||||
return rrggbb_to_triplet(mo.group('hexrgb'))
|
||||
|
||||
def _extractname(self, mo):
|
||||
return mo.group('hexrgb').upper()
|
||||
|
||||
|
||||
|
||||
# format is a tuple (RE, CLASS) where RE is a compiled regular
|
||||
# expression matched against the file's first line, and CLASS is
|
||||
# the class to instantiate if a match is found
|
||||
|
||||
FILETYPES = [
|
||||
(re.compile('Xorg'), RGBColorDB),
|
||||
(re.compile('XConsortium'), RGBColorDB),
|
||||
(re.compile('HTML'), HTML40DB),
|
||||
(re.compile('lightlink'), LightlinkDB),
|
||||
(re.compile('Websafe'), WebsafeDB),
|
||||
]
|
||||
|
||||
def get_colordb(file, filetype=None):
|
||||
colordb = None
|
||||
fp = open(file)
|
||||
try:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
return None
|
||||
# try to determine the type of RGB file it is
|
||||
if filetype is None:
|
||||
filetypes = FILETYPES
|
||||
else:
|
||||
filetypes = [filetype]
|
||||
for typere, class_ in filetypes:
|
||||
mo = typere.search(line)
|
||||
if mo:
|
||||
break
|
||||
else:
|
||||
# no matching type
|
||||
return None
|
||||
# we know the type and the class to grok the type, so suck it in
|
||||
colordb = class_(fp)
|
||||
finally:
|
||||
fp.close()
|
||||
# save a global copy
|
||||
global DEFAULT_DB
|
||||
DEFAULT_DB = colordb
|
||||
return colordb
|
||||
|
||||
|
||||
|
||||
_namedict = {}
|
||||
|
||||
def rrggbb_to_triplet(color):
|
||||
"""Converts a #rrggbb color to the tuple (red, green, blue)."""
|
||||
rgbtuple = _namedict.get(color)
|
||||
if rgbtuple is None:
|
||||
if color[0] != '#':
|
||||
raise BadColor(color)
|
||||
red = color[1:3]
|
||||
green = color[3:5]
|
||||
blue = color[5:7]
|
||||
rgbtuple = int(red, 16), int(green, 16), int(blue, 16)
|
||||
_namedict[color] = rgbtuple
|
||||
return rgbtuple
|
||||
|
||||
|
||||
_tripdict = {}
|
||||
def triplet_to_rrggbb(rgbtuple):
|
||||
"""Converts a (red, green, blue) tuple to #rrggbb."""
|
||||
global _tripdict
|
||||
hexname = _tripdict.get(rgbtuple)
|
||||
if hexname is None:
|
||||
hexname = '#%02x%02x%02x' % rgbtuple
|
||||
_tripdict[rgbtuple] = hexname
|
||||
return hexname
|
||||
|
||||
|
||||
def triplet_to_fractional_rgb(rgbtuple):
|
||||
return [x / 256 for x in rgbtuple]
|
||||
|
||||
|
||||
def triplet_to_brightness(rgbtuple):
|
||||
# return the brightness (grey level) along the scale 0.0==black to
|
||||
# 1.0==white
|
||||
r = 0.299
|
||||
g = 0.587
|
||||
b = 0.114
|
||||
return r*rgbtuple[0] + g*rgbtuple[1] + b*rgbtuple[2]
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
colordb = get_colordb('/usr/openwin/lib/rgb.txt')
|
||||
if not colordb:
|
||||
print('No parseable color database found')
|
||||
sys.exit(1)
|
||||
# on my system, this color matches exactly
|
||||
target = 'navy'
|
||||
red, green, blue = rgbtuple = colordb.find_byname(target)
|
||||
print(target, ':', red, green, blue, triplet_to_rrggbb(rgbtuple))
|
||||
name, aliases = colordb.find_byrgb(rgbtuple)
|
||||
print('name:', name, 'aliases:', COMMASPACE.join(aliases))
|
||||
r, g, b = (1, 1, 128) # nearest to navy
|
||||
r, g, b = (145, 238, 144) # nearest to lightgreen
|
||||
r, g, b = (255, 251, 250) # snow
|
||||
print('finding nearest to', target, '...')
|
||||
import time
|
||||
t0 = time.time()
|
||||
nearest = colordb.nearest(r, g, b)
|
||||
t1 = time.time()
|
||||
print('found nearest color', nearest, 'in', t1-t0, 'seconds')
|
||||
# dump the database
|
||||
for n in colordb.unique_names():
|
||||
r, g, b = colordb.find_byname(n)
|
||||
aliases = colordb.aliases_of(r, g, b)
|
||||
print('%20s: (%3d/%3d/%3d) == %s' % (n, r, g, b,
|
||||
SPACE.join(aliases[1:])))
|
||||
273
Tools/pynche/DetailsViewer.py
Normal file
@@ -0,0 +1,273 @@
"""DetailsViewer class.
|
||||
|
||||
This class implements a pure input window which allows you to meticulously
|
||||
edit the current color. You have both mouse control of the color (via the
|
||||
buttons along the bottom row), and there are keyboard bindings for each of the
|
||||
increment/decrement buttons.
|
||||
|
||||
The top three check buttons allow you to specify which of the three color
|
||||
variations are tied together when incrementing and decrementing. Red, green,
|
||||
and blue are self evident. By tying together red and green, you can modify
|
||||
the yellow level of the color. By tying together red and blue, you can modify
|
||||
the magenta level of the color. By tying together green and blue, you can
|
||||
modify the cyan level, and by tying all three together, you can modify the
|
||||
grey level.
|
||||
|
||||
The behavior at the boundaries (0 and 255) is defined by the `At boundary'
|
||||
option menu:
|
||||
|
||||
Stop
|
||||
When the increment or decrement would send any of the tied variations
|
||||
out of bounds, the entire delta is discarded.
|
||||
|
||||
Wrap Around
|
||||
When the increment or decrement would send any of the tied variations
|
||||
out of bounds, the out of bounds variation is wrapped around to the
|
||||
other side. Thus if red were at 238 and 25 were added to it, red
|
||||
would have the value 7.
|
||||
|
||||
Preserve Distance
|
||||
When the increment or decrement would send any of the tied variations
|
||||
out of bounds, all tied variations are wrapped as one, so as to
|
||||
preserve the distance between them. Thus if green and blue were tied,
|
||||
and green was at 238 while blue was at 223, and an increment of 25
|
||||
were applied, green would be at 15 and blue would be at 0.
|
||||
|
||||
Squash
|
||||
When the increment or decrement would send any of the tied variations
|
||||
out of bounds, the out of bounds variation is set to the ceiling of
|
||||
255 or floor of 0, as appropriate. In this way, all tied variations
|
||||
are squashed to one edge or the other.
|
||||
|
||||
The following key bindings can be used as accelerators. Note that Pynche can
|
||||
fall behind if you hold the key down and let it auto-repeat:
|
||||
|
||||
Left arrow == -1
|
||||
Right arrow == +1
|
||||
|
||||
Control + Left == -10
|
||||
Control + Right == +10
|
||||
|
||||
Shift + Left == -25
|
||||
Shift + Right == +25
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
|
||||
STOP = 'Stop'
|
||||
WRAP = 'Wrap Around'
|
||||
RATIO = 'Preserve Distance'
|
||||
GRAV = 'Squash'
|
||||
|
||||
ADDTOVIEW = 'Details Window...'
|
||||
|
||||
|
||||
class DetailsViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
self.__sb = switchboard
|
||||
optiondb = switchboard.optiondb()
|
||||
self.__red, self.__green, self.__blue = switchboard.current_rgb()
|
||||
# GUI
|
||||
root = self.__root = Toplevel(master, class_='Pynche')
|
||||
root.protocol('WM_DELETE_WINDOW', self.withdraw)
|
||||
root.title('Pynche Details Window')
|
||||
root.iconname('Pynche Details Window')
|
||||
root.bind('<Alt-q>', self.__quit)
|
||||
root.bind('<Alt-Q>', self.__quit)
|
||||
root.bind('<Alt-w>', self.withdraw)
|
||||
root.bind('<Alt-W>', self.withdraw)
|
||||
# accelerators
|
||||
root.bind('<KeyPress-Left>', self.__minus1)
|
||||
root.bind('<KeyPress-Right>', self.__plus1)
|
||||
root.bind('<Control-KeyPress-Left>', self.__minus10)
|
||||
root.bind('<Control-KeyPress-Right>', self.__plus10)
|
||||
root.bind('<Shift-KeyPress-Left>', self.__minus25)
|
||||
root.bind('<Shift-KeyPress-Right>', self.__plus25)
|
||||
#
|
||||
# color ties
|
||||
frame = self.__frame = Frame(root)
|
||||
frame.pack(expand=YES, fill=X)
|
||||
self.__l1 = Label(frame, text='Move Sliders:')
|
||||
self.__l1.grid(row=1, column=0, sticky=E)
|
||||
self.__rvar = IntVar()
|
||||
self.__rvar.set(optiondb.get('RSLIDER', 4))
|
||||
self.__radio1 = Checkbutton(frame, text='Red',
|
||||
variable=self.__rvar,
|
||||
command=self.__effect,
|
||||
onvalue=4, offvalue=0)
|
||||
self.__radio1.grid(row=1, column=1, sticky=W)
|
||||
self.__gvar = IntVar()
|
||||
self.__gvar.set(optiondb.get('GSLIDER', 2))
|
||||
self.__radio2 = Checkbutton(frame, text='Green',
|
||||
variable=self.__gvar,
|
||||
command=self.__effect,
|
||||
onvalue=2, offvalue=0)
|
||||
self.__radio2.grid(row=2, column=1, sticky=W)
|
||||
self.__bvar = IntVar()
|
||||
self.__bvar.set(optiondb.get('BSLIDER', 1))
|
||||
self.__radio3 = Checkbutton(frame, text='Blue',
|
||||
variable=self.__bvar,
|
||||
command=self.__effect,
|
||||
onvalue=1, offvalue=0)
|
||||
self.__radio3.grid(row=3, column=1, sticky=W)
|
||||
self.__l2 = Label(frame)
|
||||
self.__l2.grid(row=4, column=1, sticky=W)
|
||||
self.__effect()
|
||||
#
|
||||
# Boundary behavior
|
||||
self.__l3 = Label(frame, text='At boundary:')
|
||||
self.__l3.grid(row=5, column=0, sticky=E)
|
||||
self.__boundvar = StringVar()
|
||||
self.__boundvar.set(optiondb.get('ATBOUND', STOP))
|
||||
self.__omenu = OptionMenu(frame, self.__boundvar,
|
||||
STOP, WRAP, RATIO, GRAV)
|
||||
self.__omenu.grid(row=5, column=1, sticky=W)
|
||||
self.__omenu.configure(width=17)
|
||||
#
|
||||
# Buttons
|
||||
frame = self.__btnframe = Frame(frame)
|
||||
frame.grid(row=0, column=0, columnspan=2, sticky='EW')
|
||||
self.__down25 = Button(frame, text='-25',
|
||||
command=self.__minus25)
|
||||
self.__down10 = Button(frame, text='-10',
|
||||
command=self.__minus10)
|
||||
self.__down1 = Button(frame, text='-1',
|
||||
command=self.__minus1)
|
||||
self.__up1 = Button(frame, text='+1',
|
||||
command=self.__plus1)
|
||||
self.__up10 = Button(frame, text='+10',
|
||||
command=self.__plus10)
|
||||
self.__up25 = Button(frame, text='+25',
|
||||
command=self.__plus25)
|
||||
self.__down25.pack(expand=YES, fill=X, side=LEFT)
|
||||
self.__down10.pack(expand=YES, fill=X, side=LEFT)
|
||||
self.__down1.pack(expand=YES, fill=X, side=LEFT)
|
||||
self.__up1.pack(expand=YES, fill=X, side=LEFT)
|
||||
self.__up10.pack(expand=YES, fill=X, side=LEFT)
|
||||
self.__up25.pack(expand=YES, fill=X, side=LEFT)
|
||||
|
||||
def __effect(self, event=None):
|
||||
tie = self.__rvar.get() + self.__gvar.get() + self.__bvar.get()
|
||||
if tie in (0, 1, 2, 4):
|
||||
text = ''
|
||||
else:
|
||||
text = '(= %s Level)' % {3: 'Cyan',
|
||||
5: 'Magenta',
|
||||
6: 'Yellow',
|
||||
7: 'Grey'}[tie]
|
||||
self.__l2.configure(text=text)
|
||||
|
||||
def __quit(self, event=None):
|
||||
self.__root.quit()
|
||||
|
||||
def withdraw(self, event=None):
|
||||
self.__root.withdraw()
|
||||
|
||||
def deiconify(self, event=None):
|
||||
self.__root.deiconify()
|
||||
|
||||
def __minus25(self, event=None):
|
||||
self.__delta(-25)
|
||||
|
||||
def __minus10(self, event=None):
|
||||
self.__delta(-10)
|
||||
|
||||
def __minus1(self, event=None):
|
||||
self.__delta(-1)
|
||||
|
||||
def __plus1(self, event=None):
|
||||
self.__delta(1)
|
||||
|
||||
def __plus10(self, event=None):
|
||||
self.__delta(10)
|
||||
|
||||
def __plus25(self, event=None):
|
||||
self.__delta(25)
|
||||
|
||||
def __delta(self, delta):
|
||||
tie = []
|
||||
if self.__rvar.get():
|
||||
red = self.__red + delta
|
||||
tie.append(red)
|
||||
else:
|
||||
red = self.__red
|
||||
if self.__gvar.get():
|
||||
green = self.__green + delta
|
||||
tie.append(green)
|
||||
else:
|
||||
green = self.__green
|
||||
if self.__bvar.get():
|
||||
blue = self.__blue + delta
|
||||
tie.append(blue)
|
||||
else:
|
||||
blue = self.__blue
|
||||
# now apply at boundary behavior
|
||||
atbound = self.__boundvar.get()
|
||||
if atbound == STOP:
|
||||
if red < 0 or green < 0 or blue < 0 or \
|
||||
red > 255 or green > 255 or blue > 255:
|
||||
# then
|
||||
red, green, blue = self.__red, self.__green, self.__blue
|
||||
elif atbound == WRAP or (atbound == RATIO and len(tie) < 2):
|
||||
if red < 0:
|
||||
red += 256
|
||||
if green < 0:
|
||||
green += 256
|
||||
if blue < 0:
|
||||
blue += 256
|
||||
if red > 255:
|
||||
red -= 256
|
||||
if green > 255:
|
||||
green -= 256
|
||||
if blue > 255:
|
||||
blue -= 256
|
||||
elif atbound == RATIO:
|
||||
# for when 2 or 3 colors are tied together
|
||||
dir = 0
|
||||
for c in tie:
|
||||
if c < 0:
|
||||
dir = -1
|
||||
elif c > 255:
|
||||
dir = 1
|
||||
if dir == -1:
|
||||
delta = max(tie)
|
||||
if self.__rvar.get():
|
||||
red = red + 255 - delta
|
||||
if self.__gvar.get():
|
||||
green = green + 255 - delta
|
||||
if self.__bvar.get():
|
||||
blue = blue + 255 - delta
|
||||
elif dir == 1:
|
||||
delta = min(tie)
|
||||
if self.__rvar.get():
|
||||
red = red - delta
|
||||
if self.__gvar.get():
|
||||
green = green - delta
|
||||
if self.__bvar.get():
|
||||
blue = blue - delta
|
||||
elif atbound == GRAV:
|
||||
if red < 0:
|
||||
red = 0
|
||||
if green < 0:
|
||||
green = 0
|
||||
if blue < 0:
|
||||
blue = 0
|
||||
if red > 255:
|
||||
red = 255
|
||||
if green > 255:
|
||||
green = 255
|
||||
if blue > 255:
|
||||
blue = 255
|
||||
self.__sb.update_views(red, green, blue)
|
||||
self.__root.update_idletasks()
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
self.__red = red
|
||||
self.__green = green
|
||||
self.__blue = blue
|
||||
|
||||
def save_options(self, optiondb):
|
||||
optiondb['RSLIDER'] = self.__rvar.get()
|
||||
optiondb['GSLIDER'] = self.__gvar.get()
|
||||
optiondb['BSLIDER'] = self.__bvar.get()
|
||||
optiondb['ATBOUND'] = self.__boundvar.get()
|
||||
175
Tools/pynche/ListViewer.py
Normal file
@@ -0,0 +1,175 @@
"""ListViewer class.
|
||||
|
||||
This class implements an input/output view on the color model. It lists every
|
||||
unique color (e.g. unique r/g/b value) found in the color database. Each
|
||||
color is shown by small swatch and primary color name. Some colors have
|
||||
aliases -- more than one name for the same r/g/b value. These aliases are
|
||||
displayed in the small listbox at the bottom of the screen.
|
||||
|
||||
Clicking on a color name or swatch selects that color and updates all other
|
||||
windows. When a color is selected in a different viewer, the color list is
|
||||
scrolled to the selected color and it is highlighted. If the selected color
|
||||
is an r/g/b value without a name, no scrolling occurs.
|
||||
|
||||
You can turn off Update On Click if all you want to see is the alias for a
|
||||
given name, without selecting the color.
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
import ColorDB
|
||||
|
||||
ADDTOVIEW = 'Color %List Window...'
|
||||
|
||||
class ListViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
self.__sb = switchboard
|
||||
optiondb = switchboard.optiondb()
|
||||
self.__lastbox = None
|
||||
self.__dontcenter = 0
|
||||
# GUI
|
||||
root = self.__root = Toplevel(master, class_='Pynche')
|
||||
root.protocol('WM_DELETE_WINDOW', self.withdraw)
|
||||
root.title('Pynche Color List')
|
||||
root.iconname('Pynche Color List')
|
||||
root.bind('<Alt-q>', self.__quit)
|
||||
root.bind('<Alt-Q>', self.__quit)
|
||||
root.bind('<Alt-w>', self.withdraw)
|
||||
root.bind('<Alt-W>', self.withdraw)
|
||||
#
|
||||
# create the canvas which holds everything, and its scrollbar
|
||||
#
|
||||
frame = self.__frame = Frame(root)
|
||||
frame.pack()
|
||||
canvas = self.__canvas = Canvas(frame, width=160, height=300,
|
||||
borderwidth=2, relief=SUNKEN)
|
||||
self.__scrollbar = Scrollbar(frame)
|
||||
self.__scrollbar.pack(fill=Y, side=RIGHT)
|
||||
canvas.pack(fill=BOTH, expand=1)
|
||||
canvas.configure(yscrollcommand=(self.__scrollbar, 'set'))
|
||||
self.__scrollbar.configure(command=(canvas, 'yview'))
|
||||
self.__populate()
|
||||
#
|
||||
# Update on click
|
||||
self.__uoc = BooleanVar()
|
||||
self.__uoc.set(optiondb.get('UPONCLICK', 1))
|
||||
self.__uocbtn = Checkbutton(root,
|
||||
text='Update on Click',
|
||||
variable=self.__uoc,
|
||||
command=self.__toggleupdate)
|
||||
self.__uocbtn.pack(expand=1, fill=BOTH)
|
||||
#
|
||||
# alias list
|
||||
self.__alabel = Label(root, text='Aliases:')
|
||||
self.__alabel.pack()
|
||||
self.__aliases = Listbox(root, height=5,
|
||||
selectmode=BROWSE)
|
||||
self.__aliases.pack(expand=1, fill=BOTH)
|
||||
|
||||
def __populate(self):
|
||||
#
|
||||
# create all the buttons
|
||||
colordb = self.__sb.colordb()
|
||||
canvas = self.__canvas
|
||||
row = 0
|
||||
widest = 0
|
||||
bboxes = self.__bboxes = []
|
||||
for name in colordb.unique_names():
|
||||
exactcolor = ColorDB.triplet_to_rrggbb(colordb.find_byname(name))
|
||||
canvas.create_rectangle(5, row*20 + 5,
|
||||
20, row*20 + 20,
|
||||
fill=exactcolor)
|
||||
textid = canvas.create_text(25, row*20 + 13,
|
||||
text=name,
|
||||
anchor=W)
|
||||
x1, y1, textend, y2 = canvas.bbox(textid)
|
||||
boxid = canvas.create_rectangle(3, row*20+3,
|
||||
textend+3, row*20 + 23,
|
||||
outline='',
|
||||
tags=(exactcolor, 'all'))
|
||||
canvas.bind('<ButtonRelease>', self.__onrelease)
|
||||
bboxes.append(boxid)
|
||||
if textend+3 > widest:
|
||||
widest = textend+3
|
||||
row += 1
|
||||
canvheight = (row-1)*20 + 25
|
||||
canvas.config(scrollregion=(0, 0, 150, canvheight))
|
||||
for box in bboxes:
|
||||
x1, y1, x2, y2 = canvas.coords(box)
|
||||
canvas.coords(box, x1, y1, widest, y2)
|
||||
|
||||
def __onrelease(self, event=None):
|
||||
canvas = self.__canvas
|
||||
# find the current box
|
||||
x = canvas.canvasx(event.x)
|
||||
y = canvas.canvasy(event.y)
|
||||
ids = canvas.find_overlapping(x, y, x, y)
|
||||
for boxid in ids:
|
||||
if boxid in self.__bboxes:
|
||||
break
|
||||
else:
|
||||
## print 'No box found!'
|
||||
return
|
||||
tags = self.__canvas.gettags(boxid)
|
||||
for t in tags:
|
||||
if t[0] == '#':
|
||||
break
|
||||
else:
|
||||
## print 'No color tag found!'
|
||||
return
|
||||
red, green, blue = ColorDB.rrggbb_to_triplet(t)
|
||||
self.__dontcenter = 1
|
||||
if self.__uoc.get():
|
||||
self.__sb.update_views(red, green, blue)
|
||||
else:
|
||||
self.update_yourself(red, green, blue)
|
||||
self.__red, self.__green, self.__blue = red, green, blue
|
||||
|
||||
def __toggleupdate(self, event=None):
|
||||
if self.__uoc.get():
|
||||
self.__sb.update_views(self.__red, self.__green, self.__blue)
|
||||
|
||||
def __quit(self, event=None):
|
||||
self.__root.quit()
|
||||
|
||||
def withdraw(self, event=None):
|
||||
self.__root.withdraw()
|
||||
|
||||
def deiconify(self, event=None):
|
||||
self.__root.deiconify()
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
canvas = self.__canvas
|
||||
# turn off the last box
|
||||
if self.__lastbox:
|
||||
canvas.itemconfigure(self.__lastbox, outline='')
|
||||
# turn on the current box
|
||||
colortag = ColorDB.triplet_to_rrggbb((red, green, blue))
|
||||
canvas.itemconfigure(colortag, outline='black')
|
||||
self.__lastbox = colortag
|
||||
# fill the aliases
|
||||
self.__aliases.delete(0, END)
|
||||
try:
|
||||
aliases = self.__sb.colordb().aliases_of(red, green, blue)[1:]
|
||||
except ColorDB.BadColor:
|
||||
self.__aliases.insert(END, '<no matching color>')
|
||||
return
|
||||
if not aliases:
|
||||
self.__aliases.insert(END, '<no aliases>')
|
||||
else:
|
||||
for name in aliases:
|
||||
self.__aliases.insert(END, name)
|
||||
# maybe scroll the canvas so that the item is visible
|
||||
if self.__dontcenter:
|
||||
self.__dontcenter = 0
|
||||
else:
|
||||
ig, ig, ig, y1 = canvas.coords(colortag)
|
||||
ig, ig, ig, y2 = canvas.coords(self.__bboxes[-1])
|
||||
h = int(canvas['height']) * 0.5
|
||||
canvas.yview('moveto', (y1-h) / y2)
|
||||
|
||||
def save_options(self, optiondb):
|
||||
optiondb['UPONCLICK'] = self.__uoc.get()
|
||||
|
||||
def colordb_changed(self, colordb):
|
||||
self.__canvas.delete('all')
|
||||
self.__populate()
|
||||
229
Tools/pynche/Main.py
Normal file
@@ -0,0 +1,229 @@
"""Pynche -- The PYthon Natural Color and Hue Editor.
|
||||
|
||||
Contact: %(AUTHNAME)s
|
||||
Email: %(AUTHEMAIL)s
|
||||
Version: %(__version__)s
|
||||
|
||||
Pynche is based largely on a similar color editor I wrote years ago for the
|
||||
SunView window system. That editor was called ICE: the Interactive Color
|
||||
Editor. I'd always wanted to port the editor to X but didn't feel like
|
||||
hacking X and C code to do it. Fast forward many years, to where Python +
|
||||
Tkinter provides such a nice programming environment, with enough power, that
|
||||
I finally buckled down and implemented it. I changed the name because these
|
||||
days, too many other systems have the acronym `ICE'.
|
||||
|
||||
This program currently requires Python 2.2 with Tkinter.
|
||||
|
||||
Usage: %(PROGRAM)s [-d file] [-i file] [-X] [-v] [-h] [initialcolor]
|
||||
|
||||
Where:
|
||||
--database file
|
||||
-d file
|
||||
Alternate location of a color database file
|
||||
|
||||
--initfile file
|
||||
-i file
|
||||
Alternate location of the initialization file. This file contains a
|
||||
persistent database of the current Pynche options and color. This
|
||||
means that Pynche restores its option settings and current color when
|
||||
it restarts, using this file (unless the -X option is used). The
|
||||
default is ~/.pynche
|
||||
|
||||
--ignore
|
||||
-X
|
||||
Ignore the initialization file when starting up. Pynche will still
|
||||
write the current option settings to this file when it quits.
|
||||
|
||||
--version
|
||||
-v
|
||||
print the version number and exit
|
||||
|
||||
--help
|
||||
-h
|
||||
print this message
|
||||
|
||||
initialcolor
|
||||
initial color, as a color name or #RRGGBB format
|
||||
"""
|
||||
|
||||
__version__ = '1.4.1'
|
||||
|
||||
import sys
|
||||
import os
|
||||
import getopt
|
||||
import ColorDB
|
||||
|
||||
from PyncheWidget import PyncheWidget
|
||||
from Switchboard import Switchboard
|
||||
from StripViewer import StripViewer
|
||||
from ChipViewer import ChipViewer
|
||||
from TypeinViewer import TypeinViewer
|
||||
|
||||
|
||||
|
||||
PROGRAM = sys.argv[0]
|
||||
AUTHNAME = 'Barry Warsaw'
|
||||
AUTHEMAIL = 'barry@python.org'
|
||||
|
||||
# Default locations of rgb.txt or other textual color database
|
||||
RGB_TXT = [
|
||||
# Solaris OpenWindows
|
||||
'/usr/openwin/lib/rgb.txt',
|
||||
# Linux
|
||||
'/usr/lib/X11/rgb.txt',
|
||||
# The X11R6.4 rgb.txt file
|
||||
os.path.join(sys.path[0], 'X/rgb.txt'),
|
||||
# add more here
|
||||
]
|
||||
|
||||
|
||||
|
||||
# Do this because PyncheWidget.py wants to get at the interpolated docstring
|
||||
# too, for its Help menu.
|
||||
def docstring():
|
||||
return __doc__ % globals()
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print(docstring())
|
||||
if msg:
|
||||
print(msg)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
|
||||
def initial_color(s, colordb):
|
||||
# function called on every color
|
||||
def scan_color(s, colordb=colordb):
|
||||
try:
|
||||
r, g, b = colordb.find_byname(s)
|
||||
except ColorDB.BadColor:
|
||||
try:
|
||||
r, g, b = ColorDB.rrggbb_to_triplet(s)
|
||||
except ColorDB.BadColor:
|
||||
return None, None, None
|
||||
return r, g, b
|
||||
#
|
||||
# First try the passed in color
|
||||
r, g, b = scan_color(s)
|
||||
if r is None:
|
||||
# try the same color with '#' prepended, since some shells require
|
||||
# this to be escaped, which is a pain
|
||||
r, g, b = scan_color('#' + s)
|
||||
if r is None:
|
||||
print('Bad initial color, using gray50:', s)
|
||||
r, g, b = scan_color('gray50')
|
||||
if r is None:
|
||||
usage(1, 'Cannot find an initial color to use')
|
||||
# does not return
|
||||
return r, g, b
|
||||
|
||||
|
||||
|
||||
def build(master=None, initialcolor=None, initfile=None, ignore=None,
|
||||
dbfile=None):
|
||||
# create all output widgets
|
||||
s = Switchboard(not ignore and initfile)
|
||||
# defer to the command line chosen color database, falling back to the one
|
||||
# in the .pynche file.
|
||||
if dbfile is None:
|
||||
dbfile = s.optiondb().get('DBFILE')
|
||||
# find a parseable color database
|
||||
colordb = None
|
||||
files = RGB_TXT[:]
|
||||
if dbfile is None:
|
||||
dbfile = files.pop()
|
||||
while colordb is None:
|
||||
try:
|
||||
colordb = ColorDB.get_colordb(dbfile)
|
||||
except (KeyError, IOError):
|
||||
pass
|
||||
if colordb is None:
|
||||
if not files:
|
||||
break
|
||||
dbfile = files.pop(0)
|
||||
if not colordb:
|
||||
usage(1, 'No color database file found, see the -d option.')
|
||||
s.set_colordb(colordb)
|
||||
|
||||
# create the application window decorations
|
||||
app = PyncheWidget(__version__, s, master=master)
|
||||
w = app.window()
|
||||
|
||||
# these built-in viewers live inside the main Pynche window
|
||||
s.add_view(StripViewer(s, w))
|
||||
s.add_view(ChipViewer(s, w))
|
||||
s.add_view(TypeinViewer(s, w))
|
||||
|
||||
# get the initial color as components and set the color on all views. if
|
||||
# there was no initial color given on the command line, use the one that's
|
||||
# stored in the option database
|
||||
if initialcolor is None:
|
||||
optiondb = s.optiondb()
|
||||
red = optiondb.get('RED')
|
||||
green = optiondb.get('GREEN')
|
||||
blue = optiondb.get('BLUE')
|
||||
# but if there wasn't any stored in the database, use grey50
|
||||
if red is None or blue is None or green is None:
|
||||
red, green, blue = initial_color('grey50', colordb)
|
||||
else:
|
||||
red, green, blue = initial_color(initialcolor, colordb)
|
||||
s.update_views(red, green, blue)
|
||||
return app, s
|
||||
|
||||
|
||||
def run(app, s):
|
||||
try:
|
||||
app.start()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(
|
||||
sys.argv[1:],
|
||||
'hd:i:Xv',
|
||||
['database=', 'initfile=', 'ignore', 'help', 'version'])
|
||||
except getopt.error as msg:
|
||||
usage(1, msg)
|
||||
|
||||
if len(args) == 0:
|
||||
initialcolor = None
|
||||
elif len(args) == 1:
|
||||
initialcolor = args[0]
|
||||
else:
|
||||
usage(1)
|
||||
|
||||
ignore = False
|
||||
dbfile = None
|
||||
initfile = os.path.expanduser('~/.pynche')
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-v', '--version'):
|
||||
print("""\
|
||||
Pynche -- The PYthon Natural Color and Hue Editor.
|
||||
Contact: %(AUTHNAME)s
|
||||
Email: %(AUTHEMAIL)s
|
||||
Version: %(__version__)s""" % globals())
|
||||
sys.exit(0)
|
||||
elif opt in ('-d', '--database'):
|
||||
dbfile = arg
|
||||
elif opt in ('-X', '--ignore'):
|
||||
ignore = True
|
||||
elif opt in ('-i', '--initfile'):
|
||||
initfile = arg
|
||||
|
||||
app, sb = build(initialcolor=initialcolor,
|
||||
initfile=initfile,
|
||||
ignore=ignore,
|
||||
dbfile=dbfile)
|
||||
run(app, sb)
|
||||
sb.save_views()
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
313
Tools/pynche/PyncheWidget.py
Normal file
@@ -0,0 +1,313 @@
"""Main Pynche (Pythonically Natural Color and Hue Editor) widget.
|
||||
|
||||
This window provides the basic decorations, primarily including the menubar.
|
||||
It is used to bring up other windows.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
from tkinter import *
|
||||
from tkinter import messagebox, filedialog
|
||||
import ColorDB
|
||||
|
||||
# Milliseconds between interrupt checks
|
||||
KEEPALIVE_TIMER = 500
|
||||
|
||||
|
||||
|
||||
class PyncheWidget:
|
||||
def __init__(self, version, switchboard, master=None, extrapath=[]):
|
||||
self.__sb = switchboard
|
||||
self.__version = version
|
||||
self.__textwin = None
|
||||
self.__listwin = None
|
||||
self.__detailswin = None
|
||||
self.__helpwin = None
|
||||
self.__dialogstate = {}
|
||||
modal = self.__modal = not not master
|
||||
# If a master was given, we are running as a modal dialog servant to
|
||||
# some other application. We rearrange our UI in this case (there's
|
||||
# no File menu and we get `Okay' and `Cancel' buttons), and we do a
|
||||
# grab_set() to make ourselves modal
|
||||
if modal:
|
||||
self.__tkroot = tkroot = Toplevel(master, class_='Pynche')
|
||||
tkroot.grab_set()
|
||||
tkroot.withdraw()
|
||||
else:
|
||||
# Is there already a default root for Tk, say because we're
|
||||
# running under Guido's IDE? :-) Two conditions say no, either the
|
||||
# import fails or _default_root is None.
|
||||
tkroot = None
|
||||
try:
|
||||
                from tkinter import _default_root
|
||||
tkroot = self.__tkroot = _default_root
|
||||
except ImportError:
|
||||
pass
|
||||
if not tkroot:
|
||||
tkroot = self.__tkroot = Tk(className='Pynche')
|
||||
# but this isn't our top level widget, so make it invisible
|
||||
tkroot.withdraw()
|
||||
# create the menubar
|
||||
menubar = self.__menubar = Menu(tkroot)
|
||||
#
|
||||
# File menu
|
||||
#
|
||||
filemenu = self.__filemenu = Menu(menubar, tearoff=0)
|
||||
filemenu.add_command(label='Load palette...',
|
||||
command=self.__load,
|
||||
underline=0)
|
||||
if not modal:
|
||||
filemenu.add_command(label='Quit',
|
||||
command=self.__quit,
|
||||
accelerator='Alt-Q',
|
||||
underline=0)
|
||||
#
|
||||
# View menu
|
||||
#
|
||||
views = make_view_popups(self.__sb, self.__tkroot, extrapath)
|
||||
viewmenu = Menu(menubar, tearoff=0)
|
||||
for v in views:
|
||||
viewmenu.add_command(label=v.menutext(),
|
||||
command=v.popup,
|
||||
underline=v.underline())
|
||||
#
|
||||
# Help menu
|
||||
#
|
||||
helpmenu = Menu(menubar, name='help', tearoff=0)
|
||||
helpmenu.add_command(label='About Pynche...',
|
||||
command=self.__popup_about,
|
||||
underline=0)
|
||||
helpmenu.add_command(label='Help...',
|
||||
command=self.__popup_usage,
|
||||
underline=0)
|
||||
#
|
||||
# Tie them all together
|
||||
#
|
||||
menubar.add_cascade(label='File',
|
||||
menu=filemenu,
|
||||
underline=0)
|
||||
menubar.add_cascade(label='View',
|
||||
menu=viewmenu,
|
||||
underline=0)
|
||||
menubar.add_cascade(label='Help',
|
||||
menu=helpmenu,
|
||||
underline=0)
|
||||
|
||||
# now create the top level window
|
||||
root = self.__root = Toplevel(tkroot, class_='Pynche', menu=menubar)
|
||||
root.protocol('WM_DELETE_WINDOW',
|
||||
modal and self.__bell or self.__quit)
|
||||
root.title('Pynche %s' % version)
|
||||
root.iconname('Pynche')
|
||||
# Only bind accelerators for the File->Quit menu item if running as a
|
||||
# standalone app
|
||||
if not modal:
|
||||
root.bind('<Alt-q>', self.__quit)
|
||||
root.bind('<Alt-Q>', self.__quit)
|
||||
else:
|
||||
# We're a modal dialog so we have a new row of buttons
|
||||
bframe = Frame(root, borderwidth=1, relief=RAISED)
|
||||
bframe.grid(row=4, column=0, columnspan=2,
|
||||
sticky='EW',
|
||||
ipady=5)
|
||||
okay = Button(bframe,
|
||||
text='Okay',
|
||||
command=self.__okay)
|
||||
okay.pack(side=LEFT, expand=1)
|
||||
cancel = Button(bframe,
|
||||
text='Cancel',
|
||||
command=self.__cancel)
|
||||
cancel.pack(side=LEFT, expand=1)
|
||||
|
||||
def __quit(self, event=None):
|
||||
self.__tkroot.quit()
|
||||
|
||||
def __bell(self, event=None):
|
||||
self.__tkroot.bell()
|
||||
|
||||
def __okay(self, event=None):
|
||||
self.__sb.withdraw_views()
|
||||
self.__tkroot.grab_release()
|
||||
self.__quit()
|
||||
|
||||
def __cancel(self, event=None):
|
||||
self.__sb.canceled()
|
||||
self.__okay()
|
||||
|
||||
def __keepalive(self):
|
||||
# Exercise the Python interpreter regularly so keyboard interrupts get
|
||||
# through.
|
||||
self.__tkroot.tk.createtimerhandler(KEEPALIVE_TIMER, self.__keepalive)
|
||||
|
||||
def start(self):
|
||||
if not self.__modal:
|
||||
self.__keepalive()
|
||||
self.__tkroot.mainloop()
|
||||
|
||||
def window(self):
|
||||
return self.__root
|
||||
|
||||
def __popup_about(self, event=None):
|
||||
from Main import __version__
|
||||
messagebox.showinfo('About Pynche ' + __version__,
|
||||
'''\
|
||||
Pynche %s
|
||||
The PYthonically Natural
|
||||
Color and Hue Editor
|
||||
|
||||
For information
|
||||
contact: Barry A. Warsaw
|
||||
email: bwarsaw@python.org''' % __version__)
|
||||
|
||||
def __popup_usage(self, event=None):
|
||||
if not self.__helpwin:
|
||||
self.__helpwin = Helpwin(self.__root, self.__quit)
|
||||
self.__helpwin.deiconify()
|
||||
|
||||
def __load(self, event=None):
|
||||
while 1:
|
||||
idir, ifile = os.path.split(self.__sb.colordb().filename())
|
||||
file = filedialog.askopenfilename(
|
||||
filetypes=[('Text files', '*.txt'),
|
||||
('All files', '*'),
|
||||
],
|
||||
initialdir=idir,
|
||||
initialfile=ifile)
|
||||
if not file:
|
||||
# cancel button
|
||||
return
|
||||
try:
|
||||
colordb = ColorDB.get_colordb(file)
|
||||
except IOError:
|
||||
messagebox.showerror('Read error', '''\
|
||||
Could not open file for reading:
|
||||
%s''' % file)
|
||||
continue
|
||||
if colordb is None:
|
||||
messagebox.showerror('Unrecognized color file type', '''\
|
||||
Unrecognized color file type in file:
|
||||
%s''' % file)
|
||||
continue
|
||||
break
|
||||
self.__sb.set_colordb(colordb)
|
||||
|
||||
def withdraw(self):
|
||||
self.__root.withdraw()
|
||||
|
||||
def deiconify(self):
|
||||
self.__root.deiconify()
|
||||
|
||||
|
||||
|
||||
class Helpwin:
|
||||
def __init__(self, master, quitfunc):
|
||||
from Main import docstring
|
||||
self.__root = root = Toplevel(master, class_='Pynche')
|
||||
root.protocol('WM_DELETE_WINDOW', self.__withdraw)
|
||||
root.title('Pynche Help Window')
|
||||
root.iconname('Pynche Help Window')
|
||||
root.bind('<Alt-q>', quitfunc)
|
||||
root.bind('<Alt-Q>', quitfunc)
|
||||
root.bind('<Alt-w>', self.__withdraw)
|
||||
root.bind('<Alt-W>', self.__withdraw)
|
||||
|
||||
# more elaborate help is available in the README file
|
||||
readmefile = os.path.join(sys.path[0], 'README')
|
||||
try:
|
||||
fp = None
|
||||
try:
|
||||
fp = open(readmefile)
|
||||
contents = fp.read()
|
||||
# wax the last page, it contains Emacs cruft
|
||||
i = contents.rfind('\f')
|
||||
if i > 0:
|
||||
contents = contents[:i].rstrip()
|
||||
finally:
|
||||
if fp:
|
||||
fp.close()
|
||||
except IOError:
|
||||
sys.stderr.write("Couldn't open Pynche's README, "
|
||||
'using docstring instead.\n')
|
||||
contents = docstring()
|
||||
|
||||
self.__text = text = Text(root, relief=SUNKEN,
|
||||
width=80, height=24)
|
||||
self.__text.focus_set()
|
||||
text.insert(0.0, contents)
|
||||
scrollbar = Scrollbar(root)
|
||||
scrollbar.pack(fill=Y, side=RIGHT)
|
||||
text.pack(fill=BOTH, expand=YES)
|
||||
text.configure(yscrollcommand=(scrollbar, 'set'))
|
||||
scrollbar.configure(command=(text, 'yview'))
|
||||
|
||||
def __withdraw(self, event=None):
|
||||
self.__root.withdraw()
|
||||
|
||||
def deiconify(self):
|
||||
self.__root.deiconify()
|
||||
|
||||
|
||||
|
||||
import functools
|
||||
@functools.total_ordering
|
||||
class PopupViewer:
|
||||
def __init__(self, module, name, switchboard, root):
|
||||
self.__m = module
|
||||
self.__name = name
|
||||
self.__sb = switchboard
|
||||
self.__root = root
|
||||
self.__menutext = module.ADDTOVIEW
|
||||
# find the underline character
|
||||
underline = module.ADDTOVIEW.find('%')
|
||||
if underline == -1:
|
||||
underline = 0
|
||||
else:
|
||||
self.__menutext = module.ADDTOVIEW.replace('%', '', 1)
|
||||
self.__underline = underline
|
||||
self.__window = None
|
||||
|
||||
def menutext(self):
|
||||
return self.__menutext
|
||||
|
||||
def underline(self):
|
||||
return self.__underline
|
||||
|
||||
def popup(self, event=None):
|
||||
if not self.__window:
|
||||
# class and module must have the same name
|
||||
class_ = getattr(self.__m, self.__name)
|
||||
self.__window = class_(self.__sb, self.__root)
|
||||
self.__sb.add_view(self.__window)
|
||||
self.__window.deiconify()
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.__menutext == other.__menutext
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.__menutext < other.__menutext
|
||||
|
||||
|
||||
def make_view_popups(switchboard, root, extrapath):
|
||||
viewers = []
|
||||
# where we are in the file system
|
||||
dirs = [os.path.dirname(__file__)] + extrapath
|
||||
for dir in dirs:
|
||||
if dir == '':
|
||||
dir = '.'
|
||||
for file in os.listdir(dir):
|
||||
if file[-9:] == 'Viewer.py':
|
||||
name = file[:-3]
|
||||
try:
|
||||
module = __import__(name)
|
||||
except ImportError:
|
||||
# Pynche is running from inside a package, so get the
|
||||
# module using the explicit path.
|
||||
pkg = __import__('pynche.'+name)
|
||||
module = getattr(pkg, name)
|
||||
if hasattr(module, 'ADDTOVIEW') and module.ADDTOVIEW:
|
||||
# this is an external viewer
|
||||
v = PopupViewer(module, name, switchboard, root)
|
||||
viewers.append(v)
|
||||
# sort alphabetically
|
||||
viewers.sort()
|
||||
return viewers
|
||||
433
Tools/pynche/StripViewer.py
Normal file
@@ -0,0 +1,433 @@
"""Strip viewer and related widgets.
|
||||
|
||||
The classes in this file implement the StripViewer shown in the top two thirds
|
||||
of the main Pynche window. It consists of three StripWidgets which display
|
||||
the variations in red, green, and blue respectively of the currently selected
|
||||
r/g/b color value.
|
||||
|
||||
Each StripWidget shows the color variations that are reachable by varying an
|
||||
axis of the currently selected color. So for example, if the color is
|
||||
|
||||
(R,G,B)=(127,163,196)
|
||||
|
||||
then the Red variations show colors from (0,163,196) to (255,163,196), the
|
||||
Green variations show colors from (127,0,196) to (127,255,196), and the Blue
|
||||
variations show colors from (127,163,0) to (127,163,255).
|
||||
|
||||
The selected color is always visible in all three StripWidgets, and in fact
|
||||
each StripWidget highlights the selected color, and has an arrow pointing to
|
||||
the selected chip, which includes the value along that particular axis.
|
||||
|
||||
Clicking on any chip in any StripWidget selects that color, and updates all
|
||||
arrows and other windows. By toggling on Update while dragging, Pynche will
|
||||
select the color under the cursor while you drag it, but be forewarned that
|
||||
this can be slow.
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
import ColorDB
|
||||
|
||||
# Load this script into the Tcl interpreter and call it in
|
||||
# StripWidget.set_color(). This is about as fast as it can be with the
|
||||
# current _tkinter.c interface, which doesn't support Tcl Objects.
|
||||
TCLPROC = '''\
|
||||
proc setcolor {canv colors} {
|
||||
set i 1
|
||||
foreach c $colors {
|
||||
$canv itemconfigure $i -fill $c -outline $c
|
||||
incr i
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
# Tcl event types
|
||||
BTNDOWN = 4
|
||||
BTNUP = 5
|
||||
BTNDRAG = 6
|
||||
|
||||
SPACE = ' '
|
||||
|
||||
|
||||
|
||||
def constant(numchips):
|
||||
step = 255.0 / (numchips - 1)
|
||||
start = 0.0
|
||||
seq = []
|
||||
while numchips > 0:
|
||||
seq.append(int(start))
|
||||
start = start + step
|
||||
numchips = numchips - 1
|
||||
return seq
|
||||
|
||||
# red variations, green+blue = cyan constant
|
||||
def constant_red_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip([red] * numchips, seq, seq))
|
||||
|
||||
# green variations, red+blue = magenta constant
|
||||
def constant_green_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip(seq, [green] * numchips, seq))
|
||||
|
||||
# blue variations, red+green = yellow constant
|
||||
def constant_blue_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip(seq, seq, [blue] * numchips))
|
||||
|
||||
# red variations, green+blue = cyan constant
|
||||
def constant_cyan_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip(seq, [green] * numchips, [blue] * numchips))
|
||||
|
||||
# green variations, red+blue = magenta constant
|
||||
def constant_magenta_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip([red] * numchips, seq, [blue] * numchips))
|
||||
|
||||
# blue variations, red+green = yellow constant
|
||||
def constant_yellow_generator(numchips, red, green, blue):
|
||||
seq = constant(numchips)
|
||||
return list(zip([red] * numchips, [green] * numchips, seq))
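
# Illustrative sketch, not part of the original file: a quick sanity check of
# the generators above for the docstring's example color (127, 163, 196).
# Three chips are enough to show both endpoints and the midpoint of each axis.
# The helper name _demo_variations is hypothetical and is never called by Pynche.
def _demo_variations():
    red, green, blue = 127, 163, 196
    assert constant_cyan_generator(3, red, green, blue) == \
        [(0, 163, 196), (127, 163, 196), (255, 163, 196)]      # red axis
    assert constant_magenta_generator(3, red, green, blue) == \
        [(127, 0, 196), (127, 127, 196), (127, 255, 196)]      # green axis
    assert constant_yellow_generator(3, red, green, blue) == \
        [(127, 163, 0), (127, 163, 127), (127, 163, 255)]      # blue axis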
|
||||
|
||||
|
||||
|
||||
class LeftArrow:
|
||||
_ARROWWIDTH = 30
|
||||
_ARROWHEIGHT = 15
|
||||
_YOFFSET = 13
|
||||
_TEXTYOFFSET = 1
|
||||
_TAG = ('leftarrow',)
|
||||
|
||||
def __init__(self, canvas, x):
|
||||
self._canvas = canvas
|
||||
self.__arrow, self.__text = self._create(x)
|
||||
self.move_to(x)
|
||||
|
||||
def _create(self, x):
|
||||
arrow = self._canvas.create_line(
|
||||
x, self._ARROWHEIGHT + self._YOFFSET,
|
||||
x, self._YOFFSET,
|
||||
x + self._ARROWWIDTH, self._YOFFSET,
|
||||
arrow='first',
|
||||
width=3.0,
|
||||
tags=self._TAG)
|
||||
text = self._canvas.create_text(
|
||||
x + self._ARROWWIDTH + 13,
|
||||
self._ARROWHEIGHT - self._TEXTYOFFSET,
|
||||
tags=self._TAG,
|
||||
text='128')
|
||||
return arrow, text
|
||||
|
||||
def _x(self):
|
||||
coords = list(self._canvas.coords(self._TAG))
|
||||
assert coords
|
||||
return coords[0]
|
||||
|
||||
def move_to(self, x):
|
||||
deltax = x - self._x()
|
||||
self._canvas.move(self._TAG, deltax, 0)
|
||||
|
||||
def set_text(self, text):
|
||||
self._canvas.itemconfigure(self.__text, text=text)
|
||||
|
||||
|
||||
class RightArrow(LeftArrow):
|
||||
_TAG = ('rightarrow',)
|
||||
|
||||
def _create(self, x):
|
||||
arrow = self._canvas.create_line(
|
||||
x, self._YOFFSET,
|
||||
x + self._ARROWWIDTH, self._YOFFSET,
|
||||
x + self._ARROWWIDTH, self._ARROWHEIGHT + self._YOFFSET,
|
||||
arrow='last',
|
||||
width=3.0,
|
||||
tags=self._TAG)
|
||||
text = self._canvas.create_text(
|
||||
x - self._ARROWWIDTH + 15, # BAW: kludge
|
||||
self._ARROWHEIGHT - self._TEXTYOFFSET,
|
||||
justify=RIGHT,
|
||||
text='128',
|
||||
tags=self._TAG)
|
||||
return arrow, text
|
||||
|
||||
def _x(self):
|
||||
coords = list(self._canvas.coords(self._TAG))
|
||||
assert coords
|
||||
return coords[0] + self._ARROWWIDTH
|
||||
|
||||
|
||||
|
||||
class StripWidget:
|
||||
_CHIPHEIGHT = 50
|
||||
_CHIPWIDTH = 10
|
||||
_NUMCHIPS = 40
|
||||
|
||||
def __init__(self, switchboard,
|
||||
master = None,
|
||||
chipwidth = _CHIPWIDTH,
|
||||
chipheight = _CHIPHEIGHT,
|
||||
numchips = _NUMCHIPS,
|
||||
generator = None,
|
||||
axis = None,
|
||||
label = '',
|
||||
uwdvar = None,
|
||||
hexvar = None):
|
||||
# instance variables
|
||||
self.__generator = generator
|
||||
self.__axis = axis
|
||||
self.__numchips = numchips
|
||||
assert self.__axis in (0, 1, 2)
|
||||
self.__uwd = uwdvar
|
||||
self.__hexp = hexvar
|
||||
# the last chip selected
|
||||
self.__lastchip = None
|
||||
self.__sb = switchboard
|
||||
|
||||
canvaswidth = numchips * (chipwidth + 1)
|
||||
canvasheight = chipheight + 43 # BAW: Kludge
|
||||
|
||||
# create the canvas and pack it
|
||||
canvas = self.__canvas = Canvas(master,
|
||||
width=canvaswidth,
|
||||
height=canvasheight,
|
||||
## borderwidth=2,
|
||||
## relief=GROOVE
|
||||
)
|
||||
|
||||
canvas.pack()
|
||||
canvas.bind('<ButtonPress-1>', self.__select_chip)
|
||||
canvas.bind('<ButtonRelease-1>', self.__select_chip)
|
||||
canvas.bind('<B1-Motion>', self.__select_chip)
|
||||
|
||||
# Load a proc into the Tcl interpreter. This is used in the
|
||||
# set_color() method to speed up setting the chip colors.
|
||||
canvas.tk.eval(TCLPROC)
|
||||
|
||||
# create the color strip
|
||||
chips = self.__chips = []
|
||||
x = 1
|
||||
y = 30
|
||||
tags = ('chip',)
|
||||
for c in range(self.__numchips):
|
||||
color = 'grey'
|
||||
canvas.create_rectangle(
|
||||
x, y, x+chipwidth, y+chipheight,
|
||||
fill=color, outline=color,
|
||||
tags=tags)
|
||||
x = x + chipwidth + 1 # for outline
|
||||
chips.append(color)
|
||||
|
||||
# create the strip label
|
||||
self.__label = canvas.create_text(
|
||||
3, y + chipheight + 8,
|
||||
text=label,
|
||||
anchor=W)
|
||||
|
||||
# create the arrow and text item
|
||||
chipx = self.__arrow_x(0)
|
||||
self.__leftarrow = LeftArrow(canvas, chipx)
|
||||
|
||||
chipx = self.__arrow_x(len(chips) - 1)
|
||||
self.__rightarrow = RightArrow(canvas, chipx)
|
||||
|
||||
def __arrow_x(self, chipnum):
|
||||
coords = self.__canvas.coords(chipnum+1)
|
||||
assert coords
|
||||
x0, y0, x1, y1 = coords
|
||||
return (x1 + x0) / 2.0
|
||||
|
||||
# Invoked when one of the chips is clicked. This should just tell the
|
||||
# switchboard to set the color on all the output components
|
||||
def __select_chip(self, event=None):
|
||||
x = event.x
|
||||
y = event.y
|
||||
canvas = self.__canvas
|
||||
chip = canvas.find_overlapping(x, y, x, y)
|
||||
if chip and (1 <= chip[0] <= self.__numchips):
|
||||
color = self.__chips[chip[0]-1]
|
||||
red, green, blue = ColorDB.rrggbb_to_triplet(color)
|
||||
etype = int(event.type)
|
||||
if (etype == BTNUP or self.__uwd.get()):
|
||||
# update everyone
|
||||
self.__sb.update_views(red, green, blue)
|
||||
else:
|
||||
# just track the arrows
|
||||
self.__trackarrow(chip[0], (red, green, blue))
|
||||
|
||||
def __trackarrow(self, chip, rgbtuple):
|
||||
# invert the last chip
|
||||
if self.__lastchip is not None:
|
||||
color = self.__canvas.itemcget(self.__lastchip, 'fill')
|
||||
self.__canvas.itemconfigure(self.__lastchip, outline=color)
|
||||
self.__lastchip = chip
|
||||
# get the arrow's text
|
||||
coloraxis = rgbtuple[self.__axis]
|
||||
if self.__hexp.get():
|
||||
# hex
|
||||
text = hex(coloraxis)
|
||||
else:
|
||||
# decimal
|
||||
text = repr(coloraxis)
|
||||
# move the arrow, and set its text
|
||||
if coloraxis <= 128:
|
||||
# use the left arrow
|
||||
self.__leftarrow.set_text(text)
|
||||
self.__leftarrow.move_to(self.__arrow_x(chip-1))
|
||||
self.__rightarrow.move_to(-100)
|
||||
else:
|
||||
# use the right arrow
|
||||
self.__rightarrow.set_text(text)
|
||||
self.__rightarrow.move_to(self.__arrow_x(chip-1))
|
||||
self.__leftarrow.move_to(-100)
|
||||
# and set the chip's outline
|
||||
brightness = ColorDB.triplet_to_brightness(rgbtuple)
|
||||
if brightness <= 128:
|
||||
outline = 'white'
|
||||
else:
|
||||
outline = 'black'
|
||||
self.__canvas.itemconfigure(chip, outline=outline)
|
||||
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
assert self.__generator
|
||||
i = 1
|
||||
chip = 0
|
||||
chips = self.__chips = []
|
||||
tk = self.__canvas.tk
|
||||
# get the red, green, and blue components for all chips
|
||||
for t in self.__generator(self.__numchips, red, green, blue):
|
||||
rrggbb = ColorDB.triplet_to_rrggbb(t)
|
||||
chips.append(rrggbb)
|
||||
tred, tgreen, tblue = t
|
||||
if tred <= red and tgreen <= green and tblue <= blue:
|
||||
chip = i
|
||||
i = i + 1
|
||||
# call the raw tcl script
|
||||
colors = SPACE.join(chips)
|
||||
tk.eval('setcolor %s {%s}' % (self.__canvas._w, colors))
|
||||
# move the arrows around
|
||||
self.__trackarrow(chip, (red, green, blue))
|
||||
|
||||
def set(self, label, generator):
|
||||
self.__canvas.itemconfigure(self.__label, text=label)
|
||||
self.__generator = generator
|
||||
|
||||
|
||||
class StripViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
self.__sb = switchboard
|
||||
optiondb = switchboard.optiondb()
|
||||
# create a frame inside the master.
|
||||
frame = Frame(master, relief=RAISED, borderwidth=1)
|
||||
frame.grid(row=1, column=0, columnspan=2, sticky='NSEW')
|
||||
# create the options to be used later
|
||||
uwd = self.__uwdvar = BooleanVar()
|
||||
uwd.set(optiondb.get('UPWHILEDRAG', 0))
|
||||
hexp = self.__hexpvar = BooleanVar()
|
||||
hexp.set(optiondb.get('HEXSTRIP', 0))
|
||||
# create the red, green, blue strips inside their own frame
|
||||
frame1 = Frame(frame)
|
||||
frame1.pack(expand=YES, fill=BOTH)
|
||||
self.__reds = StripWidget(switchboard, frame1,
|
||||
generator=constant_cyan_generator,
|
||||
axis=0,
|
||||
label='Red Variations',
|
||||
uwdvar=uwd, hexvar=hexp)
|
||||
|
||||
self.__greens = StripWidget(switchboard, frame1,
|
||||
generator=constant_magenta_generator,
|
||||
axis=1,
|
||||
label='Green Variations',
|
||||
uwdvar=uwd, hexvar=hexp)
|
||||
|
||||
self.__blues = StripWidget(switchboard, frame1,
|
||||
generator=constant_yellow_generator,
|
||||
axis=2,
|
||||
label='Blue Variations',
|
||||
uwdvar=uwd, hexvar=hexp)
|
||||
|
||||
# create a frame to contain the controls
|
||||
frame2 = Frame(frame)
|
||||
frame2.pack(expand=YES, fill=BOTH)
|
||||
frame2.columnconfigure(0, weight=20)
|
||||
frame2.columnconfigure(2, weight=20)
|
||||
|
||||
padx = 8
|
||||
|
||||
# create the black button
|
||||
blackbtn = Button(frame2,
|
||||
text='Black',
|
||||
command=self.__toblack)
|
||||
blackbtn.grid(row=0, column=0, rowspan=2, sticky=W, padx=padx)
|
||||
|
||||
# create the controls
|
||||
uwdbtn = Checkbutton(frame2,
|
||||
text='Update while dragging',
|
||||
variable=uwd)
|
||||
uwdbtn.grid(row=0, column=1, sticky=W)
|
||||
hexbtn = Checkbutton(frame2,
|
||||
text='Hexadecimal',
|
||||
variable=hexp,
|
||||
command=self.__togglehex)
|
||||
hexbtn.grid(row=1, column=1, sticky=W)
|
||||
|
||||
# XXX: ignore this feature for now; it doesn't work quite right yet
|
||||
|
||||
## gentypevar = self.__gentypevar = IntVar()
|
||||
## self.__variations = Radiobutton(frame,
|
||||
## text='Variations',
|
||||
## variable=gentypevar,
|
||||
## value=0,
|
||||
## command=self.__togglegentype)
|
||||
## self.__variations.grid(row=0, column=1, sticky=W)
|
||||
## self.__constants = Radiobutton(frame,
|
||||
## text='Constants',
|
||||
## variable=gentypevar,
|
||||
## value=1,
|
||||
## command=self.__togglegentype)
|
||||
## self.__constants.grid(row=1, column=1, sticky=W)
|
||||
|
||||
# create the white button
|
||||
whitebtn = Button(frame2,
|
||||
text='White',
|
||||
command=self.__towhite)
|
||||
whitebtn.grid(row=0, column=2, rowspan=2, sticky=E, padx=padx)
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
self.__reds.update_yourself(red, green, blue)
|
||||
self.__greens.update_yourself(red, green, blue)
|
||||
self.__blues.update_yourself(red, green, blue)
|
||||
|
||||
def __togglehex(self, event=None):
|
||||
red, green, blue = self.__sb.current_rgb()
|
||||
self.update_yourself(red, green, blue)
|
||||
|
||||
## def __togglegentype(self, event=None):
|
||||
## which = self.__gentypevar.get()
|
||||
## if which == 0:
|
||||
## self.__reds.set(label='Red Variations',
|
||||
## generator=constant_cyan_generator)
|
||||
## self.__greens.set(label='Green Variations',
|
||||
## generator=constant_magenta_generator)
|
||||
## self.__blues.set(label='Blue Variations',
|
||||
## generator=constant_yellow_generator)
|
||||
## elif which == 1:
|
||||
## self.__reds.set(label='Red Constant',
|
||||
## generator=constant_red_generator)
|
||||
## self.__greens.set(label='Green Constant',
|
||||
## generator=constant_green_generator)
|
||||
## self.__blues.set(label='Blue Constant',
|
||||
## generator=constant_blue_generator)
|
||||
## else:
|
||||
## assert 0
|
||||
## self.__sb.update_views_current()
|
||||
|
||||
def __toblack(self, event=None):
|
||||
self.__sb.update_views(0, 0, 0)
|
||||
|
||||
def __towhite(self, event=None):
|
||||
self.__sb.update_views(255, 255, 255)
|
||||
|
||||
def save_options(self, optiondb):
|
||||
optiondb['UPWHILEDRAG'] = self.__uwdvar.get()
|
||||
optiondb['HEXSTRIP'] = self.__hexpvar.get()
|
||||
138
Tools/pynche/Switchboard.py
Normal file
@@ -0,0 +1,138 @@
"""Switchboard class.
|
||||
|
||||
This class is used to coordinate updates among all Viewers. Every Viewer must
|
||||
conform to the following interface:
|
||||
|
||||
- it must include a method called update_yourself() which takes three
|
||||
arguments; the red, green, and blue values of the selected color.
|
||||
|
||||
- When a Viewer selects a color and wishes to update all other Views, it
|
||||
should call update_views() on the Switchboard object. Note that the
|
||||
Viewer typically does *not* update itself before calling update_views(),
|
||||
since this would cause it to get updated twice.
|
||||
|
||||
Optionally, Viewers can also implement:
|
||||
|
||||
- save_options() which takes an optiondb (a dictionary). Store into this
|
||||
dictionary any values the Viewer wants to save in the persistent
|
||||
~/.pynche file. This dictionary is saved using marshal. The namespace
|
||||
for the keys is ad-hoc; make sure you don't clobber some other Viewer's
|
||||
keys!
|
||||
|
||||
- withdraw() which takes no arguments. This is called when Pynche is
|
||||
unmapped. All Viewers should implement this.
|
||||
|
||||
- colordb_changed() which takes a single argument, an instance of
|
||||
ColorDB. This is called whenever the color name database is changed and
|
||||
gives a chance for the Viewers to do something on those events. See
|
||||
ListViewer for details.
|
||||
|
||||
External Viewers are found dynamically. Viewer modules should have names such
|
||||
as FooViewer.py. If such a named module has a module global variable called
|
||||
ADDTOVIEW and this variable is true, the Viewer will be added dynamically to
|
||||
the `View' menu. ADDTOVIEW contains a string which is used as the menu item
|
||||
to display the Viewer (one kludge: if the string contains a `%', this is used
|
||||
to indicate that the next character will get an underline in the menu,
|
||||
otherwise the first character is underlined).
|
||||
|
||||
FooViewer.py should contain a class called FooViewer, and its constructor
|
||||
should take two arguments, an instance of Switchboard, and optionally a Tk
|
||||
master window.
|
||||
|
||||
"""
|
||||
|
||||
import sys
|
||||
import marshal
|
||||
|
||||
|
||||
|
||||
class Switchboard:
|
||||
def __init__(self, initfile):
|
||||
self.__initfile = initfile
|
||||
self.__colordb = None
|
||||
self.__optiondb = {}
|
||||
self.__views = []
|
||||
self.__red = 0
|
||||
self.__green = 0
|
||||
self.__blue = 0
|
||||
self.__canceled = 0
|
||||
# read the initialization file
|
||||
fp = None
|
||||
if initfile:
|
||||
try:
|
||||
try:
|
||||
fp = open(initfile, 'rb')
|
||||
self.__optiondb = marshal.load(fp)
|
||||
if not isinstance(self.__optiondb, dict):
|
||||
print('Problem reading options from file:', initfile,
|
||||
file=sys.stderr)
|
||||
self.__optiondb = {}
|
||||
except (IOError, EOFError, ValueError):
|
||||
pass
|
||||
finally:
|
||||
if fp:
|
||||
fp.close()
|
||||
|
||||
def add_view(self, view):
|
||||
self.__views.append(view)
|
||||
|
||||
def update_views(self, red, green, blue):
|
||||
self.__red = red
|
||||
self.__green = green
|
||||
self.__blue = blue
|
||||
for v in self.__views:
|
||||
v.update_yourself(red, green, blue)
|
||||
|
||||
def update_views_current(self):
|
||||
self.update_views(self.__red, self.__green, self.__blue)
|
||||
|
||||
def current_rgb(self):
|
||||
return self.__red, self.__green, self.__blue
|
||||
|
||||
def colordb(self):
|
||||
return self.__colordb
|
||||
|
||||
def set_colordb(self, colordb):
|
||||
self.__colordb = colordb
|
||||
for v in self.__views:
|
||||
if hasattr(v, 'colordb_changed'):
|
||||
v.colordb_changed(colordb)
|
||||
self.update_views_current()
|
||||
|
||||
def optiondb(self):
|
||||
return self.__optiondb
|
||||
|
||||
def save_views(self):
|
||||
# save the current color
|
||||
self.__optiondb['RED'] = self.__red
|
||||
self.__optiondb['GREEN'] = self.__green
|
||||
self.__optiondb['BLUE'] = self.__blue
|
||||
for v in self.__views:
|
||||
if hasattr(v, 'save_options'):
|
||||
v.save_options(self.__optiondb)
|
||||
# save the name of the file used for the color database. we'll try to
|
||||
# load this first.
|
||||
self.__optiondb['DBFILE'] = self.__colordb.filename()
|
||||
fp = None
|
||||
try:
|
||||
try:
|
||||
fp = open(self.__initfile, 'wb')
|
||||
except IOError:
|
||||
print('Cannot write options to file:', \
|
||||
self.__initfile, file=sys.stderr)
|
||||
else:
|
||||
marshal.dump(self.__optiondb, fp)
|
||||
finally:
|
||||
if fp:
|
||||
fp.close()
|
||||
|
||||
def withdraw_views(self):
|
||||
for v in self.__views:
|
||||
if hasattr(v, 'withdraw'):
|
||||
v.withdraw()
|
||||
|
||||
def canceled(self, flag=1):
|
||||
self.__canceled = flag
|
||||
|
||||
def canceled_p(self):
|
||||
return self.__canceled
|
||||
188
Tools/pynche/TextViewer.py
Normal file
@@ -0,0 +1,188 @@
"""TextViewer class.
|
||||
|
||||
The TextViewer allows you to see how the selected color would affect various
|
||||
characteristics of a Tk text widget. This is an output viewer only.
|
||||
|
||||
In the top part of the window is a standard text widget with some sample text
|
||||
in it. You are free to edit this text in any way you want (BAW: allow you to
|
||||
change font characteristics). If you want changes in other viewers to update
|
||||
text characteristics, turn on Track color changes.
|
||||
|
||||
To select which characteristic tracks the change, select one of the radio
|
||||
buttons in the window below. Text foreground and background affect the text
|
||||
in the window above. The Selection is what you see when you click the middle
|
||||
button and drag it through some text. The Insertion is the insertion cursor
|
||||
in the text window (which only has a background).
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
import ColorDB
|
||||
|
||||
ADDTOVIEW = 'Text Window...'
|
||||
|
||||
|
||||
|
||||
class TextViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
self.__sb = switchboard
|
||||
optiondb = switchboard.optiondb()
|
||||
root = self.__root = Toplevel(master, class_='Pynche')
|
||||
root.protocol('WM_DELETE_WINDOW', self.withdraw)
|
||||
root.title('Pynche Text Window')
|
||||
root.iconname('Pynche Text Window')
|
||||
root.bind('<Alt-q>', self.__quit)
|
||||
root.bind('<Alt-Q>', self.__quit)
|
||||
root.bind('<Alt-w>', self.withdraw)
|
||||
root.bind('<Alt-W>', self.withdraw)
|
||||
#
|
||||
# create the text widget
|
||||
#
|
||||
self.__text = Text(root, relief=SUNKEN,
|
||||
background=optiondb.get('TEXTBG', 'black'),
|
||||
foreground=optiondb.get('TEXTFG', 'white'),
|
||||
width=35, height=15)
|
||||
sfg = optiondb.get('TEXT_SFG')
|
||||
if sfg:
|
||||
self.__text.configure(selectforeground=sfg)
|
||||
sbg = optiondb.get('TEXT_SBG')
|
||||
if sbg:
|
||||
self.__text.configure(selectbackground=sbg)
|
||||
ibg = optiondb.get('TEXT_IBG')
|
||||
if ibg:
|
||||
self.__text.configure(insertbackground=ibg)
|
||||
self.__text.pack()
|
||||
self.__text.insert(0.0, optiondb.get('TEXT', '''\
|
||||
Insert some stuff here and play
|
||||
with the buttons below to see
|
||||
how the colors interact in
|
||||
textual displays.
|
||||
|
||||
See how the selection can also
|
||||
be affected by tickling the buttons
|
||||
and choosing a color.'''))
|
||||
insert = optiondb.get('TEXTINS')
|
||||
if insert:
|
||||
self.__text.mark_set(INSERT, insert)
|
||||
try:
|
||||
start, end = optiondb.get('TEXTSEL', (6.0, END))
|
||||
self.__text.tag_add(SEL, start, end)
|
||||
except ValueError:
|
||||
# selection wasn't set
|
||||
pass
|
||||
self.__text.focus_set()
|
||||
#
|
||||
# variables
|
||||
self.__trackp = BooleanVar()
|
||||
self.__trackp.set(optiondb.get('TRACKP', 0))
|
||||
self.__which = IntVar()
|
||||
self.__which.set(optiondb.get('WHICH', 0))
|
||||
#
|
||||
# track toggle
|
||||
self.__t = Checkbutton(root, text='Track color changes',
|
||||
variable=self.__trackp,
|
||||
relief=GROOVE,
|
||||
command=self.__toggletrack)
|
||||
self.__t.pack(fill=X, expand=YES)
|
||||
frame = self.__frame = Frame(root)
|
||||
frame.pack()
|
||||
#
|
||||
# labels
|
||||
self.__labels = []
|
||||
row = 2
|
||||
for text in ('Text:', 'Selection:', 'Insertion:'):
|
||||
l = Label(frame, text=text)
|
||||
l.grid(row=row, column=0, sticky=E)
|
||||
self.__labels.append(l)
|
||||
row += 1
|
||||
col = 1
|
||||
for text in ('Foreground', 'Background'):
|
||||
l = Label(frame, text=text)
|
||||
l.grid(row=1, column=col)
|
||||
self.__labels.append(l)
|
||||
col += 1
|
||||
#
|
||||
# radios
|
||||
self.__radios = []
|
||||
for col in (1, 2):
|
||||
for row in (2, 3, 4):
|
||||
# there is no insertforeground option
|
||||
if row==4 and col==1:
|
||||
continue
|
||||
r = Radiobutton(frame, variable=self.__which,
|
||||
value=(row-2)*2 + col-1,
|
||||
command=self.__set_color)
|
||||
r.grid(row=row, column=col)
|
||||
self.__radios.append(r)
|
||||
self.__toggletrack()
|
||||
|
||||
def __quit(self, event=None):
|
||||
self.__root.quit()
|
||||
|
||||
def withdraw(self, event=None):
|
||||
self.__root.withdraw()
|
||||
|
||||
def deiconify(self, event=None):
|
||||
self.__root.deiconify()
|
||||
|
||||
def __forceupdate(self, event=None):
|
||||
self.__sb.update_views_current()
|
||||
|
||||
def __toggletrack(self, event=None):
|
||||
if self.__trackp.get():
|
||||
state = NORMAL
|
||||
fg = self.__radios[0]['foreground']
|
||||
else:
|
||||
state = DISABLED
|
||||
fg = self.__radios[0]['disabledforeground']
|
||||
for r in self.__radios:
|
||||
r.configure(state=state)
|
||||
for l in self.__labels:
|
||||
l.configure(foreground=fg)
|
||||
|
||||
def __set_color(self, event=None):
|
||||
which = self.__which.get()
|
||||
text = self.__text
|
||||
if which == 0:
|
||||
color = text['foreground']
|
||||
elif which == 1:
|
||||
color = text['background']
|
||||
elif which == 2:
|
||||
color = text['selectforeground']
|
||||
elif which == 3:
|
||||
color = text['selectbackground']
|
||||
elif which == 5:
|
||||
color = text['insertbackground']
|
||||
try:
|
||||
red, green, blue = ColorDB.rrggbb_to_triplet(color)
|
||||
except ColorDB.BadColor:
|
||||
# must have been a color name
|
||||
red, green, blue = self.__sb.colordb().find_byname(color)
|
||||
self.__sb.update_views(red, green, blue)
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
if self.__trackp.get():
|
||||
colorname = ColorDB.triplet_to_rrggbb((red, green, blue))
|
||||
which = self.__which.get()
|
||||
text = self.__text
|
||||
if which == 0:
|
||||
text.configure(foreground=colorname)
|
||||
elif which == 1:
|
||||
text.configure(background=colorname)
|
||||
elif which == 2:
|
||||
text.configure(selectforeground=colorname)
|
||||
elif which == 3:
|
||||
text.configure(selectbackground=colorname)
|
||||
elif which == 5:
|
||||
text.configure(insertbackground=colorname)
|
||||
|
||||
def save_options(self, optiondb):
|
||||
optiondb['TRACKP'] = self.__trackp.get()
|
||||
optiondb['WHICH'] = self.__which.get()
|
||||
optiondb['TEXT'] = self.__text.get(0.0, 'end - 1c')
|
||||
optiondb['TEXTSEL'] = self.__text.tag_ranges(SEL)[0:2]
|
||||
optiondb['TEXTINS'] = self.__text.index(INSERT)
|
||||
optiondb['TEXTFG'] = self.__text['foreground']
|
||||
optiondb['TEXTBG'] = self.__text['background']
|
||||
optiondb['TEXT_SFG'] = self.__text['selectforeground']
|
||||
optiondb['TEXT_SBG'] = self.__text['selectbackground']
|
||||
optiondb['TEXT_IBG'] = self.__text['insertbackground']
|
||||
161
Tools/pynche/TypeinViewer.py
Normal file
@@ -0,0 +1,161 @@
"""TypeinViewer class.
|
||||
|
||||
The TypeinViewer is what you see at the lower right of the main Pynche
|
||||
widget. It contains three text entry fields, one each for red, green, blue.
|
||||
Input into these windows is highly constrained; it only allows you to enter
|
||||
values that are legal for a color axis. This usually means 0-255 for decimal
|
||||
input and 0x0 - 0xff for hex input.
|
||||
|
||||
You can toggle whether you want to view and input the values in either decimal
|
||||
or hex by clicking on Hexadecimal. By clicking on Update while typing, the
|
||||
color selection will be made on every change to the text field. Otherwise,
|
||||
you must hit Return or Tab to select the color.
|
||||
"""
|
||||
|
||||
from tkinter import *
|
||||
|
||||
|
||||
|
||||
class TypeinViewer:
|
||||
def __init__(self, switchboard, master=None):
|
||||
# non-gui ivars
|
||||
self.__sb = switchboard
|
||||
optiondb = switchboard.optiondb()
|
||||
self.__hexp = BooleanVar()
|
||||
self.__hexp.set(optiondb.get('HEXTYPE', 0))
|
||||
self.__uwtyping = BooleanVar()
|
||||
self.__uwtyping.set(optiondb.get('UPWHILETYPE', 0))
|
||||
# create the gui
|
||||
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
|
||||
self.__frame.grid(row=3, column=1, sticky='NSEW')
|
||||
# Red
|
||||
self.__xl = Label(self.__frame, text='Red:')
|
||||
self.__xl.grid(row=0, column=0, sticky=E)
|
||||
subframe = Frame(self.__frame)
|
||||
subframe.grid(row=0, column=1)
|
||||
self.__xox = Label(subframe, text='0x')
|
||||
self.__xox.grid(row=0, column=0, sticky=E)
|
||||
self.__xox['font'] = 'courier'
|
||||
self.__x = Entry(subframe, width=3)
|
||||
self.__x.grid(row=0, column=1)
|
||||
self.__x.bindtags(self.__x.bindtags() + ('Normalize', 'Update'))
|
||||
self.__x.bind_class('Normalize', '<Key>', self.__normalize)
|
||||
self.__x.bind_class('Update' , '<Key>', self.__maybeupdate)
|
||||
# Green
|
||||
self.__yl = Label(self.__frame, text='Green:')
|
||||
self.__yl.grid(row=1, column=0, sticky=E)
|
||||
subframe = Frame(self.__frame)
|
||||
subframe.grid(row=1, column=1)
|
||||
self.__yox = Label(subframe, text='0x')
|
||||
self.__yox.grid(row=0, column=0, sticky=E)
|
||||
self.__yox['font'] = 'courier'
|
||||
self.__y = Entry(subframe, width=3)
|
||||
self.__y.grid(row=0, column=1)
|
||||
self.__y.bindtags(self.__y.bindtags() + ('Normalize', 'Update'))
|
||||
# Blue
|
||||
self.__zl = Label(self.__frame, text='Blue:')
|
||||
self.__zl.grid(row=2, column=0, sticky=E)
|
||||
subframe = Frame(self.__frame)
|
||||
subframe.grid(row=2, column=1)
|
||||
self.__zox = Label(subframe, text='0x')
|
||||
self.__zox.grid(row=0, column=0, sticky=E)
|
||||
self.__zox['font'] = 'courier'
|
||||
self.__z = Entry(subframe, width=3)
|
||||
self.__z.grid(row=0, column=1)
|
||||
self.__z.bindtags(self.__z.bindtags() + ('Normalize', 'Update'))
|
||||
# Update while typing?
|
||||
self.__uwt = Checkbutton(self.__frame,
|
||||
text='Update while typing',
|
||||
variable=self.__uwtyping)
|
||||
self.__uwt.grid(row=3, column=0, columnspan=2, sticky=W)
|
||||
# Hex/Dec
|
||||
self.__hex = Checkbutton(self.__frame,
|
||||
text='Hexadecimal',
|
||||
variable=self.__hexp,
|
||||
command=self.__togglehex)
|
||||
self.__hex.grid(row=4, column=0, columnspan=2, sticky=W)
|
||||
|
||||
def __togglehex(self, event=None):
|
||||
red, green, blue = self.__sb.current_rgb()
|
||||
if self.__hexp.get():
|
||||
label = '0x'
|
||||
else:
|
||||
label = ' '
|
||||
self.__xox['text'] = label
|
||||
self.__yox['text'] = label
|
||||
self.__zox['text'] = label
|
||||
self.update_yourself(red, green, blue)
|
||||
|
||||
def __normalize(self, event=None):
|
||||
ew = event.widget
|
||||
contents = ew.get()
|
||||
icursor = ew.index(INSERT)
|
||||
if contents and contents[0] in 'xX' and self.__hexp.get():
|
||||
contents = '0' + contents
|
||||
# Figure out the contents in the current base.
|
||||
try:
|
||||
if self.__hexp.get():
|
||||
v = int(contents, 16)
|
||||
else:
|
||||
v = int(contents)
|
||||
except ValueError:
|
||||
v = None
|
||||
# If value is not legal, or empty, delete the last character inserted
|
||||
# and ring the bell. Don't ring the bell if the field is empty (it'll
|
||||
        # just equal zero).
|
||||
if v is None:
|
||||
pass
|
||||
elif v < 0 or v > 255:
|
||||
i = ew.index(INSERT)
|
||||
if event.char:
|
||||
contents = contents[:i-1] + contents[i:]
|
||||
icursor -= 1
|
||||
ew.bell()
|
||||
elif self.__hexp.get():
|
||||
contents = hex(v)[2:]
|
||||
else:
|
||||
contents = int(v)
|
||||
ew.delete(0, END)
|
||||
ew.insert(0, contents)
|
||||
ew.icursor(icursor)
|
||||
|
||||
def __maybeupdate(self, event=None):
|
||||
if self.__uwtyping.get() or event.keysym in ('Return', 'Tab'):
|
||||
self.__update(event)
|
||||
|
||||
def __update(self, event=None):
|
||||
redstr = self.__x.get() or '0'
|
||||
greenstr = self.__y.get() or '0'
|
||||
bluestr = self.__z.get() or '0'
|
||||
if self.__hexp.get():
|
||||
base = 16
|
||||
else:
|
||||
base = 10
|
||||
red, green, blue = [int(x, base) for x in (redstr, greenstr, bluestr)]
|
||||
self.__sb.update_views(red, green, blue)
|
||||
|
||||
def update_yourself(self, red, green, blue):
|
||||
if self.__hexp.get():
|
||||
sred, sgreen, sblue = [hex(x)[2:] for x in (red, green, blue)]
|
||||
else:
|
||||
sred, sgreen, sblue = red, green, blue
|
||||
x, y, z = self.__x, self.__y, self.__z
|
||||
xicursor = x.index(INSERT)
|
||||
yicursor = y.index(INSERT)
|
||||
zicursor = z.index(INSERT)
|
||||
x.delete(0, END)
|
||||
y.delete(0, END)
|
||||
z.delete(0, END)
|
||||
x.insert(0, sred)
|
||||
y.insert(0, sgreen)
|
||||
z.insert(0, sblue)
|
||||
x.icursor(xicursor)
|
||||
y.icursor(yicursor)
|
||||
z.icursor(zicursor)
|
||||
|
||||
def hexp_var(self):
|
||||
return self.__hexp
|
||||
|
||||
def save_options(self, optiondb):
|
||||
optiondb['HEXTYPE'] = self.__hexp.get()
|
||||
optiondb['UPWHILETYPE'] = self.__uwtyping.get()
|
||||
753
Tools/pynche/X/rgb.txt
Normal file
@@ -0,0 +1,753 @@
! $XConsortium: rgb.txt,v 10.41 94/02/20 18:39:36 rws Exp $
|
||||
255 250 250 snow
|
||||
248 248 255 ghost white
|
||||
248 248 255 GhostWhite
|
||||
245 245 245 white smoke
|
||||
245 245 245 WhiteSmoke
|
||||
220 220 220 gainsboro
|
||||
255 250 240 floral white
|
||||
255 250 240 FloralWhite
|
||||
253 245 230 old lace
|
||||
253 245 230 OldLace
|
||||
250 240 230 linen
|
||||
250 235 215 antique white
|
||||
250 235 215 AntiqueWhite
|
||||
255 239 213 papaya whip
|
||||
255 239 213 PapayaWhip
|
||||
255 235 205 blanched almond
|
||||
255 235 205 BlanchedAlmond
|
||||
255 228 196 bisque
|
||||
255 218 185 peach puff
|
||||
255 218 185 PeachPuff
|
||||
255 222 173 navajo white
|
||||
255 222 173 NavajoWhite
|
||||
255 228 181 moccasin
|
||||
255 248 220 cornsilk
|
||||
255 255 240 ivory
|
||||
255 250 205 lemon chiffon
|
||||
255 250 205 LemonChiffon
|
||||
255 245 238 seashell
|
||||
240 255 240 honeydew
|
||||
245 255 250 mint cream
|
||||
245 255 250 MintCream
|
||||
240 255 255 azure
|
||||
240 248 255 alice blue
|
||||
240 248 255 AliceBlue
|
||||
230 230 250 lavender
|
||||
255 240 245 lavender blush
|
||||
255 240 245 LavenderBlush
|
||||
255 228 225 misty rose
|
||||
255 228 225 MistyRose
|
||||
255 255 255 white
|
||||
0 0 0 black
|
||||
47 79 79 dark slate gray
|
||||
47 79 79 DarkSlateGray
|
||||
47 79 79 dark slate grey
|
||||
47 79 79 DarkSlateGrey
|
||||
105 105 105 dim gray
|
||||
105 105 105 DimGray
|
||||
105 105 105 dim grey
|
||||
105 105 105 DimGrey
|
||||
112 128 144 slate gray
|
||||
112 128 144 SlateGray
|
||||
112 128 144 slate grey
|
||||
112 128 144 SlateGrey
|
||||
119 136 153 light slate gray
|
||||
119 136 153 LightSlateGray
|
||||
119 136 153 light slate grey
|
||||
119 136 153 LightSlateGrey
|
||||
190 190 190 gray
|
||||
190 190 190 grey
|
||||
211 211 211 light grey
|
||||
211 211 211 LightGrey
|
||||
211 211 211 light gray
|
||||
211 211 211 LightGray
|
||||
25 25 112 midnight blue
|
||||
25 25 112 MidnightBlue
|
||||
0 0 128 navy
|
||||
0 0 128 navy blue
|
||||
0 0 128 NavyBlue
|
||||
100 149 237 cornflower blue
|
||||
100 149 237 CornflowerBlue
|
||||
72 61 139 dark slate blue
|
||||
72 61 139 DarkSlateBlue
|
||||
106 90 205 slate blue
|
||||
106 90 205 SlateBlue
|
||||
123 104 238 medium slate blue
|
||||
123 104 238 MediumSlateBlue
|
||||
132 112 255 light slate blue
|
||||
132 112 255 LightSlateBlue
|
||||
0 0 205 medium blue
|
||||
0 0 205 MediumBlue
|
||||
65 105 225 royal blue
|
||||
65 105 225 RoyalBlue
|
||||
0 0 255 blue
|
||||
30 144 255 dodger blue
|
||||
30 144 255 DodgerBlue
|
||||
0 191 255 deep sky blue
|
||||
0 191 255 DeepSkyBlue
|
||||
135 206 235 sky blue
|
||||
135 206 235 SkyBlue
|
||||
135 206 250 light sky blue
|
||||
135 206 250 LightSkyBlue
|
||||
70 130 180 steel blue
|
||||
70 130 180 SteelBlue
|
||||
176 196 222 light steel blue
|
||||
176 196 222 LightSteelBlue
|
||||
173 216 230 light blue
|
||||
173 216 230 LightBlue
|
||||
176 224 230 powder blue
|
||||
176 224 230 PowderBlue
|
||||
175 238 238 pale turquoise
|
||||
175 238 238 PaleTurquoise
|
||||
0 206 209 dark turquoise
|
||||
0 206 209 DarkTurquoise
|
||||
72 209 204 medium turquoise
|
||||
72 209 204 MediumTurquoise
|
||||
64 224 208 turquoise
|
||||
0 255 255 cyan
|
||||
224 255 255 light cyan
|
||||
224 255 255 LightCyan
|
||||
95 158 160 cadet blue
|
||||
95 158 160 CadetBlue
|
||||
102 205 170 medium aquamarine
|
||||
102 205 170 MediumAquamarine
|
||||
127 255 212 aquamarine
|
||||
0 100 0 dark green
|
||||
0 100 0 DarkGreen
|
||||
85 107 47 dark olive green
|
||||
85 107 47 DarkOliveGreen
|
||||
143 188 143 dark sea green
|
||||
143 188 143 DarkSeaGreen
|
||||
46 139 87 sea green
|
||||
46 139 87 SeaGreen
|
||||
60 179 113 medium sea green
|
||||
60 179 113 MediumSeaGreen
|
||||
32 178 170 light sea green
|
||||
32 178 170 LightSeaGreen
|
||||
152 251 152 pale green
|
||||
152 251 152 PaleGreen
|
||||
0 255 127 spring green
|
||||
0 255 127 SpringGreen
|
||||
124 252 0 lawn green
|
||||
124 252 0 LawnGreen
|
||||
0 255 0 green
|
||||
127 255 0 chartreuse
|
||||
0 250 154 medium spring green
|
||||
0 250 154 MediumSpringGreen
|
||||
173 255 47 green yellow
|
||||
173 255 47 GreenYellow
|
||||
50 205 50 lime green
|
||||
50 205 50 LimeGreen
|
||||
154 205 50 yellow green
|
||||
154 205 50 YellowGreen
|
||||
34 139 34 forest green
|
||||
34 139 34 ForestGreen
|
||||
107 142 35 olive drab
|
||||
107 142 35 OliveDrab
|
||||
189 183 107 dark khaki
|
||||
189 183 107 DarkKhaki
|
||||
240 230 140 khaki
|
||||
238 232 170 pale goldenrod
|
||||
238 232 170 PaleGoldenrod
|
||||
250 250 210 light goldenrod yellow
|
||||
250 250 210 LightGoldenrodYellow
|
||||
255 255 224 light yellow
|
||||
255 255 224 LightYellow
|
||||
255 255 0 yellow
|
||||
255 215 0 gold
|
||||
238 221 130 light goldenrod
|
||||
238 221 130 LightGoldenrod
|
||||
218 165 32 goldenrod
|
||||
184 134 11 dark goldenrod
|
||||
184 134 11 DarkGoldenrod
|
||||
188 143 143 rosy brown
|
||||
188 143 143 RosyBrown
|
||||
205 92 92 indian red
|
||||
205 92 92 IndianRed
|
||||
139 69 19 saddle brown
|
||||
139 69 19 SaddleBrown
|
||||
160 82 45 sienna
|
||||
205 133 63 peru
|
||||
222 184 135 burlywood
|
||||
245 245 220 beige
|
||||
245 222 179 wheat
|
||||
244 164 96 sandy brown
|
||||
244 164 96 SandyBrown
|
||||
210 180 140 tan
|
||||
210 105 30 chocolate
|
||||
178 34 34 firebrick
|
||||
165 42 42 brown
|
||||
233 150 122 dark salmon
|
||||
233 150 122 DarkSalmon
|
||||
250 128 114 salmon
|
||||
255 160 122 light salmon
|
||||
255 160 122 LightSalmon
|
||||
255 165 0 orange
|
||||
255 140 0 dark orange
|
||||
255 140 0 DarkOrange
|
||||
255 127 80 coral
|
||||
240 128 128 light coral
|
||||
240 128 128 LightCoral
|
||||
255 99 71 tomato
|
||||
255 69 0 orange red
|
||||
255 69 0 OrangeRed
|
||||
255 0 0 red
|
||||
255 105 180 hot pink
|
||||
255 105 180 HotPink
|
||||
255 20 147 deep pink
|
||||
255 20 147 DeepPink
|
||||
255 192 203 pink
|
||||
255 182 193 light pink
|
||||
255 182 193 LightPink
|
||||
219 112 147 pale violet red
|
||||
219 112 147 PaleVioletRed
|
||||
176 48 96 maroon
|
||||
199 21 133 medium violet red
|
||||
199 21 133 MediumVioletRed
|
||||
208 32 144 violet red
|
||||
208 32 144 VioletRed
|
||||
255 0 255 magenta
|
||||
238 130 238 violet
|
||||
221 160 221 plum
|
||||
218 112 214 orchid
|
||||
186 85 211 medium orchid
|
||||
186 85 211 MediumOrchid
|
||||
153 50 204 dark orchid
|
||||
153 50 204 DarkOrchid
|
||||
148 0 211 dark violet
|
||||
148 0 211 DarkViolet
|
||||
138 43 226 blue violet
|
||||
138 43 226 BlueViolet
|
||||
160 32 240 purple
|
||||
147 112 219 medium purple
|
||||
147 112 219 MediumPurple
|
||||
216 191 216 thistle
|
||||
255 250 250 snow1
|
||||
238 233 233 snow2
|
||||
205 201 201 snow3
|
||||
139 137 137 snow4
|
||||
255 245 238 seashell1
|
||||
238 229 222 seashell2
|
||||
205 197 191 seashell3
|
||||
139 134 130 seashell4
|
||||
255 239 219 AntiqueWhite1
|
||||
238 223 204 AntiqueWhite2
|
||||
205 192 176 AntiqueWhite3
|
||||
139 131 120 AntiqueWhite4
|
||||
255 228 196 bisque1
|
||||
238 213 183 bisque2
|
||||
205 183 158 bisque3
|
||||
139 125 107 bisque4
|
||||
255 218 185 PeachPuff1
|
||||
238 203 173 PeachPuff2
|
||||
205 175 149 PeachPuff3
|
||||
139 119 101 PeachPuff4
|
||||
255 222 173 NavajoWhite1
|
||||
238 207 161 NavajoWhite2
|
||||
205 179 139 NavajoWhite3
|
||||
139 121 94 NavajoWhite4
|
||||
255 250 205 LemonChiffon1
|
||||
238 233 191 LemonChiffon2
|
||||
205 201 165 LemonChiffon3
|
||||
139 137 112 LemonChiffon4
|
||||
255 248 220 cornsilk1
|
||||
238 232 205 cornsilk2
|
||||
205 200 177 cornsilk3
|
||||
139 136 120 cornsilk4
|
||||
255 255 240 ivory1
|
||||
238 238 224 ivory2
|
||||
205 205 193 ivory3
|
||||
139 139 131 ivory4
|
||||
240 255 240 honeydew1
|
||||
224 238 224 honeydew2
|
||||
193 205 193 honeydew3
|
||||
131 139 131 honeydew4
|
||||
255 240 245 LavenderBlush1
|
||||
238 224 229 LavenderBlush2
|
||||
205 193 197 LavenderBlush3
|
||||
139 131 134 LavenderBlush4
|
||||
255 228 225 MistyRose1
|
||||
238 213 210 MistyRose2
|
||||
205 183 181 MistyRose3
|
||||
139 125 123 MistyRose4
|
||||
240 255 255 azure1
|
||||
224 238 238 azure2
|
||||
193 205 205 azure3
|
||||
131 139 139 azure4
|
||||
131 111 255 SlateBlue1
|
||||
122 103 238 SlateBlue2
|
||||
105 89 205 SlateBlue3
|
||||
71 60 139 SlateBlue4
|
||||
72 118 255 RoyalBlue1
|
||||
67 110 238 RoyalBlue2
|
||||
58 95 205 RoyalBlue3
|
||||
39 64 139 RoyalBlue4
|
||||
0 0 255 blue1
|
||||
0 0 238 blue2
|
||||
0 0 205 blue3
|
||||
0 0 139 blue4
|
||||
30 144 255 DodgerBlue1
|
||||
28 134 238 DodgerBlue2
|
||||
24 116 205 DodgerBlue3
|
||||
16 78 139 DodgerBlue4
|
||||
99 184 255 SteelBlue1
|
||||
92 172 238 SteelBlue2
|
||||
79 148 205 SteelBlue3
|
||||
54 100 139 SteelBlue4
|
||||
0 191 255 DeepSkyBlue1
|
||||
0 178 238 DeepSkyBlue2
|
||||
0 154 205 DeepSkyBlue3
|
||||
0 104 139 DeepSkyBlue4
|
||||
135 206 255 SkyBlue1
|
||||
126 192 238 SkyBlue2
|
||||
108 166 205 SkyBlue3
|
||||
74 112 139 SkyBlue4
|
||||
176 226 255 LightSkyBlue1
|
||||
164 211 238 LightSkyBlue2
|
||||
141 182 205 LightSkyBlue3
|
||||
96 123 139 LightSkyBlue4
|
||||
198 226 255 SlateGray1
|
||||
185 211 238 SlateGray2
|
||||
159 182 205 SlateGray3
|
||||
108 123 139 SlateGray4
|
||||
202 225 255 LightSteelBlue1
|
||||
188 210 238 LightSteelBlue2
|
||||
162 181 205 LightSteelBlue3
|
||||
110 123 139 LightSteelBlue4
|
||||
191 239 255 LightBlue1
|
||||
178 223 238 LightBlue2
|
||||
154 192 205 LightBlue3
|
||||
104 131 139 LightBlue4
|
||||
224 255 255 LightCyan1
|
||||
209 238 238 LightCyan2
|
||||
180 205 205 LightCyan3
|
||||
122 139 139 LightCyan4
|
||||
187 255 255 PaleTurquoise1
|
||||
174 238 238 PaleTurquoise2
|
||||
150 205 205 PaleTurquoise3
|
||||
102 139 139 PaleTurquoise4
|
||||
152 245 255 CadetBlue1
|
||||
142 229 238 CadetBlue2
|
||||
122 197 205 CadetBlue3
|
||||
83 134 139 CadetBlue4
|
||||
0 245 255 turquoise1
|
||||
0 229 238 turquoise2
|
||||
0 197 205 turquoise3
|
||||
0 134 139 turquoise4
|
||||
0 255 255 cyan1
|
||||
0 238 238 cyan2
|
||||
0 205 205 cyan3
|
||||
0 139 139 cyan4
|
||||
151 255 255 DarkSlateGray1
|
||||
141 238 238 DarkSlateGray2
|
||||
121 205 205 DarkSlateGray3
|
||||
82 139 139 DarkSlateGray4
|
||||
127 255 212 aquamarine1
|
||||
118 238 198 aquamarine2
|
||||
102 205 170 aquamarine3
|
||||
69 139 116 aquamarine4
|
||||
193 255 193 DarkSeaGreen1
|
||||
180 238 180 DarkSeaGreen2
|
||||
155 205 155 DarkSeaGreen3
|
||||
105 139 105 DarkSeaGreen4
|
||||
84 255 159 SeaGreen1
|
||||
78 238 148 SeaGreen2
|
||||
67 205 128 SeaGreen3
|
||||
46 139 87 SeaGreen4
|
||||
154 255 154 PaleGreen1
|
||||
144 238 144 PaleGreen2
|
||||
124 205 124 PaleGreen3
|
||||
84 139 84 PaleGreen4
|
||||
0 255 127 SpringGreen1
|
||||
0 238 118 SpringGreen2
|
||||
0 205 102 SpringGreen3
|
||||
0 139 69 SpringGreen4
|
||||
0 255 0 green1
|
||||
0 238 0 green2
|
||||
0 205 0 green3
|
||||
0 139 0 green4
|
||||
127 255 0 chartreuse1
|
||||
118 238 0 chartreuse2
|
||||
102 205 0 chartreuse3
|
||||
69 139 0 chartreuse4
|
||||
192 255 62 OliveDrab1
|
||||
179 238 58 OliveDrab2
|
||||
154 205 50 OliveDrab3
|
||||
105 139 34 OliveDrab4
|
||||
202 255 112 DarkOliveGreen1
|
||||
188 238 104 DarkOliveGreen2
|
||||
162 205 90 DarkOliveGreen3
|
||||
110 139 61 DarkOliveGreen4
|
||||
255 246 143 khaki1
|
||||
238 230 133 khaki2
|
||||
205 198 115 khaki3
|
||||
139 134 78 khaki4
|
||||
255 236 139 LightGoldenrod1
|
||||
238 220 130 LightGoldenrod2
|
||||
205 190 112 LightGoldenrod3
|
||||
139 129 76 LightGoldenrod4
|
||||
255 255 224 LightYellow1
|
||||
238 238 209 LightYellow2
|
||||
205 205 180 LightYellow3
|
||||
139 139 122 LightYellow4
|
||||
255 255 0 yellow1
|
||||
238 238 0 yellow2
|
||||
205 205 0 yellow3
|
||||
139 139 0 yellow4
|
||||
255 215 0 gold1
|
||||
238 201 0 gold2
|
||||
205 173 0 gold3
|
||||
139 117 0 gold4
|
||||
255 193 37 goldenrod1
|
||||
238 180 34 goldenrod2
|
||||
205 155 29 goldenrod3
|
||||
139 105 20 goldenrod4
|
||||
255 185 15 DarkGoldenrod1
|
||||
238 173 14 DarkGoldenrod2
|
||||
205 149 12 DarkGoldenrod3
|
||||
139 101 8 DarkGoldenrod4
|
||||
255 193 193 RosyBrown1
|
||||
238 180 180 RosyBrown2
|
||||
205 155 155 RosyBrown3
|
||||
139 105 105 RosyBrown4
|
||||
255 106 106 IndianRed1
|
||||
238 99 99 IndianRed2
|
||||
205 85 85 IndianRed3
|
||||
139 58 58 IndianRed4
|
||||
255 130 71 sienna1
|
||||
238 121 66 sienna2
|
||||
205 104 57 sienna3
|
||||
139 71 38 sienna4
|
||||
255 211 155 burlywood1
|
||||
238 197 145 burlywood2
|
||||
205 170 125 burlywood3
|
||||
139 115 85 burlywood4
|
||||
255 231 186 wheat1
|
||||
238 216 174 wheat2
|
||||
205 186 150 wheat3
|
||||
139 126 102 wheat4
|
||||
255 165 79 tan1
|
||||
238 154 73 tan2
|
||||
205 133 63 tan3
|
||||
139 90 43 tan4
|
||||
255 127 36 chocolate1
|
||||
238 118 33 chocolate2
|
||||
205 102 29 chocolate3
|
||||
139 69 19 chocolate4
|
||||
255 48 48 firebrick1
|
||||
238 44 44 firebrick2
|
||||
205 38 38 firebrick3
|
||||
139 26 26 firebrick4
|
||||
255 64 64 brown1
|
||||
238 59 59 brown2
|
||||
205 51 51 brown3
|
||||
139 35 35 brown4
|
||||
255 140 105 salmon1
|
||||
238 130 98 salmon2
|
||||
205 112 84 salmon3
|
||||
139 76 57 salmon4
|
||||
255 160 122 LightSalmon1
|
||||
238 149 114 LightSalmon2
|
||||
205 129 98 LightSalmon3
|
||||
139 87 66 LightSalmon4
|
||||
255 165 0 orange1
|
||||
238 154 0 orange2
|
||||
205 133 0 orange3
|
||||
139 90 0 orange4
|
||||
255 127 0 DarkOrange1
|
||||
238 118 0 DarkOrange2
|
||||
205 102 0 DarkOrange3
|
||||
139 69 0 DarkOrange4
|
||||
255 114 86 coral1
|
||||
238 106 80 coral2
|
||||
205 91 69 coral3
|
||||
139 62 47 coral4
|
||||
255 99 71 tomato1
|
||||
238 92 66 tomato2
|
||||
205 79 57 tomato3
|
||||
139 54 38 tomato4
|
||||
255 69 0 OrangeRed1
|
||||
238 64 0 OrangeRed2
|
||||
205 55 0 OrangeRed3
|
||||
139 37 0 OrangeRed4
|
||||
255 0 0 red1
|
||||
238 0 0 red2
|
||||
205 0 0 red3
|
||||
139 0 0 red4
|
||||
255 20 147 DeepPink1
|
||||
238 18 137 DeepPink2
|
||||
205 16 118 DeepPink3
|
||||
139 10 80 DeepPink4
|
||||
255 110 180 HotPink1
|
||||
238 106 167 HotPink2
|
||||
205 96 144 HotPink3
|
||||
139 58 98 HotPink4
|
||||
255 181 197 pink1
|
||||
238 169 184 pink2
|
||||
205 145 158 pink3
|
||||
139 99 108 pink4
|
||||
255 174 185 LightPink1
|
||||
238 162 173 LightPink2
|
||||
205 140 149 LightPink3
|
||||
139 95 101 LightPink4
|
||||
255 130 171 PaleVioletRed1
|
||||
238 121 159 PaleVioletRed2
|
||||
205 104 137 PaleVioletRed3
|
||||
139 71 93 PaleVioletRed4
|
||||
255 52 179 maroon1
|
||||
238 48 167 maroon2
|
||||
205 41 144 maroon3
|
||||
139 28 98 maroon4
|
||||
255 62 150 VioletRed1
|
||||
238 58 140 VioletRed2
|
||||
205 50 120 VioletRed3
|
||||
139 34 82 VioletRed4
|
||||
255 0 255 magenta1
|
||||
238 0 238 magenta2
|
||||
205 0 205 magenta3
|
||||
139 0 139 magenta4
|
||||
255 131 250 orchid1
|
||||
238 122 233 orchid2
|
||||
205 105 201 orchid3
|
||||
139 71 137 orchid4
|
||||
255 187 255 plum1
|
||||
238 174 238 plum2
|
||||
205 150 205 plum3
|
||||
139 102 139 plum4
|
||||
224 102 255 MediumOrchid1
|
||||
209 95 238 MediumOrchid2
|
||||
180 82 205 MediumOrchid3
|
||||
122 55 139 MediumOrchid4
|
||||
191 62 255 DarkOrchid1
|
||||
178 58 238 DarkOrchid2
|
||||
154 50 205 DarkOrchid3
|
||||
104 34 139 DarkOrchid4
|
||||
155 48 255 purple1
|
||||
145 44 238 purple2
|
||||
125 38 205 purple3
|
||||
85 26 139 purple4
|
||||
171 130 255 MediumPurple1
|
||||
159 121 238 MediumPurple2
|
||||
137 104 205 MediumPurple3
|
||||
93 71 139 MediumPurple4
|
||||
255 225 255 thistle1
|
||||
238 210 238 thistle2
|
||||
205 181 205 thistle3
|
||||
139 123 139 thistle4
|
||||
0 0 0 gray0
|
||||
0 0 0 grey0
|
||||
3 3 3 gray1
|
||||
3 3 3 grey1
|
||||
5 5 5 gray2
|
||||
5 5 5 grey2
|
||||
8 8 8 gray3
|
||||
8 8 8 grey3
|
||||
10 10 10 gray4
|
||||
10 10 10 grey4
|
||||
13 13 13 gray5
|
||||
13 13 13 grey5
|
||||
15 15 15 gray6
|
||||
15 15 15 grey6
|
||||
18 18 18 gray7
|
||||
18 18 18 grey7
|
||||
20 20 20 gray8
|
||||
20 20 20 grey8
|
||||
23 23 23 gray9
|
||||
23 23 23 grey9
|
||||
26 26 26 gray10
|
||||
26 26 26 grey10
|
||||
28 28 28 gray11
|
||||
28 28 28 grey11
|
||||
31 31 31 gray12
|
||||
31 31 31 grey12
|
||||
33 33 33 gray13
|
||||
33 33 33 grey13
|
||||
36 36 36 gray14
|
||||
36 36 36 grey14
|
||||
38 38 38 gray15
|
||||
38 38 38 grey15
|
||||
41 41 41 gray16
|
||||
41 41 41 grey16
|
||||
43 43 43 gray17
|
||||
43 43 43 grey17
|
||||
46 46 46 gray18
|
||||
46 46 46 grey18
|
||||
48 48 48 gray19
|
||||
48 48 48 grey19
|
||||
51 51 51 gray20
|
||||
51 51 51 grey20
|
||||
54 54 54 gray21
|
||||
54 54 54 grey21
|
||||
56 56 56 gray22
|
||||
56 56 56 grey22
|
||||
59 59 59 gray23
|
||||
59 59 59 grey23
|
||||
61 61 61 gray24
|
||||
61 61 61 grey24
|
||||
64 64 64 gray25
|
||||
64 64 64 grey25
|
||||
66 66 66 gray26
|
||||
66 66 66 grey26
|
||||
69 69 69 gray27
|
||||
69 69 69 grey27
|
||||
71 71 71 gray28
|
||||
71 71 71 grey28
|
||||
74 74 74 gray29
|
||||
74 74 74 grey29
|
||||
77 77 77 gray30
|
||||
77 77 77 grey30
|
||||
79 79 79 gray31
|
||||
79 79 79 grey31
|
||||
82 82 82 gray32
|
||||
82 82 82 grey32
|
||||
84 84 84 gray33
|
||||
84 84 84 grey33
|
||||
87 87 87 gray34
|
||||
87 87 87 grey34
|
||||
89 89 89 gray35
|
||||
89 89 89 grey35
|
||||
92 92 92 gray36
|
||||
92 92 92 grey36
|
||||
94 94 94 gray37
|
||||
94 94 94 grey37
|
||||
97 97 97 gray38
|
||||
97 97 97 grey38
|
||||
99 99 99 gray39
|
||||
99 99 99 grey39
|
||||
102 102 102 gray40
|
||||
102 102 102 grey40
|
||||
105 105 105 gray41
|
||||
105 105 105 grey41
|
||||
107 107 107 gray42
|
||||
107 107 107 grey42
|
||||
110 110 110 gray43
|
||||
110 110 110 grey43
|
||||
112 112 112 gray44
|
||||
112 112 112 grey44
|
||||
115 115 115 gray45
|
||||
115 115 115 grey45
|
||||
117 117 117 gray46
|
||||
117 117 117 grey46
|
||||
120 120 120 gray47
|
||||
120 120 120 grey47
|
||||
122 122 122 gray48
|
||||
122 122 122 grey48
|
||||
125 125 125 gray49
|
||||
125 125 125 grey49
|
||||
127 127 127 gray50
|
||||
127 127 127 grey50
|
||||
130 130 130 gray51
|
||||
130 130 130 grey51
|
||||
133 133 133 gray52
|
||||
133 133 133 grey52
|
||||
135 135 135 gray53
|
||||
135 135 135 grey53
|
||||
138 138 138 gray54
|
||||
138 138 138 grey54
|
||||
140 140 140 gray55
|
||||
140 140 140 grey55
|
||||
143 143 143 gray56
|
||||
143 143 143 grey56
|
||||
145 145 145 gray57
|
||||
145 145 145 grey57
|
||||
148 148 148 gray58
|
||||
148 148 148 grey58
|
||||
150 150 150 gray59
|
||||
150 150 150 grey59
|
||||
153 153 153 gray60
|
||||
153 153 153 grey60
|
||||
156 156 156 gray61
|
||||
156 156 156 grey61
|
||||
158 158 158 gray62
|
||||
158 158 158 grey62
|
||||
161 161 161 gray63
|
||||
161 161 161 grey63
|
||||
163 163 163 gray64
|
||||
163 163 163 grey64
|
||||
166 166 166 gray65
|
||||
166 166 166 grey65
|
||||
168 168 168 gray66
|
||||
168 168 168 grey66
|
||||
171 171 171 gray67
|
||||
171 171 171 grey67
|
||||
173 173 173 gray68
|
||||
173 173 173 grey68
|
||||
176 176 176 gray69
|
||||
176 176 176 grey69
|
||||
179 179 179 gray70
|
||||
179 179 179 grey70
|
||||
181 181 181 gray71
|
||||
181 181 181 grey71
|
||||
184 184 184 gray72
|
||||
184 184 184 grey72
|
||||
186 186 186 gray73
|
||||
186 186 186 grey73
|
||||
189 189 189 gray74
|
||||
189 189 189 grey74
|
||||
191 191 191 gray75
|
||||
191 191 191 grey75
|
||||
194 194 194 gray76
|
||||
194 194 194 grey76
|
||||
196 196 196 gray77
|
||||
196 196 196 grey77
|
||||
199 199 199 gray78
|
||||
199 199 199 grey78
|
||||
201 201 201 gray79
|
||||
201 201 201 grey79
|
||||
204 204 204 gray80
|
||||
204 204 204 grey80
|
||||
207 207 207 gray81
|
||||
207 207 207 grey81
|
||||
209 209 209 gray82
|
||||
209 209 209 grey82
|
||||
212 212 212 gray83
|
||||
212 212 212 grey83
|
||||
214 214 214 gray84
|
||||
214 214 214 grey84
|
||||
217 217 217 gray85
|
||||
217 217 217 grey85
|
||||
219 219 219 gray86
|
||||
219 219 219 grey86
|
||||
222 222 222 gray87
|
||||
222 222 222 grey87
|
||||
224 224 224 gray88
|
||||
224 224 224 grey88
|
||||
227 227 227 gray89
|
||||
227 227 227 grey89
|
||||
229 229 229 gray90
|
||||
229 229 229 grey90
|
||||
232 232 232 gray91
|
||||
232 232 232 grey91
|
||||
235 235 235 gray92
|
||||
235 235 235 grey92
|
||||
237 237 237 gray93
|
||||
237 237 237 grey93
|
||||
240 240 240 gray94
|
||||
240 240 240 grey94
|
||||
242 242 242 gray95
|
||||
242 242 242 grey95
|
||||
245 245 245 gray96
|
||||
245 245 245 grey96
|
||||
247 247 247 gray97
|
||||
247 247 247 grey97
|
||||
250 250 250 gray98
|
||||
250 250 250 grey98
|
||||
252 252 252 gray99
|
||||
252 252 252 grey99
|
||||
255 255 255 gray100
|
||||
255 255 255 grey100
|
||||
169 169 169 dark grey
|
||||
169 169 169 DarkGrey
|
||||
169 169 169 dark gray
|
||||
169 169 169 DarkGray
|
||||
0 0 139 dark blue
|
||||
0 0 139 DarkBlue
|
||||
0 139 139 dark cyan
|
||||
0 139 139 DarkCyan
|
||||
139 0 139 dark magenta
|
||||
139 0 139 DarkMagenta
|
||||
139 0 0 dark red
|
||||
139 0 0 DarkRed
|
||||
144 238 144 light green
|
||||
144 238 144 LightGreen
|
||||
29
Tools/pynche/X/xlicense.txt
Normal file
|
|
@@ -0,0 +1,29 @@
|
|||
X Window System License - X11R6.4
|
||||
|
||||
Copyright (c) 1998 The Open Group
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
Except as contained in this notice, the name of The Open Group shall
|
||||
not be used in advertising or otherwise to promote the sale, use or
|
||||
other dealings in this Software without prior written authorization
|
||||
from The Open Group.
|
||||
|
||||
X Window System is a trademark of The Open Group
|
||||
1
Tools/pynche/__init__.py
Normal file
|
|
@@ -0,0 +1 @@
|
|||
# Dummy file to make this directory a package.
|
||||
17
Tools/pynche/html40colors.txt
Normal file
|
|
@@ -0,0 +1,17 @@
|
|||
# HTML 4.0 color names
|
||||
Black #000000
|
||||
Silver #c0c0c0
|
||||
Gray #808080
|
||||
White #ffffff
|
||||
Maroon #800000
|
||||
Red #ff0000
|
||||
Purple #800080
|
||||
Fuchsia #ff00ff
|
||||
Green #008000
|
||||
Lime #00ff00
|
||||
Olive #808000
|
||||
Yellow #ffff00
|
||||
Navy #000080
|
||||
Blue #0000ff
|
||||
Teal #008080
|
||||
Aqua #00ffff
|
||||
100
Tools/pynche/namedcolors.txt
Normal file
|
|
@@ -0,0 +1,100 @@
|
|||
# named colors from http://www.lightlink.com/xine/bells/namedcolors.html
|
||||
White #FFFFFF
|
||||
Red #FF0000
|
||||
Green #00FF00
|
||||
Blue #0000FF
|
||||
Magenta #FF00FF
|
||||
Cyan #00FFFF
|
||||
Yellow #FFFF00
|
||||
Black #000000
|
||||
Aquamarine #70DB93
|
||||
Baker's Chocolate #5C3317
|
||||
Blue Violet #9F5F9F
|
||||
Brass #B5A642
|
||||
Bright Gold #D9D919
|
||||
Brown #A62A2A
|
||||
Bronze #8C7853
|
||||
Bronze II #A67D3D
|
||||
Cadet Blue #5F9F9F
|
||||
Cool Copper #D98719
|
||||
Copper #B87333
|
||||
Coral #FF7F00
|
||||
Corn Flower Blue #42426F
|
||||
Dark Brown #5C4033
|
||||
Dark Green #2F4F2F
|
||||
Dark Green Copper #4A766E
|
||||
Dark Olive Green #4F4F2F
|
||||
Dark Orchid #9932CD
|
||||
Dark Purple #871F78
|
||||
Dark Slate Blue #6B238E
|
||||
Dark Slate Grey #2F4F4F
|
||||
Dark Tan #97694F
|
||||
Dark Turquoise #7093DB
|
||||
Dark Wood #855E42
|
||||
Dim Grey #545454
|
||||
Dusty Rose #856363
|
||||
Feldspar #D19275
|
||||
Firebrick #8E2323
|
||||
Forest Green #238E23
|
||||
Gold #CD7F32
|
||||
Goldenrod #DBDB70
|
||||
Grey #C0C0C0
|
||||
Green Copper #527F76
|
||||
Green Yellow #93DB70
|
||||
Hunter Green #215E21
|
||||
Indian Red #4E2F2F
|
||||
Khaki #9F9F5F
|
||||
Light Blue #C0D9D9
|
||||
Light Grey #A8A8A8
|
||||
Light Steel Blue #8F8FBD
|
||||
Light Wood #E9C2A6
|
||||
Lime Green #32CD32
|
||||
Mandarian Orange #E47833
|
||||
Maroon #8E236B
|
||||
Medium Aquamarine #32CD99
|
||||
Medium Blue #3232CD
|
||||
Medium Forest Green #6B8E23
|
||||
Medium Goldenrod #EAEAAE
|
||||
Medium Orchid #9370DB
|
||||
Medium Sea Green #426F42
|
||||
Medium Slate Blue #7F00FF
|
||||
Medium Spring Green #7FFF00
|
||||
Medium Turquoise #70DBDB
|
||||
Medium Violet Red #DB7093
|
||||
Medium Wood #A68064
|
||||
Midnight Blue #2F2F4F
|
||||
Navy Blue #23238E
|
||||
Neon Blue #4D4DFF
|
||||
Neon Pink #FF6EC7
|
||||
New Midnight Blue #00009C
|
||||
New Tan #EBC79E
|
||||
Old Gold #CFB53B
|
||||
Orange #FF7F00
|
||||
Orange Red #FF2400
|
||||
Orchid #DB70DB
|
||||
Pale Green #8FBC8F
|
||||
Pink #BC8F8F
|
||||
Plum #EAADEA
|
||||
Quartz #D9D9F3
|
||||
Rich Blue #5959AB
|
||||
Salmon #6F4242
|
||||
Scarlet #8C1717
|
||||
Sea Green #238E68
|
||||
Semi-Sweet Chocolate #6B4226
|
||||
Sienna #8E6B23
|
||||
Silver #E6E8FA
|
||||
Sky Blue #3299CC
|
||||
Slate Blue #007FFF
|
||||
Spicy Pink #FF1CAE
|
||||
Spring Green #00FF7F
|
||||
Steel Blue #236B8E
|
||||
Summer Sky #38B0DE
|
||||
Tan #DB9370
|
||||
Thistle #D8BFD8
|
||||
Turquoise #ADEAEA
|
||||
Very Dark Brown #5C4033
|
||||
Very Light Grey #CDCDCD
|
||||
Violet #4F2F4F
|
||||
Violet Red #CC3299
|
||||
Wheat #D8D8BF
|
||||
Yellow Green #99CC32
|
||||
125
Tools/pynche/pyColorChooser.py
Normal file
|
|
@@ -0,0 +1,125 @@
|
|||
"""Color chooser implementing (almost) the tkColorColor interface
|
||||
"""
|
||||
|
||||
import os
|
||||
import Main
|
||||
import ColorDB
|
||||
|
||||
|
||||
|
||||
class Chooser:
|
||||
"""Ask for a color"""
|
||||
def __init__(self,
|
||||
master = None,
|
||||
databasefile = None,
|
||||
initfile = None,
|
||||
ignore = None,
|
||||
wantspec = None):
|
||||
self.__master = master
|
||||
self.__databasefile = databasefile
|
||||
self.__initfile = initfile or os.path.expanduser('~/.pynche')
|
||||
self.__ignore = ignore
|
||||
self.__pw = None
|
||||
self.__wantspec = wantspec
|
||||
|
||||
def show(self, color, options):
|
||||
# scan for options that can override the ctor options
|
||||
self.__wantspec = options.get('wantspec', self.__wantspec)
|
||||
dbfile = options.get('databasefile', self.__databasefile)
|
||||
# load the database file
|
||||
colordb = None
|
||||
if dbfile != self.__databasefile:
|
||||
colordb = ColorDB.get_colordb(dbfile)
|
||||
if not self.__master:
|
||||
from tkinter import Tk
|
||||
self.__master = Tk()
|
||||
if not self.__pw:
|
||||
self.__pw, self.__sb = \
|
||||
Main.build(master = self.__master,
|
||||
initfile = self.__initfile,
|
||||
ignore = self.__ignore)
|
||||
else:
|
||||
self.__pw.deiconify()
|
||||
# convert color
|
||||
if colordb:
|
||||
self.__sb.set_colordb(colordb)
|
||||
else:
|
||||
colordb = self.__sb.colordb()
|
||||
if color:
|
||||
r, g, b = Main.initial_color(color, colordb)
|
||||
self.__sb.update_views(r, g, b)
|
||||
# reset the canceled flag and run it
|
||||
self.__sb.canceled(0)
|
||||
Main.run(self.__pw, self.__sb)
|
||||
rgbtuple = self.__sb.current_rgb()
|
||||
self.__pw.withdraw()
|
||||
# check to see if the cancel button was pushed
|
||||
if self.__sb.canceled_p():
|
||||
return None, None
|
||||
# Try to return the color name from the database if there is an exact
|
||||
# match, otherwise use the "#rrggbb" spec. BAW: Forget about color
|
||||
# aliases for now, maybe later we should return these too.
|
||||
name = None
|
||||
if not self.__wantspec:
|
||||
try:
|
||||
name = colordb.find_byrgb(rgbtuple)[0]
|
||||
except ColorDB.BadColor:
|
||||
pass
|
||||
if name is None:
|
||||
name = ColorDB.triplet_to_rrggbb(rgbtuple)
|
||||
return rgbtuple, name
|
||||
|
||||
def save(self):
|
||||
if self.__sb:
|
||||
self.__sb.save_views()
|
||||
|
||||
|
||||
# convenience stuff
|
||||
_chooser = None
|
||||
|
||||
def askcolor(color = None, **options):
|
||||
"""Ask for a color"""
|
||||
global _chooser
|
||||
if not _chooser:
|
||||
_chooser = Chooser(**options)
|
||||
return _chooser.show(color, options)
|
||||
|
||||
def save():
|
||||
global _chooser
|
||||
if _chooser:
|
||||
_chooser.save()
|
||||
|
||||
|
||||
# test stuff
|
||||
if __name__ == '__main__':
|
||||
from tkinter import *
|
||||
|
||||
class Tester:
|
||||
def __init__(self):
|
||||
self.__root = tk = Tk()
|
||||
b = Button(tk, text='Choose Color...', command=self.__choose)
|
||||
b.pack()
|
||||
self.__l = Label(tk)
|
||||
self.__l.pack()
|
||||
q = Button(tk, text='Quit', command=self.__quit)
|
||||
q.pack()
|
||||
|
||||
def __choose(self, event=None):
|
||||
rgb, name = askcolor(master=self.__root)
|
||||
if rgb is None:
|
||||
text = 'You hit CANCEL!'
|
||||
else:
|
||||
r, g, b = rgb
|
||||
text = 'You picked %s (%3d/%3d/%3d)' % (name, r, g, b)
|
||||
self.__l.configure(text=text)
|
||||
|
||||
def __quit(self, event=None):
|
||||
self.__root.quit()
|
||||
|
||||
def run(self):
|
||||
self.__root.mainloop()
|
||||
t = Tester()
|
||||
t.run()
|
||||
# simpler
|
||||
## print 'color:', askcolor()
|
||||
## print 'color:', askcolor()
|
||||
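For orientation, a minimal interactive use of the chooser defined above might look like the sketch below. It is an illustration, not part of the commit: it assumes the current directory is Tools/pynche (so the sibling Main and ColorDB modules are importable) and that a display is available.

from tkinter import Tk
import pyColorChooser

root = Tk()
root.withdraw()                        # the chooser brings its own top-level window
rgb, name = pyColorChooser.askcolor(master=root)
if rgb is None:
    print("cancelled")
else:
    print("picked", name, rgb)         # name is a color name or an '#rrggbb' spec
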
7
Tools/pynche/pynche.pyw
Normal file
|
|
@@ -0,0 +1,7 @@
|
|||
#! /usr/bin/env python
|
||||
|
||||
"""Run this file under Windows to inhibit the console window.
|
||||
Run the file pynche.py under Unix or when debugging under Windows.
|
||||
"""
|
||||
import Main
|
||||
Main.main()
|
||||
141
Tools/pynche/webcolors.txt
Normal file
|
|
@@ -0,0 +1,141 @@
|
|||
# De-facto NS & MSIE recognized HTML color names
|
||||
AliceBlue #f0f8ff
|
||||
AntiqueWhite #faebd7
|
||||
Aqua #00ffff
|
||||
Aquamarine #7fffd4
|
||||
Azure #f0ffff
|
||||
Beige #f5f5dc
|
||||
Bisque #ffe4c4
|
||||
Black #000000
|
||||
BlanchedAlmond #ffebcd
|
||||
Blue #0000ff
|
||||
BlueViolet #8a2be2
|
||||
Brown #a52a2a
|
||||
BurlyWood #deb887
|
||||
CadetBlue #5f9ea0
|
||||
Chartreuse #7fff00
|
||||
Chocolate #d2691e
|
||||
Coral #ff7f50
|
||||
CornflowerBlue #6495ed
|
||||
Cornsilk #fff8dc
|
||||
Crimson #dc143c
|
||||
Cyan #00ffff
|
||||
DarkBlue #00008b
|
||||
DarkCyan #008b8b
|
||||
DarkGoldenrod #b8860b
|
||||
DarkGray #a9a9a9
|
||||
DarkGreen #006400
|
||||
DarkKhaki #bdb76b
|
||||
DarkMagenta #8b008b
|
||||
DarkOliveGreen #556b2f
|
||||
DarkOrange #ff8c00
|
||||
DarkOrchid #9932cc
|
||||
DarkRed #8b0000
|
||||
DarkSalmon #e9967a
|
||||
DarkSeaGreen #8fbc8f
|
||||
DarkSlateBlue #483d8b
|
||||
DarkSlateGray #2f4f4f
|
||||
DarkTurquoise #00ced1
|
||||
DarkViolet #9400d3
|
||||
DeepPink #ff1493
|
||||
DeepSkyBlue #00bfff
|
||||
DimGray #696969
|
||||
DodgerBlue #1e90ff
|
||||
FireBrick #b22222
|
||||
FloralWhite #fffaf0
|
||||
ForestGreen #228b22
|
||||
Fuchsia #ff00ff
|
||||
Gainsboro #dcdcdc
|
||||
GhostWhite #f8f8ff
|
||||
Gold #ffd700
|
||||
Goldenrod #daa520
|
||||
Gray #808080
|
||||
Green #008000
|
||||
GreenYellow #adff2f
|
||||
Honeydew #f0fff0
|
||||
HotPink #ff69b4
|
||||
IndianRed #cd5c5c
|
||||
Indigo #4b0082
|
||||
Ivory #fffff0
|
||||
Khaki #f0e68c
|
||||
Lavender #e6e6fa
|
||||
LavenderBlush #fff0f5
|
||||
LawnGreen #7cfc00
|
||||
LemonChiffon #fffacd
|
||||
LightBlue #add8e6
|
||||
LightCoral #f08080
|
||||
LightCyan #e0ffff
|
||||
LightGoldenrodYellow #fafad2
|
||||
LightGreen #90ee90
|
||||
LightGrey #d3d3d3
|
||||
LightPink #ffb6c1
|
||||
LightSalmon #ffa07a
|
||||
LightSeaGreen #20b2aa
|
||||
LightSkyBlue #87cefa
|
||||
LightSlateGray #778899
|
||||
LightSteelBlue #b0c4de
|
||||
LightYellow #ffffe0
|
||||
Lime #00ff00
|
||||
LimeGreen #32cd32
|
||||
Linen #faf0e6
|
||||
Magenta #ff00ff
|
||||
Maroon #800000
|
||||
MediumAquamarine #66cdaa
|
||||
MediumBlue #0000cd
|
||||
MediumOrchid #ba55d3
|
||||
MediumPurple #9370db
|
||||
MediumSeaGreen #3cb371
|
||||
MediumSlateBlue #7b68ee
|
||||
MediumSpringGreen #00fa9a
|
||||
MediumTurquoise #48d1cc
|
||||
MediumVioletRed #c71585
|
||||
MidnightBlue #191970
|
||||
MintCream #f5fffa
|
||||
MistyRose #ffe4e1
|
||||
Moccasin #ffe4b5
|
||||
NavajoWhite #ffdead
|
||||
Navy #000080
|
||||
OldLace #fdf5e6
|
||||
Olive #808000
|
||||
OliveDrab #6b8e23
|
||||
Orange #ffa500
|
||||
OrangeRed #ff4500
|
||||
Orchid #da70d6
|
||||
PaleGoldenrod #eee8aa
|
||||
PaleGreen #98fb98
|
||||
PaleTurquoise #afeeee
|
||||
PaleVioletRed #db7093
|
||||
PapayaWhip #ffefd5
|
||||
PeachPuff #ffdab9
|
||||
Peru #cd853f
|
||||
Pink #ffc0cb
|
||||
Plum #dda0dd
|
||||
PowderBlue #b0e0e6
|
||||
Purple #800080
|
||||
Red #ff0000
|
||||
RosyBrown #bc8f8f
|
||||
RoyalBlue #4169e1
|
||||
SaddleBrown #8b4513
|
||||
Salmon #fa8072
|
||||
SandyBrown #f4a460
|
||||
SeaGreen #2e8b57
|
||||
Seashell #fff5ee
|
||||
Sienna #a0522d
|
||||
Silver #c0c0c0
|
||||
SkyBlue #87ceeb
|
||||
SlateBlue #6a5acd
|
||||
SlateGray #708090
|
||||
Snow #fffafa
|
||||
SpringGreen #00ff7f
|
||||
SteelBlue #4682b4
|
||||
Tan #d2b48c
|
||||
Teal #008080
|
||||
Thistle #d8bfd8
|
||||
Tomato #ff6347
|
||||
Turquoise #40e0d0
|
||||
Violet #ee82ee
|
||||
Wheat #f5deb3
|
||||
White #ffffff
|
||||
WhiteSmoke #f5f5f5
|
||||
Yellow #ffff00
|
||||
YellowGreen #9acd32
|
||||
217
Tools/pynche/websafe.txt
Normal file
|
|
@@ -0,0 +1,217 @@
|
|||
# Websafe RGB values
|
||||
#000000
|
||||
#000033
|
||||
#000066
|
||||
#000099
|
||||
#0000cc
|
||||
#0000ff
|
||||
#003300
|
||||
#003333
|
||||
#003366
|
||||
#003399
|
||||
#0033cc
|
||||
#0033ff
|
||||
#006600
|
||||
#006633
|
||||
#006666
|
||||
#006699
|
||||
#0066cc
|
||||
#0066ff
|
||||
#009900
|
||||
#009933
|
||||
#009966
|
||||
#009999
|
||||
#0099cc
|
||||
#0099ff
|
||||
#00cc00
|
||||
#00cc33
|
||||
#00cc66
|
||||
#00cc99
|
||||
#00cccc
|
||||
#00ccff
|
||||
#00ff00
|
||||
#00ff33
|
||||
#00ff66
|
||||
#00ff99
|
||||
#00ffcc
|
||||
#00ffff
|
||||
#330000
|
||||
#330033
|
||||
#330066
|
||||
#330099
|
||||
#3300cc
|
||||
#3300ff
|
||||
#333300
|
||||
#333333
|
||||
#333366
|
||||
#333399
|
||||
#3333cc
|
||||
#3333ff
|
||||
#336600
|
||||
#336633
|
||||
#336666
|
||||
#336699
|
||||
#3366cc
|
||||
#3366ff
|
||||
#339900
|
||||
#339933
|
||||
#339966
|
||||
#339999
|
||||
#3399cc
|
||||
#3399ff
|
||||
#33cc00
|
||||
#33cc33
|
||||
#33cc66
|
||||
#33cc99
|
||||
#33cccc
|
||||
#33ccff
|
||||
#33ff00
|
||||
#33ff33
|
||||
#33ff66
|
||||
#33ff99
|
||||
#33ffcc
|
||||
#33ffff
|
||||
#660000
|
||||
#660033
|
||||
#660066
|
||||
#660099
|
||||
#6600cc
|
||||
#6600ff
|
||||
#663300
|
||||
#663333
|
||||
#663366
|
||||
#663399
|
||||
#6633cc
|
||||
#6633ff
|
||||
#666600
|
||||
#666633
|
||||
#666666
|
||||
#666699
|
||||
#6666cc
|
||||
#6666ff
|
||||
#669900
|
||||
#669933
|
||||
#669966
|
||||
#669999
|
||||
#6699cc
|
||||
#6699ff
|
||||
#66cc00
|
||||
#66cc33
|
||||
#66cc66
|
||||
#66cc99
|
||||
#66cccc
|
||||
#66ccff
|
||||
#66ff00
|
||||
#66ff33
|
||||
#66ff66
|
||||
#66ff99
|
||||
#66ffcc
|
||||
#66ffff
|
||||
#990000
|
||||
#990033
|
||||
#990066
|
||||
#990099
|
||||
#9900cc
|
||||
#9900ff
|
||||
#993300
|
||||
#993333
|
||||
#993366
|
||||
#993399
|
||||
#9933cc
|
||||
#9933ff
|
||||
#996600
|
||||
#996633
|
||||
#996666
|
||||
#996699
|
||||
#9966cc
|
||||
#9966ff
|
||||
#999900
|
||||
#999933
|
||||
#999966
|
||||
#999999
|
||||
#9999cc
|
||||
#9999ff
|
||||
#99cc00
|
||||
#99cc33
|
||||
#99cc66
|
||||
#99cc99
|
||||
#99cccc
|
||||
#99ccff
|
||||
#99ff00
|
||||
#99ff33
|
||||
#99ff66
|
||||
#99ff99
|
||||
#99ffcc
|
||||
#99ffff
|
||||
#cc0000
|
||||
#cc0033
|
||||
#cc0066
|
||||
#cc0099
|
||||
#cc00cc
|
||||
#cc00ff
|
||||
#cc3300
|
||||
#cc3333
|
||||
#cc3366
|
||||
#cc3399
|
||||
#cc33cc
|
||||
#cc33ff
|
||||
#cc6600
|
||||
#cc6633
|
||||
#cc6666
|
||||
#cc6699
|
||||
#cc66cc
|
||||
#cc66ff
|
||||
#cc9900
|
||||
#cc9933
|
||||
#cc9966
|
||||
#cc9999
|
||||
#cc99cc
|
||||
#cc99ff
|
||||
#cccc00
|
||||
#cccc33
|
||||
#cccc66
|
||||
#cccc99
|
||||
#cccccc
|
||||
#ccccff
|
||||
#ccff00
|
||||
#ccff33
|
||||
#ccff66
|
||||
#ccff99
|
||||
#ccffcc
|
||||
#ccffff
|
||||
#ff0000
|
||||
#ff0033
|
||||
#ff0066
|
||||
#ff0099
|
||||
#ff00cc
|
||||
#ff00ff
|
||||
#ff3300
|
||||
#ff3333
|
||||
#ff3366
|
||||
#ff3399
|
||||
#ff33cc
|
||||
#ff33ff
|
||||
#ff6600
|
||||
#ff6633
|
||||
#ff6666
|
||||
#ff6699
|
||||
#ff66cc
|
||||
#ff66ff
|
||||
#ff9900
|
||||
#ff9933
|
||||
#ff9966
|
||||
#ff9999
|
||||
#ff99cc
|
||||
#ff99ff
|
||||
#ffcc00
|
||||
#ffcc33
|
||||
#ffcc66
|
||||
#ffcc99
|
||||
#ffcccc
|
||||
#ffccff
|
||||
#ffff00
|
||||
#ffff33
|
||||
#ffff66
|
||||
#ffff99
|
||||
#ffffcc
|
||||
#ffffff
|
||||
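The 216 colors above follow a simple rule rather than being hand-picked: every combination of the six "safe" channel values 00, 33, 66, 99, cc and ff, with red varying slowest and blue fastest. A short sketch that regenerates the same list:

steps = ["00", "33", "66", "99", "cc", "ff"]
websafe = ["#%s%s%s" % (r, g, b)
           for r in steps for g in steps for b in steps]
assert len(websafe) == 216
assert websafe[0] == "#000000" and websafe[1] == "#000033"
assert websafe[-1] == "#ffffff"
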
5
Tools/scripts/2to3.py
Normal file
|
|
@@ -0,0 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
import sys
|
||||
from lib2to3.main import main
|
||||
|
||||
sys.exit(main("lib2to3.fixes"))
|
||||
202
Tools/scripts/abitype.py
Normal file
|
|
@@ -0,0 +1,202 @@
|
|||
#!/usr/bin/env python3
|
||||
# This script converts a C file to use the PEP 384 type definition API
|
||||
# Usage: abitype.py < old_code > new_code
|
||||
import re, sys
|
||||
|
||||
###### Replacement of PyTypeObject static instances ##############
|
||||
|
||||
# classify each token, giving it a one-letter code:
|
||||
# S: static
|
||||
# T: PyTypeObject
|
||||
# I: ident
|
||||
# W: whitespace
|
||||
# =, {, }, ; : themselves
|
||||
def classify():
|
||||
res = []
|
||||
for t,v in tokens:
|
||||
if t == 'other' and v in "={};":
|
||||
res.append(v)
|
||||
elif t == 'ident':
|
||||
if v == 'PyTypeObject':
|
||||
res.append('T')
|
||||
elif v == 'static':
|
||||
res.append('S')
|
||||
else:
|
||||
res.append('I')
|
||||
elif t == 'ws':
|
||||
res.append('W')
|
||||
else:
|
||||
res.append('.')
|
||||
return ''.join(res)
|
||||
|
||||
# Obtain a list of fields of a PyTypeObject, in declaration order,
|
||||
# skipping ob_base
|
||||
# All comments are dropped from the variable (which are typically
|
||||
# just the slot names, anyway), and information is discarded whether
|
||||
# the original type was static.
|
||||
def get_fields(start, real_end):
|
||||
pos = start
|
||||
# static?
|
||||
if tokens[pos][1] == 'static':
|
||||
pos += 2
|
||||
# PyTypeObject
|
||||
pos += 2
|
||||
# name
|
||||
name = tokens[pos][1]
|
||||
pos += 1
|
||||
while tokens[pos][1] != '{':
|
||||
pos += 1
|
||||
pos += 1
|
||||
# PyVarObject_HEAD_INIT
|
||||
while tokens[pos][0] in ('ws', 'comment'):
|
||||
pos += 1
|
||||
if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
|
||||
raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
|
||||
while tokens[pos][1] != ')':
|
||||
pos += 1
|
||||
pos += 1
|
||||
# field definitions: various tokens, comma-separated
|
||||
fields = []
|
||||
while True:
|
||||
while tokens[pos][0] in ('ws', 'comment'):
|
||||
pos += 1
|
||||
end = pos
|
||||
while tokens[end][1] not in ',}':
|
||||
if tokens[end][1] == '(':
|
||||
nesting = 1
|
||||
while nesting:
|
||||
end += 1
|
||||
if tokens[end][1] == '(': nesting+=1
|
||||
if tokens[end][1] == ')': nesting-=1
|
||||
end += 1
|
||||
assert end < real_end
|
||||
# join field, excluding separator and trailing ws
|
||||
end1 = end-1
|
||||
while tokens[end1][0] in ('ws', 'comment'):
|
||||
end1 -= 1
|
||||
fields.append(''.join(t[1] for t in tokens[pos:end1+1]))
|
||||
if tokens[end][1] == '}':
|
||||
break
|
||||
pos = end+1
|
||||
return name, fields
|
||||
|
||||
# List of type slots as of Python 3.2, omitting ob_base
|
||||
typeslots = [
|
||||
'tp_name',
|
||||
'tp_basicsize',
|
||||
'tp_itemsize',
|
||||
'tp_dealloc',
|
||||
'tp_print',
|
||||
'tp_getattr',
|
||||
'tp_setattr',
|
||||
'tp_reserved',
|
||||
'tp_repr',
|
||||
'tp_as_number',
|
||||
'tp_as_sequence',
|
||||
'tp_as_mapping',
|
||||
'tp_hash',
|
||||
'tp_call',
|
||||
'tp_str',
|
||||
'tp_getattro',
|
||||
'tp_setattro',
|
||||
'tp_as_buffer',
|
||||
'tp_flags',
|
||||
'tp_doc',
|
||||
'tp_traverse',
|
||||
'tp_clear',
|
||||
'tp_richcompare',
|
||||
'tp_weaklistoffset',
|
||||
'tp_iter',
|
||||
    'tp_iternext',
|
||||
'tp_methods',
|
||||
'tp_members',
|
||||
'tp_getset',
|
||||
'tp_base',
|
||||
'tp_dict',
|
||||
'tp_descr_get',
|
||||
'tp_descr_set',
|
||||
'tp_dictoffset',
|
||||
'tp_init',
|
||||
'tp_alloc',
|
||||
'tp_new',
|
||||
'tp_free',
|
||||
'tp_is_gc',
|
||||
'tp_bases',
|
||||
'tp_mro',
|
||||
'tp_cache',
|
||||
'tp_subclasses',
|
||||
'tp_weaklist',
|
||||
'tp_del',
|
||||
'tp_version_tag',
|
||||
]
|
||||
|
||||
# Generate a PyType_Spec definition
|
||||
def make_slots(name, fields):
|
||||
res = []
|
||||
res.append('static PyType_Slot %s_slots[] = {' % name)
|
||||
# defaults for spec
|
||||
spec = { 'tp_itemsize':'0' }
|
||||
for i, val in enumerate(fields):
|
||||
if val.endswith('0'):
|
||||
continue
|
||||
if typeslots[i] in ('tp_name', 'tp_doc', 'tp_basicsize',
|
||||
'tp_itemsize', 'tp_flags'):
|
||||
spec[typeslots[i]] = val
|
||||
continue
|
||||
res.append(' {Py_%s, %s},' % (typeslots[i], val))
|
||||
res.append('};')
|
||||
res.append('static PyType_Spec %s_spec = {' % name)
|
||||
res.append(' %s,' % spec['tp_name'])
|
||||
res.append(' %s,' % spec['tp_basicsize'])
|
||||
res.append(' %s,' % spec['tp_itemsize'])
|
||||
res.append(' %s,' % spec['tp_flags'])
|
||||
res.append(' %s_slots,' % name)
|
||||
res.append('};\n')
|
||||
return '\n'.join(res)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
############ Simplistic C scanner ##################################
|
||||
tokenizer = re.compile(
|
||||
r"(?P<preproc>#.*\n)"
|
||||
r"|(?P<comment>/\*.*?\*/)"
|
||||
r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
|
||||
r"|(?P<ws>[ \t\n]+)"
|
||||
r"|(?P<other>.)",
|
||||
re.MULTILINE)
|
||||
|
||||
tokens = []
|
||||
source = sys.stdin.read()
|
||||
pos = 0
|
||||
while pos != len(source):
|
||||
m = tokenizer.match(source, pos)
|
||||
tokens.append([m.lastgroup, m.group()])
|
||||
pos += len(tokens[-1][1])
|
||||
if tokens[-1][0] == 'preproc':
|
||||
# continuation lines are considered
|
||||
# only in preprocess statements
|
||||
while tokens[-1][1].endswith('\\\n'):
|
||||
nl = source.find('\n', pos)
|
||||
if nl == -1:
|
||||
line = source[pos:]
|
||||
else:
|
||||
line = source[pos:nl+1]
|
||||
tokens[-1][1] += line
|
||||
pos += len(line)
|
||||
|
||||
# Main loop: replace all static PyTypeObjects until
|
||||
# there are none left.
|
||||
while 1:
|
||||
c = classify()
|
||||
m = re.search('(SW)?TWIW?=W?{.*?};', c)
|
||||
if not m:
|
||||
break
|
||||
start = m.start()
|
||||
end = m.end()
|
||||
name, fields = get_fields(start, end)
|
||||
tokens[start:end] = [('',make_slots(name, fields))]
|
||||
|
||||
# Output result to stdout
|
||||
for t, v in tokens:
|
||||
sys.stdout.write(v)
|
||||
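A worked example may make the conversion concrete. The sketch below is illustrative only, not part of the commit: it assumes the script is available at Tools/scripts/abitype.py relative to the working directory, and FooObject/Foo_dealloc are made-up names standing in for a real extension type.

import subprocess, sys

old_code = '''\
static PyTypeObject Foo_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "foo.Foo",                  /* tp_name */
    sizeof(FooObject),          /* tp_basicsize */
    0,                          /* tp_itemsize */
    (destructor)Foo_dealloc,    /* tp_dealloc */
    0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0,        /* tp_print .. tp_as_buffer, all unused here */
    Py_TPFLAGS_DEFAULT          /* tp_flags */
};
'''
new_code = subprocess.run(
    [sys.executable, "Tools/scripts/abitype.py"],
    input=old_code, capture_output=True, text=True, check=True).stdout
print(new_code)   # emits a Foo_Type_slots[] array and a matching Foo_Type_spec
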
129
Tools/scripts/analyze_dxp.py
Normal file
|
|
@@ -0,0 +1,129 @@
|
|||
"""
|
||||
Some helper functions to analyze the output of sys.getdxp() (which is
|
||||
only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE).
|
||||
These will tell you which opcodes have been executed most frequently
|
||||
in the current process, and, if Python was also built with -DDXPAIRS,
|
||||
will tell you which instruction _pairs_ were executed most frequently,
|
||||
which may help in choosing new instructions.
|
||||
|
||||
If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing
|
||||
this module will raise a RuntimeError.
|
||||
|
||||
If you're running a script you want to profile, a simple way to get
|
||||
the common pairs is:
|
||||
|
||||
$ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \
|
||||
./python -i -O the_script.py --args
|
||||
...
|
||||
> from analyze_dxp import *
|
||||
> s = render_common_pairs()
|
||||
> open('/tmp/some_file', 'w').write(s)
|
||||
"""
|
||||
|
||||
import copy
|
||||
import opcode
|
||||
import operator
|
||||
import sys
|
||||
import threading
|
||||
|
||||
if not hasattr(sys, "getdxp"):
|
||||
raise RuntimeError("Can't import analyze_dxp: Python built without"
|
||||
" -DDYNAMIC_EXECUTION_PROFILE.")
|
||||
|
||||
|
||||
_profile_lock = threading.RLock()
|
||||
_cumulative_profile = sys.getdxp()
|
||||
|
||||
# If Python was built with -DDXPAIRS, sys.getdxp() returns a list of
|
||||
# lists of ints. Otherwise it returns just a list of ints.
|
||||
def has_pairs(profile):
|
||||
"""Returns True if the Python that produced the argument profile
|
||||
was built with -DDXPAIRS."""
|
||||
|
||||
return len(profile) > 0 and isinstance(profile[0], list)
|
||||
|
||||
|
||||
def reset_profile():
|
||||
"""Forgets any execution profile that has been gathered so far."""
|
||||
with _profile_lock:
|
||||
sys.getdxp() # Resets the internal profile
|
||||
global _cumulative_profile
|
||||
_cumulative_profile = sys.getdxp() # 0s out our copy.
|
||||
|
||||
|
||||
def merge_profile():
|
||||
"""Reads sys.getdxp() and merges it into this module's cached copy.
|
||||
|
||||
We need this because sys.getdxp() 0s itself every time it's called."""
|
||||
|
||||
with _profile_lock:
|
||||
new_profile = sys.getdxp()
|
||||
if has_pairs(new_profile):
|
||||
for first_inst in range(len(_cumulative_profile)):
|
||||
for second_inst in range(len(_cumulative_profile[first_inst])):
|
||||
_cumulative_profile[first_inst][second_inst] += (
|
||||
new_profile[first_inst][second_inst])
|
||||
else:
|
||||
for inst in range(len(_cumulative_profile)):
|
||||
_cumulative_profile[inst] += new_profile[inst]
|
||||
|
||||
|
||||
def snapshot_profile():
|
||||
"""Returns the cumulative execution profile until this call."""
|
||||
with _profile_lock:
|
||||
merge_profile()
|
||||
return copy.deepcopy(_cumulative_profile)
|
||||
|
||||
|
||||
def common_instructions(profile):
|
||||
"""Returns the most common opcodes in order of descending frequency.
|
||||
|
||||
The result is a list of tuples of the form
|
||||
(opcode, opname, # of occurrences)
|
||||
|
||||
"""
|
||||
if has_pairs(profile) and profile:
|
||||
inst_list = profile[-1]
|
||||
else:
|
||||
inst_list = profile
|
||||
result = [(op, opcode.opname[op], count)
|
||||
for op, count in enumerate(inst_list)
|
||||
if count > 0]
|
||||
result.sort(key=operator.itemgetter(2), reverse=True)
|
||||
return result
|
||||
|
||||
|
||||
def common_pairs(profile):
|
||||
"""Returns the most common opcode pairs in order of descending frequency.
|
||||
|
||||
The result is a list of tuples of the form
|
||||
((1st opcode, 2nd opcode),
|
||||
(1st opname, 2nd opname),
|
||||
# of occurrences of the pair)
|
||||
|
||||
"""
|
||||
if not has_pairs(profile):
|
||||
return []
|
||||
result = [((op1, op2), (opcode.opname[op1], opcode.opname[op2]), count)
|
||||
# Drop the row of single-op profiles with [:-1]
|
||||
for op1, op1profile in enumerate(profile[:-1])
|
||||
for op2, count in enumerate(op1profile)
|
||||
if count > 0]
|
||||
result.sort(key=operator.itemgetter(2), reverse=True)
|
||||
return result
|
||||
|
||||
|
||||
def render_common_pairs(profile=None):
|
||||
"""Renders the most common opcode pairs to a string in order of
|
||||
descending frequency.
|
||||
|
||||
The result is a series of lines of the form:
|
||||
# of occurrences: ('1st opname', '2nd opname')
|
||||
|
||||
"""
|
||||
if profile is None:
|
||||
profile = snapshot_profile()
|
||||
def seq():
|
||||
for _, ops, count in common_pairs(profile):
|
||||
yield "%s: %s\n" % (count, ops)
|
||||
return ''.join(seq())
|
||||
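The sketch below restates the docstring's workflow in a form that degrades gracefully on an ordinary interpreter. It assumes Tools/scripts is on sys.path; the profiling hooks only exist when Python was configured with -DDYNAMIC_EXECUTION_PROFILE, and pair data additionally needs -DDXPAIRS.

import sys

if hasattr(sys, "getdxp"):
    from analyze_dxp import common_instructions, render_common_pairs, snapshot_profile
    profile = snapshot_profile()
    print(common_instructions(profile)[:10])      # ten most-executed opcodes
    with open("/tmp/common_pairs.txt", "w") as f:
        f.write(render_common_pairs(profile))     # empty unless built with -DDXPAIRS
else:
    print("this interpreter was not built with dynamic execution profiling")
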
132
Tools/scripts/byext.py
Normal file
|
|
@@ -0,0 +1,132 @@
|
|||
#! /usr/bin/env python3
|
||||
|
||||
"""Show file statistics by extension."""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
class Stats:
|
||||
|
||||
def __init__(self):
|
||||
self.stats = {}
|
||||
|
||||
def statargs(self, args):
|
||||
for arg in args:
|
||||
if os.path.isdir(arg):
|
||||
self.statdir(arg)
|
||||
elif os.path.isfile(arg):
|
||||
self.statfile(arg)
|
||||
else:
|
||||
sys.stderr.write("Can't find %s\n" % arg)
|
||||
self.addstats("<???>", "unknown", 1)
|
||||
|
||||
def statdir(self, dir):
|
||||
self.addstats("<dir>", "dirs", 1)
|
||||
try:
|
||||
names = os.listdir(dir)
|
||||
except OSError as err:
|
||||
sys.stderr.write("Can't list %s: %s\n" % (dir, err))
|
||||
self.addstats("<dir>", "unlistable", 1)
|
||||
return
|
||||
for name in sorted(names):
|
||||
if name.startswith(".#"):
|
||||
continue # Skip CVS temp files
|
||||
if name.endswith("~"):
|
||||
continue # Skip Emacs backup files
|
||||
full = os.path.join(dir, name)
|
||||
if os.path.islink(full):
|
||||
self.addstats("<lnk>", "links", 1)
|
||||
elif os.path.isdir(full):
|
||||
self.statdir(full)
|
||||
else:
|
||||
self.statfile(full)
|
||||
|
||||
def statfile(self, filename):
|
||||
head, ext = os.path.splitext(filename)
|
||||
head, base = os.path.split(filename)
|
||||
if ext == base:
|
||||
ext = "" # E.g. .cvsignore is deemed not to have an extension
|
||||
ext = os.path.normcase(ext)
|
||||
if not ext:
|
||||
ext = "<none>"
|
||||
self.addstats(ext, "files", 1)
|
||||
try:
|
||||
with open(filename, "rb") as f:
|
||||
data = f.read()
|
||||
except IOError as err:
|
||||
sys.stderr.write("Can't open %s: %s\n" % (filename, err))
|
||||
self.addstats(ext, "unopenable", 1)
|
||||
return
|
||||
self.addstats(ext, "bytes", len(data))
|
||||
if b'\0' in data:
|
||||
self.addstats(ext, "binary", 1)
|
||||
return
|
||||
if not data:
|
||||
self.addstats(ext, "empty", 1)
|
||||
# self.addstats(ext, "chars", len(data))
|
||||
lines = str(data, "latin-1").splitlines()
|
||||
self.addstats(ext, "lines", len(lines))
|
||||
del lines
|
||||
words = data.split()
|
||||
self.addstats(ext, "words", len(words))
|
||||
|
||||
def addstats(self, ext, key, n):
|
||||
d = self.stats.setdefault(ext, {})
|
||||
d[key] = d.get(key, 0) + n
|
||||
|
||||
def report(self):
|
||||
exts = sorted(self.stats)
|
||||
# Get the column keys
|
||||
columns = {}
|
||||
for ext in exts:
|
||||
columns.update(self.stats[ext])
|
||||
cols = sorted(columns)
|
||||
colwidth = {}
|
||||
colwidth["ext"] = max(map(len, exts))
|
||||
minwidth = 6
|
||||
self.stats["TOTAL"] = {}
|
||||
for col in cols:
|
||||
total = 0
|
||||
cw = max(minwidth, len(col))
|
||||
for ext in exts:
|
||||
value = self.stats[ext].get(col)
|
||||
if value is None:
|
||||
w = 0
|
||||
else:
|
||||
w = len("%d" % value)
|
||||
total += value
|
||||
cw = max(cw, w)
|
||||
cw = max(cw, len(str(total)))
|
||||
colwidth[col] = cw
|
||||
self.stats["TOTAL"][col] = total
|
||||
exts.append("TOTAL")
|
||||
for ext in exts:
|
||||
self.stats[ext]["ext"] = ext
|
||||
cols.insert(0, "ext")
|
||||
|
||||
def printheader():
|
||||
for col in cols:
|
||||
print("%*s" % (colwidth[col], col), end=' ')
|
||||
print()
|
||||
|
||||
printheader()
|
||||
for ext in exts:
|
||||
for col in cols:
|
||||
value = self.stats[ext].get(col, "")
|
||||
print("%*s" % (colwidth[col], value), end=' ')
|
||||
print()
|
||||
printheader() # Another header at the bottom
|
||||
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
if not args:
|
||||
args = [os.curdir]
|
||||
s = Stats()
|
||||
s.statargs(args)
|
||||
s.report()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
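Besides the command-line entry point, the Stats class above can be driven directly; a small sketch, assuming byext.py is importable (for instance, the current directory is Tools/scripts):

from byext import Stats

s = Stats()
s.statargs(["."])      # any mix of file and directory paths
s.report()             # one row per extension plus a TOTAL row, bracketed by headers
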
61
Tools/scripts/byteyears.py
Normal file
|
|
@@ -0,0 +1,61 @@
|
|||
#! /usr/bin/env python3
|
||||
|
||||
# Print the product of age and size of each file, in suitable units.
|
||||
#
|
||||
# Usage: byteyears [ -a | -m | -c ] file ...
|
||||
#
|
||||
# Options -[amc] select atime, mtime (default) or ctime as age.
|
||||
|
||||
import sys, os, time
|
||||
from stat import *
|
||||
|
||||
def main():
|
||||
|
||||
# Use lstat() to stat files if it exists, else stat()
|
||||
try:
|
||||
statfunc = os.lstat
|
||||
except AttributeError:
|
||||
statfunc = os.stat
|
||||
|
||||
# Parse options
|
||||
if sys.argv[1] == '-m':
|
||||
itime = ST_MTIME
|
||||
del sys.argv[1]
|
||||
elif sys.argv[1] == '-c':
|
||||
itime = ST_CTIME
|
||||
del sys.argv[1]
|
||||
elif sys.argv[1] == '-a':
|
||||
        itime = ST_ATIME
|
||||
del sys.argv[1]
|
||||
else:
|
||||
itime = ST_MTIME
|
||||
|
||||
secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
|
||||
now = time.time() # Current time, for age computations
|
||||
status = 0 # Exit status, set to 1 on errors
|
||||
|
||||
# Compute max file name length
|
||||
maxlen = 1
|
||||
for filename in sys.argv[1:]:
|
||||
maxlen = max(maxlen, len(filename))
|
||||
|
||||
# Process each argument in turn
|
||||
for filename in sys.argv[1:]:
|
||||
try:
|
||||
st = statfunc(filename)
|
||||
except OSError as msg:
|
||||
sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
|
||||
status = 1
|
||||
st = ()
|
||||
if st:
|
||||
anytime = st[itime]
|
||||
size = st[ST_SIZE]
|
||||
age = now - anytime
|
||||
byteyears = float(size) * float(age) / secs_per_year
|
||||
print(filename.ljust(maxlen), end=' ')
|
||||
print(repr(int(byteyears)).rjust(8))
|
||||
|
||||
sys.exit(status)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
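The metric itself is easy to restate: a file's size in bytes multiplied by its age in years. A self-contained sketch for a single file, using mtime like the script's default:

import os, time

st = os.lstat(__file__)
age_years = (time.time() - st.st_mtime) / (365.0 * 24.0 * 3600.0)
print(int(st.st_size * age_years), "byte-years")
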
32
Tools/scripts/checkpip.py
Normal file
|
|
@@ -0,0 +1,32 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Checks that the versions of the projects bundled in ensurepip are the latest
|
||||
versions available.
|
||||
"""
|
||||
import ensurepip
|
||||
import json
|
||||
import urllib.request
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
outofdate = False
|
||||
|
||||
for project, version in ensurepip._PROJECTS:
|
||||
data = json.loads(urllib.request.urlopen(
|
||||
"https://pypi.org/pypi/{}/json".format(project),
|
||||
cadefault=True,
|
||||
).read().decode("utf8"))
|
||||
upstream_version = data["info"]["version"]
|
||||
|
||||
if version != upstream_version:
|
||||
outofdate = True
|
||||
print("The latest version of {} on PyPI is {}, but ensurepip "
|
||||
"has {}".format(project, upstream_version, version))
|
||||
|
||||
if outofdate:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
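The core of the check is one JSON request per bundled project; for example (network access required, and "pip" is used here purely as an illustrative project name):

import json
import urllib.request

with urllib.request.urlopen("https://pypi.org/pypi/pip/json") as resp:
    info = json.load(resp)
print(info["info"]["version"])   # latest version published on PyPI
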
69
Tools/scripts/checkpyc.py
Normal file
|
|
@@ -0,0 +1,69 @@
|
|||
#! /usr/bin/env python3
|
||||
# Check that all ".pyc" files exist and are up-to-date
|
||||
# Uses module 'os'
|
||||
|
||||
import sys
|
||||
import os
|
||||
from stat import ST_MTIME
|
||||
import importlib.util
|
||||
|
||||
# PEP 3147 compatibility (PYC Repository Directories)
|
||||
cache_from_source = (importlib.util.cache_from_source if sys.implementation.cache_tag
|
||||
else lambda path: path + 'c')
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) > 1:
|
||||
verbose = (sys.argv[1] == '-v')
|
||||
silent = (sys.argv[1] == '-s')
|
||||
else:
|
||||
verbose = silent = False
|
||||
MAGIC = importlib.util.MAGIC_NUMBER
|
||||
if not silent:
|
||||
print('Using MAGIC word', repr(MAGIC))
|
||||
for dirname in sys.path:
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except OSError:
|
||||
print('Cannot list directory', repr(dirname))
|
||||
continue
|
||||
if not silent:
|
||||
print('Checking ', repr(dirname), '...')
|
||||
for name in sorted(names):
|
||||
if name.endswith('.py'):
|
||||
name = os.path.join(dirname, name)
|
||||
try:
|
||||
st = os.stat(name)
|
||||
except OSError:
|
||||
print('Cannot stat', repr(name))
|
||||
continue
|
||||
if verbose:
|
||||
print('Check', repr(name), '...')
|
||||
name_c = cache_from_source(name)
|
||||
try:
|
||||
with open(name_c, 'rb') as f:
|
||||
magic_str = f.read(4)
|
||||
mtime_str = f.read(4)
|
||||
except IOError:
|
||||
print('Cannot open', repr(name_c))
|
||||
continue
|
||||
if magic_str != MAGIC:
|
||||
print('Bad MAGIC word in ".pyc" file', end=' ')
|
||||
print(repr(name_c))
|
||||
continue
|
||||
mtime = get_long(mtime_str)
|
||||
if mtime in {0, -1}:
|
||||
print('Bad ".pyc" file', repr(name_c))
|
||||
elif mtime != st[ST_MTIME]:
|
||||
print('Out-of-date ".pyc" file', end=' ')
|
||||
print(repr(name_c))
|
||||
|
||||
|
||||
def get_long(s):
|
||||
if len(s) != 4:
|
||||
return -1
|
||||
return s[0] + (s[1] << 8) + (s[2] << 16) + (s[3] << 24)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
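The get_long() helper above decodes the little-endian 32-bit timestamp stored in the .pyc header; struct performs the same decoding and can serve as a quick sanity check:

import struct

raw = struct.pack("<I", 1530000000)              # an arbitrary mtime value
manual = raw[0] + (raw[1] << 8) + (raw[2] << 16) + (raw[3] << 24)
assert manual == struct.unpack("<I", raw)[0] == 1530000000
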
276
Tools/scripts/cleanfuture.py
Normal file
|
|
@@ -0,0 +1,276 @@
|
|||
#! /usr/bin/env python3
|
||||
|
||||
"""cleanfuture [-d][-r][-v] path ...
|
||||
|
||||
-d Dry run. Analyze, but don't make any changes to, files.
|
||||
-r Recurse. Search for all .py files in subdirectories too.
|
||||
-v Verbose. Print informative msgs.
|
||||
|
||||
Search Python (.py) files for future statements, and remove the features
|
||||
from such statements that are already mandatory in the version of Python
|
||||
you're using.
|
||||
|
||||
Pass one or more file and/or directory paths. When a directory path, all
|
||||
.py files within the directory will be examined, and, if the -r option is
|
||||
given, likewise recursively for subdirectories.
|
||||
|
||||
Overwrites files in place, renaming the originals with a .bak extension. If
|
||||
cleanfuture finds nothing to change, the file is left alone. If cleanfuture
|
||||
does change a file, the changed file is a fixed-point (i.e., running
|
||||
cleanfuture on the resulting .py file won't change it again, at least not
|
||||
until you try it again with a later Python release).
|
||||
|
||||
Limitations: You can do these things, but this tool won't help you then:
|
||||
|
||||
+ A future statement cannot be mixed with any other statement on the same
|
||||
physical line (separated by semicolon).
|
||||
|
||||
+ A future statement cannot contain an "as" clause.
|
||||
|
||||
Example: Assuming you're using Python 2.2, if a file containing
|
||||
|
||||
from __future__ import nested_scopes, generators
|
||||
|
||||
is analyzed by cleanfuture, the line is rewritten to
|
||||
|
||||
from __future__ import generators
|
||||
|
||||
because nested_scopes is no longer optional in 2.2 but generators is.
|
||||
"""
|
||||
|
||||
import __future__
|
||||
import tokenize
|
||||
import os
|
||||
import sys
|
||||
|
||||
dryrun = 0
|
||||
recurse = 0
|
||||
verbose = 0
|
||||
|
||||
def errprint(*args):
|
||||
strings = map(str, args)
|
||||
msg = ' '.join(strings)
|
||||
if msg[-1:] != '\n':
|
||||
msg += '\n'
|
||||
sys.stderr.write(msg)
|
||||
|
||||
def main():
|
||||
import getopt
|
||||
global verbose, recurse, dryrun
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "drv")
|
||||
except getopt.error as msg:
|
||||
errprint(msg)
|
||||
return
|
||||
for o, a in opts:
|
||||
if o == '-d':
|
||||
dryrun += 1
|
||||
elif o == '-r':
|
||||
recurse += 1
|
||||
elif o == '-v':
|
||||
verbose += 1
|
||||
if not args:
|
||||
errprint("Usage:", __doc__)
|
||||
return
|
||||
for arg in args:
|
||||
check(arg)
|
||||
|
||||
def check(file):
|
||||
if os.path.isdir(file) and not os.path.islink(file):
|
||||
if verbose:
|
||||
print("listing directory", file)
|
||||
names = os.listdir(file)
|
||||
for name in names:
|
||||
fullname = os.path.join(file, name)
|
||||
if ((recurse and os.path.isdir(fullname) and
|
||||
not os.path.islink(fullname))
|
||||
or name.lower().endswith(".py")):
|
||||
check(fullname)
|
||||
return
|
||||
|
||||
if verbose:
|
||||
print("checking", file, "...", end=' ')
|
||||
try:
|
||||
f = open(file)
|
||||
except IOError as msg:
|
||||
errprint("%r: I/O Error: %s" % (file, str(msg)))
|
||||
return
|
||||
|
||||
ff = FutureFinder(f, file)
|
||||
changed = ff.run()
|
||||
if changed:
|
||||
ff.gettherest()
|
||||
f.close()
|
||||
if changed:
|
||||
if verbose:
|
||||
print("changed.")
|
||||
if dryrun:
|
||||
print("But this is a dry run, so leaving it alone.")
|
||||
for s, e, line in changed:
|
||||
print("%r lines %d-%d" % (file, s+1, e+1))
|
||||
for i in range(s, e+1):
|
||||
print(ff.lines[i], end=' ')
|
||||
if line is None:
|
||||
print("-- deleted")
|
||||
else:
|
||||
print("-- change to:")
|
||||
print(line, end=' ')
|
||||
if not dryrun:
|
||||
bak = file + ".bak"
|
||||
if os.path.exists(bak):
|
||||
os.remove(bak)
|
||||
os.rename(file, bak)
|
||||
if verbose:
|
||||
print("renamed", file, "to", bak)
|
||||
g = open(file, "w")
|
||||
ff.write(g)
|
||||
g.close()
|
||||
if verbose:
|
||||
print("wrote new", file)
|
||||
else:
|
||||
if verbose:
|
||||
print("unchanged.")
|
||||
|
||||
class FutureFinder:
|
||||
|
||||
def __init__(self, f, fname):
|
||||
self.f = f
|
||||
self.fname = fname
|
||||
self.ateof = 0
|
||||
self.lines = [] # raw file lines
|
||||
|
||||
# List of (start_index, end_index, new_line) triples.
|
||||
self.changed = []
|
||||
|
||||
# Line-getter for tokenize.
|
||||
def getline(self):
|
||||
if self.ateof:
|
||||
return ""
|
||||
line = self.f.readline()
|
||||
if line == "":
|
||||
self.ateof = 1
|
||||
else:
|
||||
self.lines.append(line)
|
||||
return line
|
||||
|
||||
def run(self):
|
||||
STRING = tokenize.STRING
|
||||
NL = tokenize.NL
|
||||
NEWLINE = tokenize.NEWLINE
|
||||
COMMENT = tokenize.COMMENT
|
||||
NAME = tokenize.NAME
|
||||
OP = tokenize.OP
|
||||
|
||||
changed = self.changed
|
||||
get = tokenize.generate_tokens(self.getline).__next__
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
# Chew up initial comments and blank lines (if any).
|
||||
while type in (COMMENT, NL, NEWLINE):
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
# Chew up docstring (if any -- and it may be implicitly catenated!).
|
||||
while type is STRING:
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
# Analyze the future stmts.
|
||||
while 1:
|
||||
# Chew up comments and blank lines (if any).
|
||||
while type in (COMMENT, NL, NEWLINE):
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
if not (type is NAME and token == "from"):
|
||||
break
|
||||
startline = srow - 1 # tokenize is one-based
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
if not (type is NAME and token == "__future__"):
|
||||
break
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
if not (type is NAME and token == "import"):
|
||||
break
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
# Get the list of features.
|
||||
features = []
|
||||
while type is NAME:
|
||||
features.append(token)
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
if not (type is OP and token == ','):
|
||||
break
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
# A trailing comment?
|
||||
comment = None
|
||||
if type is COMMENT:
|
||||
comment = token
|
||||
type, token, (srow, scol), (erow, ecol), line = get()
|
||||
|
||||
if type is not NEWLINE:
|
||||
errprint("Skipping file %r; can't parse line %d:\n%s" %
|
||||
(self.fname, srow, line))
|
||||
return []
|
||||
|
||||
endline = srow - 1
|
||||
|
||||
# Check for obsolete features.
|
||||
okfeatures = []
|
||||
for f in features:
|
||||
object = getattr(__future__, f, None)
|
||||
if object is None:
|
||||
# A feature we don't know about yet -- leave it in.
|
||||
# They'll get a compile-time error when they compile
|
||||
# this program, but that's not our job to sort out.
|
||||
okfeatures.append(f)
|
||||
else:
|
||||
released = object.getMandatoryRelease()
|
||||
if released is None or released <= sys.version_info:
|
||||
# Withdrawn or obsolete.
|
||||
pass
|
||||
else:
|
||||
okfeatures.append(f)
|
||||
|
||||
# Rewrite the line if at least one future-feature is obsolete.
|
||||
if len(okfeatures) < len(features):
|
||||
if len(okfeatures) == 0:
|
||||
line = None
|
||||
else:
|
||||
line = "from __future__ import "
|
||||
line += ', '.join(okfeatures)
|
||||
if comment is not None:
|
||||
line += ' ' + comment
|
||||
line += '\n'
|
||||
changed.append((startline, endline, line))
|
||||
|
||||
# Loop back for more future statements.
|
||||
|
||||
return changed
|
||||
|
||||
def gettherest(self):
|
||||
if self.ateof:
|
||||
self.therest = ''
|
||||
else:
|
||||
self.therest = self.f.read()
|
||||
|
||||
def write(self, f):
|
||||
changed = self.changed
|
||||
assert changed
|
||||
# Prevent calling this again.
|
||||
self.changed = []
|
||||
# Apply changes in reverse order.
|
||||
changed.reverse()
|
||||
for s, e, line in changed:
|
||||
if line is None:
|
||||
# pure deletion
|
||||
del self.lines[s:e+1]
|
||||
else:
|
||||
self.lines[s:e+1] = [line]
|
||||
f.writelines(self.lines)
|
||||
# Copy over the remainder of the file.
|
||||
if self.therest:
|
||||
f.write(self.therest)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
128
Tools/scripts/combinerefs.py
Normal file
|
|
@@ -0,0 +1,128 @@
|
|||
#! /usr/bin/env python3
|
||||
|
||||
"""
|
||||
combinerefs path
|
||||
|
||||
A helper for analyzing PYTHONDUMPREFS output.
|
||||
|
||||
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
|
||||
time Py_FinalizeEx() prints the list of all live objects twice: first it
|
||||
prints the repr() of each object while the interpreter is still fully intact.
|
||||
After cleaning up everything it can, it prints all remaining live objects
|
||||
again, but the second time just prints their addresses, refcounts, and type
|
||||
names (because the interpreter has been torn down, calling repr methods at
|
||||
this point can get into infinite loops or blow up).
|
||||
|
||||
Save all this output into a file, then run this script passing the path to
|
||||
that file. The script finds both output chunks, combines them, then prints
|
||||
a line of output for each object still alive at the end:
|
||||
|
||||
address refcnt typename repr
|
||||
|
||||
address is the address of the object, in whatever format the platform C
|
||||
produces for a %p format code.
|
||||
|
||||
refcnt is of the form
|
||||
|
||||
"[" ref "]"
|
||||
|
||||
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
|
||||
or
|
||||
|
||||
"[" ref_before "->" ref_after "]"
|
||||
|
||||
if the refcount changed.
|
||||
|
||||
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
|
||||
output block.
|
||||
|
||||
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
|
||||
CAUTION: If object is a container type, it may not actually contain all the
|
||||
objects shown in the repr: the repr was captured from the first output block,
|
||||
and some of the containees may have been released since then. For example,
|
||||
it's common for the line showing the dict of interned strings to display
|
||||
strings that no longer exist at the end of Py_FinalizeEx; this can be recognized
|
||||
(albeit painfully) because such containees don't have a line of their own.
|
||||
|
||||
The objects are listed in allocation order, with most-recently allocated
|
||||
printed first, and the first object allocated printed last.
|
||||
|
||||
|
||||
Simple examples:
|
||||
|
||||
00857060 [14] str '__len__'
|
||||
|
||||
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
|
||||
output blocks said there were 14 references to it. This is probably due to
|
||||
C modules that intern the string "__len__" and keep a reference to it in a
|
||||
file static.
|
||||
|
||||
00857038 [46->5] tuple ()
|
||||
|
||||
46-5 = 41 references to the empty tuple were removed by the cleanup actions
|
||||
between the times PYTHONDUMPREFS produced output.
|
||||
|
||||
00858028 [1025->1456] str '<dummy key>'
|
||||
|
||||
The string '<dummy key>', which is used in dictobject.c to overwrite a real
|
||||
key that gets deleted, grew several hundred references during cleanup. It
|
||||
suggests that stuff did get removed from dicts by cleanup, but that the dicts
|
||||
themselves are staying alive for some reason. """
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
# Generate lines from fileiter. If whilematch is true, continue reading
|
||||
# while the regexp object pat matches line. If whilematch is false, lines
|
||||
# are read so long as pat doesn't match them. In any case, the first line
|
||||
# that doesn't match pat (when whilematch is true), or that does match pat
|
||||
# (when whilematch is false), is lost, and fileiter will resume at the line
|
||||
# following it.
|
||||
def read(fileiter, pat, whilematch):
|
||||
for line in fileiter:
|
||||
if bool(pat.match(line)) == whilematch:
|
||||
yield line
|
||||
else:
|
||||
break
|
||||
|
||||
def combine(fname):
|
||||
f = open(fname)
|
||||
|
||||
fi = iter(f)
|
||||
|
||||
for line in read(fi, re.compile(r'^Remaining objects:$'), False):
|
||||
pass
|
||||
|
||||
crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
|
||||
addr2rc = {}
|
||||
addr2guts = {}
|
||||
before = 0
|
||||
for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
|
||||
m = crack.match(line)
|
||||
if m:
|
||||
addr, addr2rc[addr], addr2guts[addr] = m.groups()
|
||||
before += 1
|
||||
else:
|
||||
print('??? skipped:', line)
|
||||
|
||||
after = 0
|
||||
for line in read(fi, crack, True):
|
||||
after += 1
|
||||
m = crack.match(line)
|
||||
assert m
|
||||
addr, rc, guts = m.groups() # guts is type name here
|
||||
if addr not in addr2rc:
|
||||
print('??? new object created while tearing down:', line.rstrip())
|
||||
continue
|
||||
print(addr, end=' ')
|
||||
if rc == addr2rc[addr]:
|
||||
print('[%s]' % rc, end=' ')
|
||||
else:
|
||||
print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
|
||||
print(guts, addr2guts[addr])
|
||||
|
||||
f.close()
|
||||
print("%d objects before, %d after" % (before, after))
|
||||
|
||||
if __name__ == '__main__':
|
||||
combine(sys.argv[1])
|
||||
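The crack pattern above is what splits each PYTHONDUMPREFS line into address, refcount and payload; a quick demonstration on a made-up line:

import re

crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
m = crack.match("00857060 [14] str '__len__'")
print(m.groups())   # ('00857060', '14', "str '__len__'")
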
26
Tools/scripts/copytime.py
Normal file
|
|
@@ -0,0 +1,26 @@
|
|||
#! /usr/bin/env python3

# Copy one file's atime and mtime to another

import sys
import os
from stat import ST_ATIME, ST_MTIME # Really constants 7 and 8

def main():
    if len(sys.argv) != 3:
        sys.stderr.write('usage: copytime source destination\n')
        sys.exit(2)
    file1, file2 = sys.argv[1], sys.argv[2]
    try:
        stat1 = os.stat(file1)
    except OSError:
        sys.stderr.write(file1 + ': cannot stat\n')
        sys.exit(1)
    try:
        os.utime(file2, (stat1[ST_ATIME], stat1[ST_MTIME]))
    except OSError:
        sys.stderr.write(file2 + ': cannot change time\n')
        sys.exit(2)

if __name__ == '__main__':
    main()
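As an aside (illustrative only, not part of this commit), the same copy can be written with the named attributes of os.stat_result instead of the ST_ATIME/ST_MTIME indices; copy_times is a hypothetical helper name:

    import os

    def copy_times(source, destination):
        # st_atime/st_mtime are the attribute equivalents of ST_ATIME/ST_MTIME.
        st = os.stat(source)
        os.utime(destination, (st.st_atime, st.st_mtime))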
23
Tools/scripts/crlf.py
Normal file
@@ -0,0 +1,23 @@
#! /usr/bin/env python3
"Replace CRLF with LF in argument files. Print names of changed files."

import sys, os

def main():
    for filename in sys.argv[1:]:
        if os.path.isdir(filename):
            print(filename, "Directory!")
            continue
        with open(filename, "rb") as f:
            data = f.read()
        if b'\0' in data:
            print(filename, "Binary!")
            continue
        newdata = data.replace(b"\r\n", b"\n")
        if newdata != data:
            print(filename)
            with open(filename, "wb") as f:
                f.write(newdata)

if __name__ == '__main__':
    main()
135
Tools/scripts/db2pickle.py
Normal file
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ]
|
||||
|
||||
Convert the database file given on the command line to a pickle
|
||||
representation. The optional flags indicate the type of the database:
|
||||
|
||||
-a - open using dbm (any supported format)
|
||||
-b - open as bsddb btree file
|
||||
-d - open as dbm file
|
||||
-g - open as gdbm file
|
||||
-h - open as bsddb hash file
|
||||
-r - open as bsddb recno file
|
||||
|
||||
The default is hash. If a pickle file is named it is opened for write
|
||||
access (deleting any existing data). If no pickle file is named, the pickle
|
||||
output is written to standard output.
|
||||
|
||||
"""
|
||||
|
||||
import getopt
|
||||
try:
|
||||
import bsddb
|
||||
except ImportError:
|
||||
bsddb = None
|
||||
try:
|
||||
import dbm.ndbm as dbm
|
||||
except ImportError:
|
||||
dbm = None
|
||||
try:
|
||||
import dbm.gnu as gdbm
|
||||
except ImportError:
|
||||
gdbm = None
|
||||
try:
|
||||
import dbm.ndbm as anydbm
|
||||
except ImportError:
|
||||
anydbm = None
|
||||
import sys
|
||||
try:
|
||||
import pickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
prog = sys.argv[0]
|
||||
|
||||
def usage():
|
||||
sys.stderr.write(__doc__ % globals())
|
||||
|
||||
def main(args):
|
||||
try:
|
||||
opts, args = getopt.getopt(args, "hbrdag",
|
||||
["hash", "btree", "recno", "dbm",
|
||||
"gdbm", "anydbm"])
|
||||
except getopt.error:
|
||||
usage()
|
||||
return 1
|
||||
|
||||
if len(args) == 0 or len(args) > 2:
|
||||
usage()
|
||||
return 1
|
||||
elif len(args) == 1:
|
||||
dbfile = args[0]
|
||||
pfile = sys.stdout
|
||||
else:
|
||||
dbfile = args[0]
|
||||
try:
|
||||
pfile = open(args[1], 'wb')
|
||||
except IOError:
|
||||
sys.stderr.write("Unable to open %s\n" % args[1])
|
||||
return 1
|
||||
|
||||
dbopen = None
|
||||
for opt, arg in opts:
|
||||
if opt in ("-h", "--hash"):
|
||||
try:
|
||||
dbopen = bsddb.hashopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-b", "--btree"):
|
||||
try:
|
||||
dbopen = bsddb.btopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-r", "--recno"):
|
||||
try:
|
||||
dbopen = bsddb.rnopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-a", "--anydbm"):
|
||||
try:
|
||||
dbopen = anydbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-g", "--gdbm"):
|
||||
try:
|
||||
dbopen = gdbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm.gnu module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-d", "--dbm"):
|
||||
try:
|
||||
dbopen = dbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm.ndbm module unavailable.\n")
|
||||
return 1
|
||||
if dbopen is None:
|
||||
if bsddb is None:
|
||||
sys.stderr.write("bsddb module unavailable - ")
|
||||
sys.stderr.write("must specify dbtype.\n")
|
||||
return 1
|
||||
else:
|
||||
dbopen = bsddb.hashopen
|
||||
|
||||
try:
|
||||
db = dbopen(dbfile, 'r')
|
||||
except bsddb.error:
|
||||
sys.stderr.write("Unable to open %s. " % dbfile)
|
||||
sys.stderr.write("Check for format or version mismatch.\n")
|
||||
return 1
|
||||
|
||||
for k in db.keys():
|
||||
pickle.dump((k, db[k]), pfile, 1==1)
|
||||
|
||||
db.close()
|
||||
pfile.close()
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv[1:]))
|
||||
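Since db2pickle.py writes each (key, value) pair as a separate pickle, one after another, a file it produces can be read back by calling pickle.load() until end of file. A minimal sketch (the file name is only an example):

    import pickle

    def iter_pairs(picklefile):
        # Yield the (key, value) pairs that db2pickle.py dumped sequentially.
        with open(picklefile, 'rb') as pfile:
            while True:
                try:
                    yield pickle.load(pfile)
                except EOFError:
                    return

    for key, value in iter_pairs('dump.pickle'):
        print(key, value)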
60
Tools/scripts/diff.py
Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
|
||||
""" Command line interface to difflib.py providing diffs in four formats:
|
||||
|
||||
* ndiff: lists every line and highlights interline changes.
|
||||
* context: highlights clusters of changes in a before/after format.
|
||||
* unified: highlights clusters of changes in an inline format.
|
||||
* html: generates side by side comparison with change highlights.
|
||||
|
||||
"""
|
||||
|
||||
import sys, os, difflib, argparse
|
||||
from datetime import datetime, timezone
|
||||
|
||||
def file_mtime(path):
|
||||
t = datetime.fromtimestamp(os.stat(path).st_mtime,
|
||||
timezone.utc)
|
||||
return t.astimezone().isoformat()
|
||||
|
||||
def main():
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-c', action='store_true', default=False,
|
||||
help='Produce a context format diff (default)')
|
||||
parser.add_argument('-u', action='store_true', default=False,
|
||||
help='Produce a unified format diff')
|
||||
parser.add_argument('-m', action='store_true', default=False,
|
||||
help='Produce HTML side by side diff '
|
||||
'(can use -c and -l in conjunction)')
|
||||
parser.add_argument('-n', action='store_true', default=False,
|
||||
help='Produce a ndiff format diff')
|
||||
parser.add_argument('-l', '--lines', type=int, default=3,
|
||||
help='Set number of context lines (default 3)')
|
||||
parser.add_argument('fromfile')
|
||||
parser.add_argument('tofile')
|
||||
options = parser.parse_args()
|
||||
|
||||
n = options.lines
|
||||
fromfile = options.fromfile
|
||||
tofile = options.tofile
|
||||
|
||||
fromdate = file_mtime(fromfile)
|
||||
todate = file_mtime(tofile)
|
||||
with open(fromfile) as ff:
|
||||
fromlines = ff.readlines()
|
||||
with open(tofile) as tf:
|
||||
tolines = tf.readlines()
|
||||
|
||||
if options.u:
|
||||
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
|
||||
elif options.n:
|
||||
diff = difflib.ndiff(fromlines, tolines)
|
||||
elif options.m:
|
||||
diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
|
||||
else:
|
||||
diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
|
||||
|
||||
sys.stdout.writelines(diff)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
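The script is a thin wrapper around difflib; a self-contained sketch of the unified-diff branch (the sample lines and labels are invented):

    import sys
    import difflib

    fromlines = ['one\n', 'two\n', 'three\n']
    tolines = ['one\n', 'two!\n', 'three\n']

    # Same call shape as the -u branch above; the date arguments may be omitted.
    diff = difflib.unified_diff(fromlines, tolines, 'a.txt', 'b.txt', n=3)
    sys.stdout.writelines(diff)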
60
Tools/scripts/dutree.py
Normal file
@@ -0,0 +1,60 @@
#! /usr/bin/env python3
|
||||
# Format du output in a tree shape
|
||||
|
||||
import os, sys, errno
|
||||
|
||||
def main():
|
||||
p = os.popen('du ' + ' '.join(sys.argv[1:]), 'r')
|
||||
total, d = None, {}
|
||||
for line in p.readlines():
|
||||
i = 0
|
||||
while line[i] in '0123456789': i = i+1
|
||||
size = eval(line[:i])
|
||||
while line[i] in ' \t': i = i+1
|
||||
filename = line[i:-1]
|
||||
comps = filename.split('/')
|
||||
if comps[0] == '': comps[0] = '/'
|
||||
if comps[len(comps)-1] == '': del comps[len(comps)-1]
|
||||
total, d = store(size, comps, total, d)
|
||||
try:
|
||||
display(total, d)
|
||||
except IOError as e:
|
||||
if e.errno != errno.EPIPE:
|
||||
raise
|
||||
|
||||
def store(size, comps, total, d):
|
||||
if comps == []:
|
||||
return size, d
|
||||
if comps[0] not in d:
|
||||
d[comps[0]] = None, {}
|
||||
t1, d1 = d[comps[0]]
|
||||
d[comps[0]] = store(size, comps[1:], t1, d1)
|
||||
return total, d
|
||||
|
||||
def display(total, d):
|
||||
show(total, d, '')
|
||||
|
||||
def show(total, d, prefix):
|
||||
if not d: return
|
||||
list = []
|
||||
sum = 0
|
||||
for key in d.keys():
|
||||
tsub, dsub = d[key]
|
||||
list.append((tsub, key))
|
||||
if tsub is not None: sum = sum + tsub
|
||||
## if sum < total:
|
||||
## list.append((total - sum, os.curdir))
|
||||
list.sort()
|
||||
list.reverse()
|
||||
width = len(repr(list[0][0]))
|
||||
for tsub, key in list:
|
||||
if tsub is None:
|
||||
psub = prefix
|
||||
else:
|
||||
print(prefix + repr(tsub).rjust(width) + ' ' + key)
|
||||
psub = prefix + ' '*(width-1) + '|' + ' '*(len(key)+1)
|
||||
if key in d:
|
||||
show(tsub, d[key][1], psub)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
56
Tools/scripts/eptags.py
Normal file
@@ -0,0 +1,56 @@
#! /usr/bin/env python3
|
||||
"""Create a TAGS file for Python programs, usable with GNU Emacs.
|
||||
|
||||
usage: eptags pyfiles...
|
||||
|
||||
The output TAGS file is usable with Emacs version 18, 19, 20.
|
||||
Tagged are:
|
||||
- functions (even inside other defs or classes)
|
||||
- classes
|
||||
|
||||
eptags warns about files it cannot open.
|
||||
eptags will not give warnings about duplicate tags.
|
||||
|
||||
BUGS:
|
||||
Because of tag duplication (methods with the same name in different
|
||||
classes), TAGS files are not very useful for most object-oriented
|
||||
python projects.
|
||||
"""
|
||||
import sys,re
|
||||
|
||||
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
|
||||
matcher = re.compile(expr)
|
||||
|
||||
def treat_file(filename, outfp):
|
||||
"""Append tags found in file named 'filename' to the open file 'outfp'"""
|
||||
try:
|
||||
fp = open(filename, 'r')
|
||||
except OSError:
|
||||
sys.stderr.write('Cannot open %s\n'%filename)
|
||||
return
|
||||
charno = 0
|
||||
lineno = 0
|
||||
tags = []
|
||||
size = 0
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
break
|
||||
lineno = lineno + 1
|
||||
m = matcher.search(line)
|
||||
if m:
|
||||
tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
|
||||
tags.append(tag)
|
||||
size = size + len(tag)
|
||||
charno = charno + len(line)
|
||||
outfp.write('\f\n%s,%d\n' % (filename,size))
|
||||
for tag in tags:
|
||||
outfp.write(tag)
|
||||
|
||||
def main():
|
||||
outfp = open('TAGS', 'w')
|
||||
for filename in sys.argv[1:]:
|
||||
treat_file(filename, outfp)
|
||||
|
||||
if __name__=="__main__":
|
||||
main()
|
||||
40
Tools/scripts/find-uname.py
Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env python3

"""
For each argument on the command line, look for it in the set of all Unicode
names. Arguments are treated as case-insensitive regular expressions, e.g.:

    % find-uname 'small letter a$' 'horizontal line'
    *** small letter a$ matches ***
    LATIN SMALL LETTER A (97)
    COMBINING LATIN SMALL LETTER A (867)
    CYRILLIC SMALL LETTER A (1072)
    PARENTHESIZED LATIN SMALL LETTER A (9372)
    CIRCLED LATIN SMALL LETTER A (9424)
    FULLWIDTH LATIN SMALL LETTER A (65345)
    *** horizontal line matches ***
    HORIZONTAL LINE EXTENSION (9135)
"""

import unicodedata
import sys
import re

def main(args):
    unicode_names = []
    for ix in range(sys.maxunicode+1):
        try:
            unicode_names.append((ix, unicodedata.name(chr(ix))))
        except ValueError: # no name for the character
            pass
    for arg in args:
        pat = re.compile(arg, re.I)
        matches = [(y,x) for (x,y) in unicode_names
                   if pat.search(y) is not None]
        if matches:
            print("***", arg, "matches", "***")
            for match in matches:
                print("%s (%d)" % match)

if __name__ == "__main__":
    main(sys.argv[1:])
128
Tools/scripts/find_recursionlimit.py
Normal file
@@ -0,0 +1,128 @@
#! /usr/bin/env python3
|
||||
"""Find the maximum recursion limit that prevents interpreter termination.
|
||||
|
||||
This script finds the maximum safe recursion limit on a particular
|
||||
platform. If you need to change the recursion limit on your system,
|
||||
this script will tell you a safe upper bound. To use the new limit,
|
||||
call sys.setrecursionlimit().
|
||||
|
||||
This module implements several ways to create infinite recursion in
|
||||
Python. Different implementations end up pushing different numbers of
|
||||
C stack frames, depending on how many calls through Python's abstract
|
||||
C API occur.
|
||||
|
||||
After each round of tests, it prints a message:
|
||||
"Limit of NNNN is fine".
|
||||
|
||||
The highest printed value of "NNNN" is therefore the highest potentially
|
||||
safe limit for your system (which depends on the OS, architecture, but also
|
||||
the compilation flags). Please note that it is practically impossible to
|
||||
test all possible recursion paths in the interpreter, so the results of
|
||||
this test should not be trusted blindly -- although they give a good hint
|
||||
of which values are reasonable.
|
||||
|
||||
NOTE: When the C stack space allocated by your system is exceeded due
|
||||
to excessive recursion, exact behaviour depends on the platform, although
|
||||
the interpreter will always fail in a likely brutal way: either a
|
||||
segmentation fault, a MemoryError, or just a silent abort.
|
||||
|
||||
NB: A program that does not use __methods__ can set a higher limit.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import itertools
|
||||
|
||||
class RecursiveBlowup1:
|
||||
def __init__(self):
|
||||
self.__init__()
|
||||
|
||||
def test_init():
|
||||
return RecursiveBlowup1()
|
||||
|
||||
class RecursiveBlowup2:
|
||||
def __repr__(self):
|
||||
return repr(self)
|
||||
|
||||
def test_repr():
|
||||
return repr(RecursiveBlowup2())
|
||||
|
||||
class RecursiveBlowup4:
|
||||
def __add__(self, x):
|
||||
return x + self
|
||||
|
||||
def test_add():
|
||||
return RecursiveBlowup4() + RecursiveBlowup4()
|
||||
|
||||
class RecursiveBlowup5:
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self, attr)
|
||||
|
||||
def test_getattr():
|
||||
return RecursiveBlowup5().attr
|
||||
|
||||
class RecursiveBlowup6:
|
||||
def __getitem__(self, item):
|
||||
return self[item - 2] + self[item - 1]
|
||||
|
||||
def test_getitem():
|
||||
return RecursiveBlowup6()[5]
|
||||
|
||||
def test_recurse():
|
||||
return test_recurse()
|
||||
|
||||
def test_cpickle(_cache={}):
|
||||
import io
|
||||
try:
|
||||
import _pickle
|
||||
except ImportError:
|
||||
print("cannot import _pickle, skipped!")
|
||||
return
|
||||
k, l = None, None
|
||||
for n in itertools.count():
|
||||
try:
|
||||
l = _cache[n]
|
||||
continue # Already tried and it works, let's save some time
|
||||
except KeyError:
|
||||
for i in range(100):
|
||||
l = [k, l]
|
||||
k = {i: l}
|
||||
_pickle.Pickler(io.BytesIO(), protocol=-1).dump(l)
|
||||
_cache[n] = l
|
||||
|
||||
def test_compiler_recursion():
|
||||
# The compiler uses a scaling factor to support additional levels
|
||||
# of recursion. This is a sanity check of that scaling to ensure
|
||||
# it still raises RecursionError even at higher recursion limits
|
||||
compile("()" * (10 * sys.getrecursionlimit()), "<single>", "single")
|
||||
|
||||
def check_limit(n, test_func_name):
|
||||
sys.setrecursionlimit(n)
|
||||
if test_func_name.startswith("test_"):
|
||||
print(test_func_name[5:])
|
||||
else:
|
||||
print(test_func_name)
|
||||
test_func = globals()[test_func_name]
|
||||
try:
|
||||
test_func()
|
||||
# AttributeError can be raised because of the way e.g. PyDict_GetItem()
|
||||
# silences all exceptions and returns NULL, which is usually interpreted
|
||||
# as "missing attribute".
|
||||
except (RecursionError, AttributeError):
|
||||
pass
|
||||
else:
|
||||
print("Yikes!")
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
limit = 1000
|
||||
while 1:
|
||||
check_limit(limit, "test_recurse")
|
||||
check_limit(limit, "test_add")
|
||||
check_limit(limit, "test_repr")
|
||||
check_limit(limit, "test_init")
|
||||
check_limit(limit, "test_getattr")
|
||||
check_limit(limit, "test_getitem")
|
||||
check_limit(limit, "test_cpickle")
|
||||
check_limit(limit, "test_compiler_recursion")
|
||||
print("Limit of %d is fine" % limit)
|
||||
limit = limit + 100
|
||||
89
Tools/scripts/finddiv.py
Normal file
@@ -0,0 +1,89 @@
#! /usr/bin/env python3
|
||||
|
||||
"""finddiv - a grep-like tool that looks for division operators.
|
||||
|
||||
Usage: finddiv [-l] file_or_directory ...
|
||||
|
||||
For directory arguments, all files in the directory whose name ends in
|
||||
.py are processed, and subdirectories are processed recursively.
|
||||
|
||||
This actually tokenizes the files to avoid false hits in comments or
|
||||
string literals.
|
||||
|
||||
By default, this prints all lines containing a / or /= operator, in
|
||||
grep -n style. With the -l option specified, it prints the filename
|
||||
of files that contain at least one / or /= operator.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
import tokenize
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "lh")
|
||||
except getopt.error as msg:
|
||||
usage(msg)
|
||||
return 2
|
||||
if not args:
|
||||
usage("at least one file argument is required")
|
||||
return 2
|
||||
listnames = 0
|
||||
for o, a in opts:
|
||||
if o == "-h":
|
||||
print(__doc__)
|
||||
return
|
||||
if o == "-l":
|
||||
listnames = 1
|
||||
exit = None
|
||||
for filename in args:
|
||||
x = process(filename, listnames)
|
||||
exit = exit or x
|
||||
return exit
|
||||
|
||||
def usage(msg):
|
||||
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
|
||||
sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
|
||||
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
|
||||
|
||||
def process(filename, listnames):
|
||||
if os.path.isdir(filename):
|
||||
return processdir(filename, listnames)
|
||||
try:
|
||||
fp = open(filename)
|
||||
except IOError as msg:
|
||||
sys.stderr.write("Can't open: %s\n" % msg)
|
||||
return 1
|
||||
g = tokenize.generate_tokens(fp.readline)
|
||||
lastrow = None
|
||||
for type, token, (row, col), end, line in g:
|
||||
if token in ("/", "/="):
|
||||
if listnames:
|
||||
print(filename)
|
||||
break
|
||||
if row != lastrow:
|
||||
lastrow = row
|
||||
print("%s:%d:%s" % (filename, row, line), end=' ')
|
||||
fp.close()
|
||||
|
||||
def processdir(dir, listnames):
|
||||
try:
|
||||
names = os.listdir(dir)
|
||||
except OSError as msg:
|
||||
sys.stderr.write("Can't list directory: %s\n" % dir)
|
||||
return 1
|
||||
files = []
|
||||
for name in names:
|
||||
fn = os.path.join(dir, name)
|
||||
if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
|
||||
files.append(fn)
|
||||
files.sort(key=os.path.normcase)
|
||||
exit = None
|
||||
for fn in files:
|
||||
x = process(fn, listnames)
|
||||
exit = exit or x
|
||||
return exit
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
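The reason finddiv tokenizes rather than greps is exactly so that slashes inside strings and comments are skipped; a small sketch of that idea on an in-memory snippet (the sample source is invented):

    import io
    import tokenize

    source = "x = a / b  # this / is ignored\ns = 'a/b'\n"

    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.string in ("/", "/="):
            row, col = tok.start
            print("division operator at line %d, column %d" % (row, col))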
43
Tools/scripts/findlinksto.py
Normal file
@@ -0,0 +1,43 @@
#! /usr/bin/env python3
|
||||
|
||||
# findlinksto
|
||||
#
|
||||
# find symbolic links to a path matching a regular expression
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import getopt
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], '')
|
||||
if len(args) < 2:
|
||||
raise getopt.GetoptError('not enough arguments', None)
|
||||
except getopt.GetoptError as msg:
|
||||
sys.stdout = sys.stderr
|
||||
print(msg)
|
||||
print('usage: findlinksto pattern directory ...')
|
||||
sys.exit(2)
|
||||
pat, dirs = args[0], args[1:]
|
||||
prog = re.compile(pat)
|
||||
for dirname in dirs:
|
||||
os.walk(dirname, visit, prog)
|
||||
|
||||
def visit(prog, dirname, names):
|
||||
if os.path.islink(dirname):
|
||||
names[:] = []
|
||||
return
|
||||
if os.path.ismount(dirname):
|
||||
print('descend into', dirname)
|
||||
for name in names:
|
||||
name = os.path.join(dirname, name)
|
||||
try:
|
||||
linkto = os.readlink(name)
|
||||
if prog.search(linkto) is not None:
|
||||
print(name, '->', linkto)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
107
Tools/scripts/findnocoding.py
Normal file
@@ -0,0 +1,107 @@
#!/usr/bin/env python3
|
||||
|
||||
"""List all those Python files that require a coding directive
|
||||
|
||||
Usage: findnocoding.py dir1 [dir2...]
|
||||
"""
|
||||
|
||||
__author__ = "Oleg Broytmann, Georg Brandl"
|
||||
|
||||
import sys, os, re, getopt
|
||||
|
||||
# our pysource module finds Python source files
|
||||
try:
|
||||
import pysource
|
||||
except ImportError:
|
||||
# emulate the module with a simple os.walk
|
||||
class pysource:
|
||||
has_python_ext = looks_like_python = can_be_compiled = None
|
||||
def walk_python_files(self, paths, *args, **kwargs):
|
||||
for path in paths:
|
||||
if os.path.isfile(path):
|
||||
yield path.endswith(".py")
|
||||
elif os.path.isdir(path):
|
||||
for root, dirs, files in os.walk(path):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
yield os.path.join(root, filename)
|
||||
pysource = pysource()
|
||||
|
||||
|
||||
print("The pysource module is not available; "
|
||||
"no sophisticated Python source file search will be done.", file=sys.stderr)
|
||||
|
||||
|
||||
decl_re = re.compile(rb'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
|
||||
blank_re = re.compile(rb'^[ \t\f]*(?:[#\r\n]|$)')
|
||||
|
||||
def get_declaration(line):
|
||||
match = decl_re.match(line)
|
||||
if match:
|
||||
return match.group(1)
|
||||
return b''
|
||||
|
||||
def has_correct_encoding(text, codec):
|
||||
try:
|
||||
str(text, codec)
|
||||
except UnicodeDecodeError:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def needs_declaration(fullpath):
|
||||
try:
|
||||
infile = open(fullpath, 'rb')
|
||||
except IOError: # Oops, the file was removed - ignore it
|
||||
return None
|
||||
|
||||
with infile:
|
||||
line1 = infile.readline()
|
||||
line2 = infile.readline()
|
||||
|
||||
if (get_declaration(line1) or
|
||||
blank_re.match(line1) and get_declaration(line2)):
|
||||
# the file does have an encoding declaration, so trust it
|
||||
return False
|
||||
|
||||
# check the whole file for non utf-8 characters
|
||||
rest = infile.read()
|
||||
|
||||
if has_correct_encoding(line1+line2+rest, "utf-8"):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
usage = """Usage: %s [-cd] paths...
|
||||
-c: recognize Python source files trying to compile them
|
||||
-d: debug output""" % sys.argv[0]
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'cd')
|
||||
except getopt.error as msg:
|
||||
print(msg, file=sys.stderr)
|
||||
print(usage, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
is_python = pysource.looks_like_python
|
||||
debug = False
|
||||
|
||||
for o, a in opts:
|
||||
if o == '-c':
|
||||
is_python = pysource.can_be_compiled
|
||||
elif o == '-d':
|
||||
debug = True
|
||||
|
||||
if not args:
|
||||
print(usage, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
for fullpath in pysource.walk_python_files(args, is_python):
|
||||
if debug:
|
||||
print("Testing for coding: %s" % fullpath)
|
||||
result = needs_declaration(fullpath)
|
||||
if result:
|
||||
print(fullpath)
|
||||
316
Tools/scripts/fixcid.py
Normal file
@@ -0,0 +1,316 @@
#! /usr/bin/env python3
|
||||
|
||||
# Perform massive identifier substitution on C source files.
|
||||
# This actually tokenizes the files (to some extent) so it can
|
||||
# avoid making substitutions inside strings or comments.
|
||||
# Inside strings, substitutions are never made; inside comments,
|
||||
# it is a user option (off by default).
|
||||
#
|
||||
# The substitutions are read from one or more files whose lines,
|
||||
# when not empty, after stripping comments starting with #,
|
||||
# must contain exactly two words separated by whitespace: the
|
||||
# old identifier and its replacement.
|
||||
#
|
||||
# The option -r reverses the sense of the substitutions (this may be
|
||||
# useful to undo a particular substitution).
|
||||
#
|
||||
# If the old identifier is prefixed with a '*' (with no intervening
|
||||
# whitespace), then it will not be substituted inside comments.
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a C file (ends in .h or .c). The special filename '-' means
|
||||
# operate in filter mode: read stdin, write stdout.
|
||||
#
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments).
|
||||
#
|
||||
# The original files are kept as back-up with a "~" suffix.
|
||||
#
|
||||
# Changes made are reported to stdout in a diff-like format.
|
||||
#
|
||||
# NB: by changing only the function fixline() you can turn this
|
||||
# into a program for different changes to C source files; by
|
||||
# changing the function wanted() you can make a different selection of
|
||||
# files.
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
import getopt
|
||||
|
||||
err = sys.stderr.write
|
||||
dbg = err
|
||||
rep = sys.stdout.write
|
||||
|
||||
def usage():
|
||||
progname = sys.argv[0]
|
||||
err('Usage: ' + progname +
|
||||
' [-c] [-r] [-s file] ... file-or-directory ...\n')
|
||||
err('\n')
|
||||
err('-c : substitute inside comments\n')
|
||||
err('-r : reverse direction for following -s options\n')
|
||||
err('-s substfile : add a file of substitutions\n')
|
||||
err('\n')
|
||||
err('Each non-empty non-comment line in a substitution file must\n')
|
||||
err('contain exactly two words: an identifier and its replacement.\n')
|
||||
err('Comments start with a # character and end at end of line.\n')
|
||||
err('If an identifier is preceded with a *, it is not substituted\n')
|
||||
err('inside a comment even when -c is specified.\n')
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'crs:')
|
||||
except getopt.error as msg:
|
||||
err('Options error: ' + str(msg) + '\n')
|
||||
usage()
|
||||
sys.exit(2)
|
||||
bad = 0
|
||||
if not args: # No arguments
|
||||
usage()
|
||||
sys.exit(2)
|
||||
for opt, arg in opts:
|
||||
if opt == '-c':
|
||||
setdocomments()
|
||||
if opt == '-r':
|
||||
setreverse()
|
||||
if opt == '-s':
|
||||
addsubst(arg)
|
||||
for arg in args:
|
||||
if os.path.isdir(arg):
|
||||
if recursedown(arg): bad = 1
|
||||
elif os.path.islink(arg):
|
||||
err(arg + ': will not process symbolic links\n')
|
||||
bad = 1
|
||||
else:
|
||||
if fix(arg): bad = 1
|
||||
sys.exit(bad)
|
||||
|
||||
# Change this regular expression to select a different set of files
|
||||
Wanted = r'^[a-zA-Z0-9_]+\.[ch]$'
|
||||
def wanted(name):
|
||||
return re.match(Wanted, name)
|
||||
|
||||
def recursedown(dirname):
|
||||
dbg('recursedown(%r)\n' % (dirname,))
|
||||
bad = 0
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except OSError as msg:
|
||||
err(dirname + ': cannot list directory: ' + str(msg) + '\n')
|
||||
return 1
|
||||
names.sort()
|
||||
subdirs = []
|
||||
for name in names:
|
||||
if name in (os.curdir, os.pardir): continue
|
||||
fullname = os.path.join(dirname, name)
|
||||
if os.path.islink(fullname): pass
|
||||
elif os.path.isdir(fullname):
|
||||
subdirs.append(fullname)
|
||||
elif wanted(name):
|
||||
if fix(fullname): bad = 1
|
||||
for fullname in subdirs:
|
||||
if recursedown(fullname): bad = 1
|
||||
return bad
|
||||
|
||||
def fix(filename):
|
||||
## dbg('fix(%r)\n' % (filename,))
|
||||
if filename == '-':
|
||||
# Filter mode
|
||||
f = sys.stdin
|
||||
g = sys.stdout
|
||||
else:
|
||||
# File replacement mode
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError as msg:
|
||||
err(filename + ': cannot open: ' + str(msg) + '\n')
|
||||
return 1
|
||||
head, tail = os.path.split(filename)
|
||||
tempname = os.path.join(head, '@' + tail)
|
||||
g = None
|
||||
# If we find a match, we rewind the file and start over but
|
||||
# now copy everything to a temp file.
|
||||
lineno = 0
|
||||
initfixline()
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if not line: break
|
||||
lineno = lineno + 1
|
||||
while line[-2:] == '\\\n':
|
||||
nextline = f.readline()
|
||||
if not nextline: break
|
||||
line = line + nextline
|
||||
lineno = lineno + 1
|
||||
newline = fixline(line)
|
||||
if newline != line:
|
||||
if g is None:
|
||||
try:
|
||||
g = open(tempname, 'w')
|
||||
except IOError as msg:
|
||||
f.close()
|
||||
err(tempname+': cannot create: '+
|
||||
str(msg)+'\n')
|
||||
return 1
|
||||
f.seek(0)
|
||||
lineno = 0
|
||||
initfixline()
|
||||
rep(filename + ':\n')
|
||||
continue # restart from the beginning
|
||||
rep(repr(lineno) + '\n')
|
||||
rep('< ' + line)
|
||||
rep('> ' + newline)
|
||||
if g is not None:
|
||||
g.write(newline)
|
||||
|
||||
# End of file
|
||||
if filename == '-': return 0 # Done in filter mode
|
||||
f.close()
|
||||
if not g: return 0 # No changes
|
||||
g.close()
|
||||
|
||||
# Finishing touch -- move files
|
||||
|
||||
# First copy the file's mode to the temp file
|
||||
try:
|
||||
statbuf = os.stat(filename)
|
||||
os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
|
||||
except OSError as msg:
|
||||
err(tempname + ': warning: chmod failed (' + str(msg) + ')\n')
|
||||
# Then make a backup of the original file as filename~
|
||||
try:
|
||||
os.rename(filename, filename + '~')
|
||||
except OSError as msg:
|
||||
err(filename + ': warning: backup failed (' + str(msg) + ')\n')
|
||||
# Now move the temp file to the original file
|
||||
try:
|
||||
os.rename(tempname, filename)
|
||||
except OSError as msg:
|
||||
err(filename + ': rename failed (' + str(msg) + ')\n')
|
||||
return 1
|
||||
# Return success
|
||||
return 0
|
||||
|
||||
# Tokenizing ANSI C (partly)
|
||||
|
||||
Identifier = '(struct )?[a-zA-Z_][a-zA-Z0-9_]+'
|
||||
String = r'"([^\n\\"]|\\.)*"'
|
||||
Char = r"'([^\n\\']|\\.)*'"
|
||||
CommentStart = r'/\*'
|
||||
CommentEnd = r'\*/'
|
||||
|
||||
Hexnumber = '0[xX][0-9a-fA-F]*[uUlL]*'
|
||||
Octnumber = '0[0-7]*[uUlL]*'
|
||||
Decnumber = '[1-9][0-9]*[uUlL]*'
|
||||
Intnumber = Hexnumber + '|' + Octnumber + '|' + Decnumber
|
||||
Exponent = '[eE][-+]?[0-9]+'
|
||||
Pointfloat = r'([0-9]+\.[0-9]*|\.[0-9]+)(' + Exponent + r')?'
|
||||
Expfloat = '[0-9]+' + Exponent
|
||||
Floatnumber = Pointfloat + '|' + Expfloat
|
||||
Number = Floatnumber + '|' + Intnumber
|
||||
|
||||
# Anything else is an operator -- don't list this explicitly because of '/*'
|
||||
|
||||
OutsideComment = (Identifier, Number, String, Char, CommentStart)
|
||||
OutsideCommentPattern = '(' + '|'.join(OutsideComment) + ')'
|
||||
OutsideCommentProgram = re.compile(OutsideCommentPattern)
|
||||
|
||||
InsideComment = (Identifier, Number, CommentEnd)
|
||||
InsideCommentPattern = '(' + '|'.join(InsideComment) + ')'
|
||||
InsideCommentProgram = re.compile(InsideCommentPattern)
|
||||
|
||||
def initfixline():
|
||||
global Program
|
||||
Program = OutsideCommentProgram
|
||||
|
||||
def fixline(line):
|
||||
global Program
|
||||
## print('-->', repr(line))
|
||||
i = 0
|
||||
while i < len(line):
|
||||
match = Program.search(line, i)
|
||||
if match is None: break
|
||||
i = match.start()
|
||||
found = match.group(0)
|
||||
## if Program is InsideCommentProgram: print(end='... ')
|
||||
## else: print(end=' ')
|
||||
## print(found)
|
||||
if len(found) == 2:
|
||||
if found == '/*':
|
||||
Program = InsideCommentProgram
|
||||
elif found == '*/':
|
||||
Program = OutsideCommentProgram
|
||||
n = len(found)
|
||||
if found in Dict:
|
||||
subst = Dict[found]
|
||||
if Program is InsideCommentProgram:
|
||||
if not Docomments:
|
||||
print('Found in comment:', found)
|
||||
i = i + n
|
||||
continue
|
||||
if found in NotInComment:
|
||||
## print(end='Ignored in comment: ')
|
||||
## print(found, '-->', subst)
|
||||
## print('Line:', line, end='')
|
||||
subst = found
|
||||
## else:
|
||||
## print(end='Substituting in comment: ')
|
||||
## print(found, '-->', subst)
|
||||
## print('Line:', line, end='')
|
||||
line = line[:i] + subst + line[i+n:]
|
||||
n = len(subst)
|
||||
i = i + n
|
||||
return line
|
||||
|
||||
Docomments = 0
|
||||
def setdocomments():
|
||||
global Docomments
|
||||
Docomments = 1
|
||||
|
||||
Reverse = 0
|
||||
def setreverse():
|
||||
global Reverse
|
||||
Reverse = (not Reverse)
|
||||
|
||||
Dict = {}
|
||||
NotInComment = {}
|
||||
def addsubst(substfile):
|
||||
try:
|
||||
fp = open(substfile, 'r')
|
||||
except IOError as msg:
|
||||
err(substfile + ': cannot read substfile: ' + str(msg) + '\n')
|
||||
sys.exit(1)
|
||||
lineno = 0
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line: break
|
||||
lineno = lineno + 1
|
||||
try:
|
||||
i = line.index('#')
|
||||
except ValueError:
|
||||
i = -1 # Happens to delete trailing \n
|
||||
words = line[:i].split()
|
||||
if not words: continue
|
||||
if len(words) == 3 and words[0] == 'struct':
|
||||
words[:2] = [words[0] + ' ' + words[1]]
|
||||
elif len(words) != 2:
|
||||
err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
|
||||
continue
|
||||
if Reverse:
|
||||
[value, key] = words
|
||||
else:
|
||||
[key, value] = words
|
||||
if value[0] == '*':
|
||||
value = value[1:]
|
||||
if key[0] == '*':
|
||||
key = key[1:]
|
||||
NotInComment[key] = value
|
||||
if key in Dict:
|
||||
err('%s:%r: warning: overriding: %r %r\n' % (substfile, lineno, key, value))
|
||||
err('%s:%r: warning: previous: %r\n' % (substfile, lineno, Dict[key]))
|
||||
Dict[key] = value
|
||||
fp.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
378
Tools/scripts/fixdiv.py
Normal file
@@ -0,0 +1,378 @@
#! /usr/bin/env python3
|
||||
|
||||
"""fixdiv - tool to fix division operators.
|
||||
|
||||
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
|
||||
This runs the script `yourscript.py' while writing warning messages
|
||||
about all uses of the classic division operator to the file
|
||||
`warnings'. The warnings look like this:
|
||||
|
||||
<file>:<line>: DeprecationWarning: classic <type> division
|
||||
|
||||
The warnings are written to stderr, so you must use `2>' for the I/O
|
||||
redirect. I know of no way to redirect stderr on Windows in a DOS
|
||||
box, so you will have to modify the script to set sys.stderr to some
|
||||
kind of log file if you want to do this on Windows.
|
||||
|
||||
The warnings are not limited to the script; modules imported by the
|
||||
script may also trigger warnings. In fact a useful technique is to
|
||||
write a test script specifically intended to exercise all code in a
|
||||
particular module or set of modules.
|
||||
|
||||
Then run `python fixdiv.py warnings'. This first reads the warnings,
|
||||
looking for classic division warnings, and sorts them by file name and
|
||||
line number. Then, for each file that received at least one warning,
|
||||
it parses the file and tries to match the warnings up to the division
|
||||
operators found in the source code. If it is successful, it writes
|
||||
its findings to stdout, preceded by a line of dashes and a line of the
|
||||
form:
|
||||
|
||||
Index: <file>
|
||||
|
||||
If the only findings found are suggestions to change a / operator into
|
||||
a // operator, the output is acceptable input for the Unix 'patch'
|
||||
program.
|
||||
|
||||
Here are the possible messages on stdout (N stands for a line number):
|
||||
|
||||
- A plain-diff-style change ('NcN', a line marked by '<', a line
|
||||
containing '---', and a line marked by '>'):
|
||||
|
||||
A / operator was found that should be changed to //. This is the
|
||||
recommendation when only int and/or long arguments were seen.
|
||||
|
||||
- 'True division / operator at line N' and a line marked by '=':
|
||||
|
||||
A / operator was found that can remain unchanged. This is the
|
||||
recommendation when only float and/or complex arguments were seen.
|
||||
|
||||
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
|
||||
|
||||
A / operator was found for which int or long as well as float or
|
||||
complex arguments were seen. This is highly unlikely; if it occurs,
|
||||
you may have to restructure the code to keep the classic semantics,
|
||||
or maybe you don't care about the classic semantics.
|
||||
|
||||
- 'No conclusive evidence on line N', line marked by '*':
|
||||
|
||||
A / operator was found for which no warnings were seen. This could
|
||||
be code that was never executed, or code that was only executed
|
||||
with user-defined objects as arguments. You will have to
|
||||
investigate further. Note that // can be overloaded separately from
|
||||
/, using __floordiv__. True division can also be separately
|
||||
overloaded, using __truediv__. Classic division should be the same
|
||||
as either of those. (XXX should I add a warning for division on
|
||||
user-defined objects, to disambiguate this case from code that was
|
||||
never executed?)
|
||||
|
||||
- 'Phantom ... warnings for line N', line marked by '*':
|
||||
|
||||
A warning was seen for a line not containing a / operator. The most
|
||||
likely cause is a warning about code executed by 'exec' or eval()
|
||||
(see note below), or an indirect invocation of the / operator, for
|
||||
example via the div() function in the operator module. It could
|
||||
also be caused by a change to the file between the time the test
|
||||
script was run to collect warnings and the time fixdiv was run.
|
||||
|
||||
- 'More than one / operator in line N'; or
|
||||
'More than one / operator per statement in lines N-N':
|
||||
|
||||
The scanner found more than one / operator on a single line, or in a
|
||||
statement split across multiple lines. Because the warnings
|
||||
framework doesn't (and can't) show the offset within the line, and
|
||||
the code generator doesn't always give the correct line number for
|
||||
operations in a multi-line statement, we can't be sure whether all
|
||||
operators in the statement were executed. To be on the safe side,
|
||||
by default a warning is issued about this case. In practice, these
|
||||
cases are usually safe, and the -m option suppresses these warnings.
|
||||
|
||||
- 'Can't find the / operator in line N', line marked by '*':
|
||||
|
||||
This really shouldn't happen. It means that the tokenize module
|
||||
reported a '/' operator but the line it returns didn't contain a '/'
|
||||
character at the indicated position.
|
||||
|
||||
- 'Bad warning for line N: XYZ', line marked by '*':
|
||||
|
||||
This really shouldn't happen. It means that a 'classic XYZ
|
||||
division' warning was read with XYZ being something other than
|
||||
'int', 'long', 'float', or 'complex'.
|
||||
|
||||
Notes:
|
||||
|
||||
- The augmented assignment operator /= is handled the same way as the
|
||||
/ operator.
|
||||
|
||||
- This tool never looks at the // operator; no warnings are ever
|
||||
generated for use of this operator.
|
||||
|
||||
- This tool never looks at the / operator when a future division
|
||||
statement is in effect; no warnings are generated in this case, and
|
||||
because the tool only looks at files for which at least one classic
|
||||
division warning was seen, it will never look at files containing a
|
||||
future division statement.
|
||||
|
||||
- Warnings may be issued for code not read from a file, but executed
|
||||
using the exec() or eval() functions. These may have
|
||||
<string> in the filename position, in which case the fixdiv script
|
||||
will attempt and fail to open a file named '<string>' and issue a
|
||||
warning about this failure; or these may be reported as 'Phantom'
|
||||
warnings (see above). You're on your own to deal with these. You
|
||||
could make all recommended changes and add a future division
|
||||
statement to all affected files, and then re-run the test script; it
|
||||
should not issue any warnings. If there are any, and you have a
|
||||
hard time tracking down where they are generated, you can use the
|
||||
-Werror option to force an error instead of a first warning,
|
||||
generating a traceback.
|
||||
|
||||
- The tool should be run from the same directory as that from which
|
||||
the original script was run, otherwise it won't be able to open
|
||||
files given by relative pathnames.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import getopt
|
||||
import re
|
||||
import tokenize
|
||||
|
||||
multi_ok = 0
|
||||
|
||||
def main():
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "hm")
|
||||
except getopt.error as msg:
|
||||
usage(msg)
|
||||
return 2
|
||||
for o, a in opts:
|
||||
if o == "-h":
|
||||
print(__doc__)
|
||||
return
|
||||
if o == "-m":
|
||||
global multi_ok
|
||||
multi_ok = 1
|
||||
if not args:
|
||||
usage("at least one file argument is required")
|
||||
return 2
|
||||
if args[1:]:
|
||||
sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
|
||||
warnings = readwarnings(args[0])
|
||||
if warnings is None:
|
||||
return 1
|
||||
files = list(warnings.keys())
|
||||
if not files:
|
||||
print("No classic division warnings read from", args[0])
|
||||
return
|
||||
files.sort()
|
||||
exit = None
|
||||
for filename in files:
|
||||
x = process(filename, warnings[filename])
|
||||
exit = exit or x
|
||||
return exit
|
||||
|
||||
def usage(msg):
|
||||
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
|
||||
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
|
||||
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
|
||||
|
||||
PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
|
||||
r"classic (int|long|float|complex) division$")
|
||||
|
||||
def readwarnings(warningsfile):
|
||||
prog = re.compile(PATTERN)
|
||||
try:
|
||||
f = open(warningsfile)
|
||||
except IOError as msg:
|
||||
sys.stderr.write("can't open: %s\n" % msg)
|
||||
return
|
||||
warnings = {}
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if not line:
|
||||
break
|
||||
m = prog.match(line)
|
||||
if not m:
|
||||
if line.find("division") >= 0:
|
||||
sys.stderr.write("Warning: ignored input " + line)
|
||||
continue
|
||||
filename, lineno, what = m.groups()
|
||||
list = warnings.get(filename)
|
||||
if list is None:
|
||||
warnings[filename] = list = []
|
||||
list.append((int(lineno), sys.intern(what)))
|
||||
f.close()
|
||||
return warnings
|
||||
|
||||
def process(filename, list):
|
||||
print("-"*70)
|
||||
assert list # if this fails, readwarnings() is broken
|
||||
try:
|
||||
fp = open(filename)
|
||||
except IOError as msg:
|
||||
sys.stderr.write("can't open: %s\n" % msg)
|
||||
return 1
|
||||
print("Index:", filename)
|
||||
f = FileContext(fp)
|
||||
list.sort()
|
||||
index = 0 # list[:index] has been processed, list[index:] is still to do
|
||||
g = tokenize.generate_tokens(f.readline)
|
||||
while 1:
|
||||
startlineno, endlineno, slashes = lineinfo = scanline(g)
|
||||
if startlineno is None:
|
||||
break
|
||||
assert startlineno <= endlineno is not None
|
||||
orphans = []
|
||||
while index < len(list) and list[index][0] < startlineno:
|
||||
orphans.append(list[index])
|
||||
index += 1
|
||||
if orphans:
|
||||
reportphantomwarnings(orphans, f)
|
||||
warnings = []
|
||||
while index < len(list) and list[index][0] <= endlineno:
|
||||
warnings.append(list[index])
|
||||
index += 1
|
||||
if not slashes and not warnings:
|
||||
pass
|
||||
elif slashes and not warnings:
|
||||
report(slashes, "No conclusive evidence")
|
||||
elif warnings and not slashes:
|
||||
reportphantomwarnings(warnings, f)
|
||||
else:
|
||||
if len(slashes) > 1:
|
||||
if not multi_ok:
|
||||
rows = []
|
||||
lastrow = None
|
||||
for (row, col), line in slashes:
|
||||
if row == lastrow:
|
||||
continue
|
||||
rows.append(row)
|
||||
lastrow = row
|
||||
assert rows
|
||||
if len(rows) == 1:
|
||||
print("*** More than one / operator in line", rows[0])
|
||||
else:
|
||||
print("*** More than one / operator per statement", end=' ')
|
||||
print("in lines %d-%d" % (rows[0], rows[-1]))
|
||||
intlong = []
|
||||
floatcomplex = []
|
||||
bad = []
|
||||
for lineno, what in warnings:
|
||||
if what in ("int", "long"):
|
||||
intlong.append(what)
|
||||
elif what in ("float", "complex"):
|
||||
floatcomplex.append(what)
|
||||
else:
|
||||
bad.append(what)
|
||||
lastrow = None
|
||||
for (row, col), line in slashes:
|
||||
if row == lastrow:
|
||||
continue
|
||||
lastrow = row
|
||||
line = chop(line)
|
||||
if line[col:col+1] != "/":
|
||||
print("*** Can't find the / operator in line %d:" % row)
|
||||
print("*", line)
|
||||
continue
|
||||
if bad:
|
||||
print("*** Bad warning for line %d:" % row, bad)
|
||||
print("*", line)
|
||||
elif intlong and not floatcomplex:
|
||||
print("%dc%d" % (row, row))
|
||||
print("<", line)
|
||||
print("---")
|
||||
print(">", line[:col] + "/" + line[col:])
|
||||
elif floatcomplex and not intlong:
|
||||
print("True division / operator at line %d:" % row)
|
||||
print("=", line)
|
||||
elif intlong and floatcomplex:
|
||||
print("*** Ambiguous / operator (%s, %s) at line %d:" % (
|
||||
"|".join(intlong), "|".join(floatcomplex), row))
|
||||
print("?", line)
|
||||
fp.close()
|
||||
|
||||
def reportphantomwarnings(warnings, f):
|
||||
blocks = []
|
||||
lastrow = None
|
||||
lastblock = None
|
||||
for row, what in warnings:
|
||||
if row != lastrow:
|
||||
lastblock = [row]
|
||||
blocks.append(lastblock)
|
||||
lastblock.append(what)
|
||||
for block in blocks:
|
||||
row = block[0]
|
||||
whats = "/".join(block[1:])
|
||||
print("*** Phantom %s warnings for line %d:" % (whats, row))
|
||||
f.report(row, mark="*")
|
||||
|
||||
def report(slashes, message):
|
||||
lastrow = None
|
||||
for (row, col), line in slashes:
|
||||
if row != lastrow:
|
||||
print("*** %s on line %d:" % (message, row))
|
||||
print("*", chop(line))
|
||||
lastrow = row
|
||||
|
||||
class FileContext:
|
||||
def __init__(self, fp, window=5, lineno=1):
|
||||
self.fp = fp
|
||||
self.window = 5
|
||||
self.lineno = 1
|
||||
self.eoflookahead = 0
|
||||
self.lookahead = []
|
||||
self.buffer = []
|
||||
def fill(self):
|
||||
while len(self.lookahead) < self.window and not self.eoflookahead:
|
||||
line = self.fp.readline()
|
||||
if not line:
|
||||
self.eoflookahead = 1
|
||||
break
|
||||
self.lookahead.append(line)
|
||||
def readline(self):
|
||||
self.fill()
|
||||
if not self.lookahead:
|
||||
return ""
|
||||
line = self.lookahead.pop(0)
|
||||
self.buffer.append(line)
|
||||
self.lineno += 1
|
||||
return line
|
||||
def __getitem__(self, index):
|
||||
self.fill()
|
||||
bufstart = self.lineno - len(self.buffer)
|
||||
lookend = self.lineno + len(self.lookahead)
|
||||
if bufstart <= index < self.lineno:
|
||||
return self.buffer[index - bufstart]
|
||||
if self.lineno <= index < lookend:
|
||||
return self.lookahead[index - self.lineno]
|
||||
raise KeyError
|
||||
def report(self, first, last=None, mark="*"):
|
||||
if last is None:
|
||||
last = first
|
||||
for i in range(first, last+1):
|
||||
try:
|
||||
line = self[first]
|
||||
except KeyError:
|
||||
line = "<missing line>"
|
||||
print(mark, chop(line))
|
||||
|
||||
def scanline(g):
|
||||
slashes = []
|
||||
startlineno = None
|
||||
endlineno = None
|
||||
for type, token, start, end, line in g:
|
||||
endlineno = end[0]
|
||||
if startlineno is None:
|
||||
startlineno = endlineno
|
||||
if token in ("/", "/="):
|
||||
slashes.append((start, line))
|
||||
if type == tokenize.NEWLINE:
|
||||
break
|
||||
return startlineno, endlineno, slashes
|
||||
|
||||
def chop(line):
|
||||
if line.endswith("\n"):
|
||||
return line[:-1]
|
||||
else:
|
||||
return line
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
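The PATTERN constant above is what readwarnings() applies to each captured stderr line; a quick check against a fabricated warning line:

    import re

    PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
               r"classic (int|long|float|complex) division$")

    m = re.match(PATTERN, "foo.py:7: DeprecationWarning: classic int division")
    print(m.groups())   # -> ('foo.py', '7', 'int')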
49
Tools/scripts/fixheader.py
Normal file
@@ -0,0 +1,49 @@
#! /usr/bin/env python3
|
||||
|
||||
# Add some standard cpp magic to a header file
|
||||
|
||||
import sys
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
for filename in args:
|
||||
process(filename)
|
||||
|
||||
def process(filename):
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError as msg:
|
||||
sys.stderr.write('%s: can\'t open: %s\n' % (filename, str(msg)))
|
||||
return
|
||||
data = f.read()
|
||||
f.close()
|
||||
if data[:2] != '/*':
|
||||
sys.stderr.write('%s does not begin with C comment\n' % filename)
|
||||
return
|
||||
try:
|
||||
f = open(filename, 'w')
|
||||
except IOError as msg:
|
||||
sys.stderr.write('%s: can\'t write: %s\n' % (filename, str(msg)))
|
||||
return
|
||||
sys.stderr.write('Processing %s ...\n' % filename)
|
||||
magic = 'Py_'
|
||||
for c in filename:
|
||||
if ord(c)<=0x80 and c.isalnum():
|
||||
magic = magic + c.upper()
|
||||
else: magic = magic + '_'
|
||||
sys.stdout = f
|
||||
print('#ifndef', magic)
|
||||
print('#define', magic)
|
||||
print('#ifdef __cplusplus')
|
||||
print('extern "C" {')
|
||||
print('#endif')
|
||||
print()
|
||||
f.write(data)
|
||||
print()
|
||||
print('#ifdef __cplusplus')
|
||||
print('}')
|
||||
print('#endif')
|
||||
print('#endif /*', '!'+magic, '*/')
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
113
Tools/scripts/fixnotice.py
Normal file
@@ -0,0 +1,113 @@
#! /usr/bin/env python3
|
||||
|
||||
"""(Ostensibly) fix copyright notices in files.
|
||||
|
||||
Actually, this script will simply replace a block of text in a file from one
|
||||
string to another. It will only do this once though, i.e. not globally
|
||||
throughout the file. It writes a backup file and then does an os.rename()
|
||||
dance for atomicity.
|
||||
|
||||
Usage: fixnotices.py [options] [filenames]
|
||||
Options:
|
||||
-h / --help
|
||||
Print this message and exit
|
||||
|
||||
--oldnotice=file
|
||||
Use the notice in the file as the old (to be replaced) string, instead
|
||||
of the hard coded value in the script.
|
||||
|
||||
--newnotice=file
|
||||
Use the notice in the file as the new (replacement) string, instead of
|
||||
the hard coded value in the script.
|
||||
|
||||
--dry-run
|
||||
Don't actually make the changes, but print out the list of files that
|
||||
would change. When used with -v, a status will be printed for every
|
||||
file.
|
||||
|
||||
-v / --verbose
|
||||
Print a message for every file looked at, indicating whether the file
|
||||
is changed or not.
|
||||
"""
|
||||
|
||||
OLD_NOTICE = """/***********************************************************
|
||||
Copyright (c) 2000, BeOpen.com.
|
||||
Copyright (c) 1995-2000, Corporation for National Research Initiatives.
|
||||
Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
|
||||
All rights reserved.
|
||||
|
||||
See the file "Misc/COPYRIGHT" for information on usage and
|
||||
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
||||
******************************************************************/
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
|
||||
NEW_NOTICE = ""
|
||||
DRYRUN = 0
|
||||
VERBOSE = 0
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print(__doc__ % globals())
|
||||
if msg:
|
||||
print(msg)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
def main():
|
||||
global DRYRUN, OLD_NOTICE, NEW_NOTICE, VERBOSE
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'hv',
|
||||
['help', 'oldnotice=', 'newnotice=',
|
||||
'dry-run', 'verbose'])
|
||||
except getopt.error as msg:
|
||||
usage(1, msg)
|
||||
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-v', '--verbose'):
|
||||
VERBOSE = 1
|
||||
elif opt == '--dry-run':
|
||||
DRYRUN = 1
|
||||
elif opt == '--oldnotice':
|
||||
fp = open(arg)
|
||||
OLD_NOTICE = fp.read()
|
||||
fp.close()
|
||||
elif opt == '--newnotice':
|
||||
fp = open(arg)
|
||||
NEW_NOTICE = fp.read()
|
||||
fp.close()
|
||||
|
||||
for arg in args:
|
||||
process(arg)
|
||||
|
||||
|
||||
def process(file):
|
||||
f = open(file)
|
||||
data = f.read()
|
||||
f.close()
|
||||
i = data.find(OLD_NOTICE)
|
||||
if i < 0:
|
||||
if VERBOSE:
|
||||
print('no change:', file)
|
||||
return
|
||||
elif DRYRUN or VERBOSE:
|
||||
print(' change:', file)
|
||||
if DRYRUN:
|
||||
# Don't actually change the file
|
||||
return
|
||||
data = data[:i] + NEW_NOTICE + data[i+len(OLD_NOTICE):]
|
||||
new = file + ".new"
|
||||
backup = file + ".bak"
|
||||
f = open(new, "w")
|
||||
f.write(data)
|
||||
f.close()
|
||||
os.rename(file, backup)
|
||||
os.rename(new, file)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
33
Tools/scripts/fixps.py
Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env python3

# Fix Python script(s) to reference the interpreter via /usr/bin/env python.
# Warning: this overwrites the file without making a backup.

import sys
import re


def main():
    for filename in sys.argv[1:]:
        try:
            f = open(filename, 'r')
        except IOError as msg:
            print(filename, ': can\'t open :', msg)
            continue
        line = f.readline()
        if not re.match('^#! */usr/local/bin/python', line):
            print(filename, ': not a /usr/local/bin/python script')
            f.close()
            continue
        rest = f.read()
        f.close()
        line = re.sub('/usr/local/bin/python',
                      '/usr/bin/env python', line)
        print(filename, ':', repr(line))
        f = open(filename, "w")
        f.write(line)
        f.write(rest)
        f.close()

if __name__ == '__main__':
    main()
62
Tools/scripts/generate_opcode_h.py
Normal file
@@ -0,0 +1,62 @@
# This script generates the opcode.h header file.
|
||||
|
||||
import sys
|
||||
import tokenize
|
||||
|
||||
header = """/* Auto-generated by Tools/scripts/generate_opcode_h.py */
|
||||
#ifndef Py_OPCODE_H
|
||||
#define Py_OPCODE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/* Instruction opcodes for compiled code */
|
||||
"""
|
||||
|
||||
footer = """
|
||||
/* EXCEPT_HANDLER is a special, implicit block type which is created when
|
||||
entering an except handler. It is not an opcode but we define it here
|
||||
as we want it to be available to both frameobject.c and ceval.c, while
|
||||
remaining private.*/
|
||||
#define EXCEPT_HANDLER 257
|
||||
|
||||
|
||||
enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE,
|
||||
PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN,
|
||||
PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
|
||||
|
||||
#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_OPCODE_H */
|
||||
"""
|
||||
|
||||
|
||||
def main(opcode_py, outfile='Include/opcode.h'):
|
||||
opcode = {}
|
||||
if hasattr(tokenize, 'open'):
|
||||
fp = tokenize.open(opcode_py) # Python 3.2+
|
||||
else:
|
||||
fp = open(opcode_py) # Python 2.7
|
||||
with fp:
|
||||
code = fp.read()
|
||||
exec(code, opcode)
|
||||
opmap = opcode['opmap']
|
||||
with open(outfile, 'w') as fobj:
|
||||
fobj.write(header)
|
||||
for name in opcode['opname']:
|
||||
if name in opmap:
|
||||
fobj.write("#define %-23s %3s\n" % (name, opmap[name]))
|
||||
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
|
||||
fobj.write("#define %-23s %3d\n" %
|
||||
('HAVE_ARGUMENT', opcode['HAVE_ARGUMENT']))
|
||||
fobj.write(footer)
|
||||
|
||||
print("%s regenerated from %s" % (outfile, opcode_py))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv[1], sys.argv[2])
|
||||
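For reference, a minimal sketch of the #define formatting used above; the opcode names and numbers here are illustrative placeholders, not values taken from Lib/opcode.py:

# Illustrative name/number pairs only; the real ones come from Lib/opcode.py.
sample_opmap = {'POP_TOP': 1, 'ROT_TWO': 2}
for name, value in sample_opmap.items():
    # Mirrors the "#define %-23s %3s" layout written into opcode.h.
    print("#define %-23s %3s" % (name, value))
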
84
Tools/scripts/get-remote-certificate.py
Normal file
@@ -0,0 +1,84 @@
#!/usr/bin/env python3
#
# fetch the certificate that the server(s) are providing in PEM form
#
# args are HOST:PORT [, HOST:PORT...]
#
# By Bill Janssen.

import re
import os
import sys
import tempfile


def fetch_server_certificate (host, port):

    def subproc(cmd):
        from subprocess import Popen, PIPE, STDOUT
        proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
        status = proc.wait()
        output = proc.stdout.read()
        return status, output

    def strip_to_x509_cert(certfile_contents, outfile=None):
        m = re.search(br"^([-]+BEGIN CERTIFICATE[-]+[\r]*\n"
                      br".*[\r]*^[-]+END CERTIFICATE[-]+)$",
                      certfile_contents, re.MULTILINE | re.DOTALL)
        if not m:
            return None
        else:
            tn = tempfile.mktemp()
            fp = open(tn, "wb")
            fp.write(m.group(1) + b"\n")
            fp.close()
            try:
                tn2 = (outfile or tempfile.mktemp())
                status, output = subproc(r'openssl x509 -in "%s" -out "%s"' %
                                         (tn, tn2))
                if status != 0:
                    raise RuntimeError('OpenSSL x509 failed with status %s and '
                                       'output: %r' % (status, output))
                fp = open(tn2, 'rb')
                data = fp.read()
                fp.close()
                os.unlink(tn2)
                return data
            finally:
                os.unlink(tn)

    if sys.platform.startswith("win"):
        tfile = tempfile.mktemp()
        fp = open(tfile, "w")
        fp.write("quit\n")
        fp.close()
        try:
            status, output = subproc(
                'openssl s_client -connect "%s:%s" -showcerts < "%s"' %
                (host, port, tfile))
        finally:
            os.unlink(tfile)
    else:
        status, output = subproc(
            'openssl s_client -connect "%s:%s" -showcerts < /dev/null' %
            (host, port))
    if status != 0:
        raise RuntimeError('OpenSSL connect failed with status %s and '
                           'output: %r' % (status, output))
    certtext = strip_to_x509_cert(output)
    if not certtext:
        raise ValueError("Invalid response received from server at %s:%s" %
                         (host, port))
    return certtext


if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.stderr.write(
            "Usage: %s HOSTNAME:PORTNUMBER [, HOSTNAME:PORTNUMBER...]\n" %
            sys.argv[0])
        sys.exit(1)
    for arg in sys.argv[1:]:
        host, port = arg.split(":")
        sys.stdout.buffer.write(fetch_server_certificate(host, int(port)))
    sys.exit(0)

25
Tools/scripts/google.py
Normal file
@@ -0,0 +1,25 @@
#! /usr/bin/env python3

"""Script to search with Google

Usage:
    python3 google.py [search terms]
"""

import sys
import urllib.parse
import webbrowser


def main(args):
    def quote(arg):
        if ' ' in arg:
            arg = '"%s"' % arg
        return urllib.parse.quote_plus(arg)

    qstring = '+'.join(quote(arg) for arg in args)
    url = urllib.parse.urljoin('https://www.google.com/search', '?q=' + qstring)
    webbrowser.open(url)

if __name__ == '__main__':
    main(sys.argv[1:])

85
Tools/scripts/gprof2html.py
Normal file
@@ -0,0 +1,85 @@
#! /usr/bin/env python3

"""Transform gprof(1) output into useful HTML."""

import html
import os
import re
import sys
import webbrowser

header = """\
<html>
<head>
  <title>gprof output (%s)</title>
</head>
<body>
<pre>
"""

trailer = """\
</pre>
</body>
</html>
"""

def add_escapes(filename):
    with open(filename) as fp:
        for line in fp:
            yield html.escape(line)


def main():
    filename = "gprof.out"
    if sys.argv[1:]:
        filename = sys.argv[1]
    outputfilename = filename + ".html"
    input = add_escapes(filename)
    output = open(outputfilename, "w")
    output.write(header % filename)
    for line in input:
        output.write(line)
        if line.startswith(" time"):
            break
    labels = {}
    for line in input:
        m = re.match(r"(.* )(\w+)\n", line)
        if not m:
            output.write(line)
            break
        stuff, fname = m.group(1, 2)
        labels[fname] = fname
        output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
                     (stuff, fname, fname, fname))
    for line in input:
        output.write(line)
        if line.startswith("index % time"):
            break
    for line in input:
        m = re.match(r"(.* )(\w+)(( &lt;cycle.*&gt;)? \[\d+\])\n", line)
        if not m:
            output.write(line)
            if line.startswith("Index by function name"):
                break
            continue
        prefix, fname, suffix = m.group(1, 2, 3)
        if fname not in labels:
            output.write(line)
            continue
        if line.startswith("["):
            output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
                         (prefix, fname, fname, fname, suffix))
        else:
            output.write('%s<a href="#call:%s">%s</a>%s\n' %
                         (prefix, fname, fname, suffix))
    for line in input:
        for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
            if part in labels:
                part = '<a href="#call:%s">%s</a>' % (part, part)
            output.write(part)
    output.write(trailer)
    output.close()
    webbrowser.open("file:" + os.path.abspath(outputfilename))

if __name__ == '__main__':
    main()

172
Tools/scripts/h2py.py
Normal file
@@ -0,0 +1,172 @@
#! /usr/bin/env python3

# Read #define's and translate to Python code.
# Handle #include statements.
# Handle #define macros with one argument.
# Anything that isn't recognized or doesn't translate into valid
# Python is ignored.

# Without filename arguments, acts as a filter.
# If one or more filenames are given, output is written to corresponding
# filenames in the local directory, translated to all uppercase, with
# the extension replaced by ".py".

# By passing one or more options of the form "-i regular_expression"
# you can specify additional strings to be ignored.  This is useful
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".

# XXX To do:
# - turn trailing C comments into Python comments
# - turn C Boolean operators "&& || !" into Python "and or not"
# - what to do about #if(def)?
# - what to do about macros with multiple parameters?

import sys, re, getopt, os

p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')

p_macro = re.compile(
    r'^[\t ]*#[\t ]*define[\t ]+'
    r'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')

p_include = re.compile(r'^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')

p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile('//.*')

ignores = [p_comment, p_cpp_comment]

p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")

p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")

filedict = {}
importable = {}

try:
    searchdirs=os.environ['include'].split(';')
except KeyError:
    try:
        searchdirs=os.environ['INCLUDE'].split(';')
    except KeyError:
        searchdirs=['/usr/include']
        try:
            searchdirs.insert(0, os.path.join('/usr/include',
                                              os.environ['MULTIARCH']))
        except KeyError:
            pass

def main():
    global filedict
    opts, args = getopt.getopt(sys.argv[1:], 'i:')
    for o, a in opts:
        if o == '-i':
            ignores.append(re.compile(a))
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            sys.stdout.write('# Generated by h2py from stdin\n')
            process(sys.stdin, sys.stdout)
        else:
            fp = open(filename, 'r')
            outfile = os.path.basename(filename)
            i = outfile.rfind('.')
            if i > 0: outfile = outfile[:i]
            modname = outfile.upper()
            outfile = modname + '.py'
            outfp = open(outfile, 'w')
            outfp.write('# Generated by h2py from %s\n' % filename)
            filedict = {}
            for dir in searchdirs:
                if filename[:len(dir)] == dir:
                    filedict[filename[len(dir)+1:]] = None  # no '/' trailing
                    importable[filename[len(dir)+1:]] = modname
                    break
            process(fp, outfp)
            outfp.close()
            fp.close()

def pytify(body):
    # replace ignored patterns by spaces
    for p in ignores:
        body = p.sub(' ', body)
    # replace char literals by ord(...)
    body = p_char.sub("ord('\\1')", body)
    # Compute negative hexadecimal constants
    start = 0
    UMAX = 2*(sys.maxsize+1)
    while 1:
        m = p_hex.search(body, start)
        if not m: break
        s,e = m.span()
        val = int(body[slice(*m.span(1))], 16)
        if val > sys.maxsize:
            val -= UMAX
            body = body[:s] + "(" + str(val) + ")" + body[e:]
        start = s + 1
    return body

def process(fp, outfp, env = {}):
    lineno = 0
    while 1:
        line = fp.readline()
        if not line: break
        lineno = lineno + 1
        match = p_define.match(line)
        if match:
            # gobble up continuation lines
            while line[-2:] == '\\\n':
                nextline = fp.readline()
                if not nextline: break
                lineno = lineno + 1
                line = line + nextline
            name = match.group(1)
            body = line[match.end():]
            body = pytify(body)
            ok = 0
            stmt = '%s = %s\n' % (name, body.strip())
            try:
                exec(stmt, env)
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_macro.match(line)
        if match:
            macro, arg = match.group(1, 2)
            body = line[match.end():]
            body = pytify(body)
            stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
            try:
                exec(stmt, env)
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_include.match(line)
        if match:
            regs = match.regs
            a, b = regs[1]
            filename = line[a:b]
            if filename in importable:
                outfp.write('from %s import *\n' % importable[filename])
            elif filename not in filedict:
                filedict[filename] = None
                inclfp = None
                for dir in searchdirs:
                    try:
                        inclfp = open(dir + '/' + filename)
                        break
                    except IOError:
                        pass
                if inclfp:
                    outfp.write(
                        '\n# Included from %s\n' % filename)
                    process(inclfp, outfp, env)
                else:
                    sys.stderr.write('Warning - could not find file %s\n' %
                                     filename)

if __name__ == '__main__':
    main()

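As a quick illustration of the translation that process() and pytify() perform, a simple constant and a one-argument macro come out roughly as below; the C input lines are hypothetical:

# Hypothetical C input:
#   #define O_APPEND 0x0008
#   #define NEXT(a)  ((a) + 1)
# h2py would emit Python along these lines (a "NAME = body" assignment for a
# plain #define, and a one-line def for a single-argument macro):
O_APPEND = 0x0008
def NEXT(a): return ((a) + 1)
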
265
Tools/scripts/highlight.py
Normal file
@@ -0,0 +1,265 @@
#!/usr/bin/env python3
'''Add syntax highlighting to Python source code'''

__author__ = 'Raymond Hettinger'

import builtins
import functools
import html as html_module
import keyword
import re
import tokenize

#### Analyze Python Source #################################

def is_builtin(s):
    'Return True if s is the name of a builtin'
    return hasattr(builtins, s)

def combine_range(lines, start, end):
    'Join content from a range of lines between start and end'
    (srow, scol), (erow, ecol) = start, end
    if srow == erow:
        return lines[srow-1][scol:ecol], end
    rows = [lines[srow-1][scol:]] + lines[srow: erow-1] + [lines[erow-1][:ecol]]
    return ''.join(rows), end

def analyze_python(source):
    '''Generate and classify chunks of Python for syntax highlighting.
       Yields tuples in the form: (category, categorized_text).
    '''
    lines = source.splitlines(True)
    lines.append('')
    readline = functools.partial(next, iter(lines), '')
    kind = tok_str = ''
    tok_type = tokenize.COMMENT
    written = (1, 0)
    for tok in tokenize.generate_tokens(readline):
        prev_tok_type, prev_tok_str = tok_type, tok_str
        tok_type, tok_str, (srow, scol), (erow, ecol), logical_lineno = tok
        kind = ''
        if tok_type == tokenize.COMMENT:
            kind = 'comment'
        elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
            kind = 'operator'
        elif tok_type == tokenize.STRING:
            kind = 'string'
            if prev_tok_type == tokenize.INDENT or scol==0:
                kind = 'docstring'
        elif tok_type == tokenize.NAME:
            if tok_str in ('def', 'class', 'import', 'from'):
                kind = 'definition'
            elif prev_tok_str in ('def', 'class'):
                kind = 'defname'
            elif keyword.iskeyword(tok_str):
                kind = 'keyword'
            elif is_builtin(tok_str) and prev_tok_str != '.':
                kind = 'builtin'
        if kind:
            text, written = combine_range(lines, written, (srow, scol))
            yield '', text
            text, written = tok_str, (erow, ecol)
            yield kind, text
    line_upto_token, written = combine_range(lines, written, (erow, ecol))
    yield '', line_upto_token

#### Raw Output ###########################################

def raw_highlight(classified_text):
    'Straight text display of text classifications'
    result = []
    for kind, text in classified_text:
        result.append('%15s: %r\n' % (kind or 'plain', text))
    return ''.join(result)

#### ANSI Output ###########################################

default_ansi = {
    'comment': ('\033[0;31m', '\033[0m'),
    'string': ('\033[0;32m', '\033[0m'),
    'docstring': ('\033[0;32m', '\033[0m'),
    'keyword': ('\033[0;33m', '\033[0m'),
    'builtin': ('\033[0;35m', '\033[0m'),
    'definition': ('\033[0;33m', '\033[0m'),
    'defname': ('\033[0;34m', '\033[0m'),
    'operator': ('\033[0;33m', '\033[0m'),
}

def ansi_highlight(classified_text, colors=default_ansi):
    'Add syntax highlighting to source code using ANSI escape sequences'
    # http://en.wikipedia.org/wiki/ANSI_escape_code
    result = []
    for kind, text in classified_text:
        opener, closer = colors.get(kind, ('', ''))
        result += [opener, text, closer]
    return ''.join(result)

#### HTML Output ###########################################

def html_highlight(classified_text,opener='<pre class="python">\n', closer='</pre>\n'):
    'Convert classified text to an HTML fragment'
    result = [opener]
    for kind, text in classified_text:
        if kind:
            result.append('<span class="%s">' % kind)
        result.append(html_module.escape(text))
        if kind:
            result.append('</span>')
    result.append(closer)
    return ''.join(result)

default_css = {
    '.comment': '{color: crimson;}',
    '.string': '{color: forestgreen;}',
    '.docstring': '{color: forestgreen; font-style:italic;}',
    '.keyword': '{color: darkorange;}',
    '.builtin': '{color: purple;}',
    '.definition': '{color: darkorange; font-weight:bold;}',
    '.defname': '{color: blue;}',
    '.operator': '{color: brown;}',
}

default_html = '''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
          "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title> {title} </title>
<style type="text/css">
{css}
</style>
</head>
<body>
{body}
</body>
</html>
'''

def build_html_page(classified_text, title='python',
                    css=default_css, html=default_html):
    'Create a complete HTML page with colorized source code'
    css_str = '\n'.join(['%s %s' % item for item in css.items()])
    result = html_highlight(classified_text)
    title = html_module.escape(title)
    return html.format(title=title, css=css_str, body=result)

#### LaTeX Output ##########################################

default_latex_commands = {
    'comment': r'{\color{red}#1}',
    'string': r'{\color{ForestGreen}#1}',
    'docstring': r'{\emph{\color{ForestGreen}#1}}',
    'keyword': r'{\color{orange}#1}',
    'builtin': r'{\color{purple}#1}',
    'definition': r'{\color{orange}#1}',
    'defname': r'{\color{blue}#1}',
    'operator': r'{\color{brown}#1}',
}

default_latex_document = r'''
\documentclass{article}
\usepackage{alltt}
\usepackage{upquote}
\usepackage{color}
\usepackage[usenames,dvipsnames]{xcolor}
\usepackage[cm]{fullpage}
%(macros)s
\begin{document}
\center{\LARGE{%(title)s}}
\begin{alltt}
%(body)s
\end{alltt}
\end{document}
'''

def alltt_escape(s):
    'Replace backslash and braces with their escaped equivalents'
    xlat = {'{': r'\{', '}': r'\}', '\\': r'\textbackslash{}'}
    return re.sub(r'[\\{}]', lambda mo: xlat[mo.group()], s)

def latex_highlight(classified_text, title = 'python',
                    commands = default_latex_commands,
                    document = default_latex_document):
    'Create a complete LaTeX document with colorized source code'
    macros = '\n'.join(r'\newcommand{\py%s}[1]{%s}' % c for c in commands.items())
    result = []
    for kind, text in classified_text:
        if kind:
            result.append(r'\py%s{' % kind)
        result.append(alltt_escape(text))
        if kind:
            result.append('}')
    return default_latex_document % dict(title=title, macros=macros, body=''.join(result))


if __name__ == '__main__':
    import argparse
    import os.path
    import sys
    import textwrap
    import webbrowser

    parser = argparse.ArgumentParser(
            description = 'Add syntax highlighting to Python source code',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog = textwrap.dedent('''
                examples:

                  # Show syntax highlighted code in the terminal window
                  $ ./highlight.py myfile.py

                  # Colorize myfile.py and display in a browser
                  $ ./highlight.py -b myfile.py

                  # Create an HTML section to embed in an existing webpage
                  ./highlight.py -s myfile.py

                  # Create a complete HTML file
                  $ ./highlight.py -c myfile.py > myfile.html

                  # Create a PDF using LaTeX
                  $ ./highlight.py -l myfile.py | pdflatex

            '''))
    parser.add_argument('sourcefile', metavar = 'SOURCEFILE',
            help = 'file containing Python sourcecode')
    parser.add_argument('-b', '--browser', action = 'store_true',
            help = 'launch a browser to show results')
    parser.add_argument('-c', '--complete', action = 'store_true',
            help = 'build a complete html webpage')
    parser.add_argument('-l', '--latex', action = 'store_true',
            help = 'build a LaTeX document')
    parser.add_argument('-r', '--raw', action = 'store_true',
            help = 'raw parse of categorized text')
    parser.add_argument('-s', '--section', action = 'store_true',
            help = 'show an HTML section rather than a complete webpage')
    args = parser.parse_args()

    if args.section and (args.browser or args.complete):
        parser.error('The -s/--section option is incompatible with '
                     'the -b/--browser or -c/--complete options')

    sourcefile = args.sourcefile
    with open(sourcefile) as f:
        source = f.read()
    classified_text = analyze_python(source)

    if args.raw:
        encoded = raw_highlight(classified_text)
    elif args.complete or args.browser:
        encoded = build_html_page(classified_text, title=sourcefile)
    elif args.section:
        encoded = html_highlight(classified_text)
    elif args.latex:
        encoded = latex_highlight(classified_text, title=sourcefile)
    else:
        encoded = ansi_highlight(classified_text)

    if args.browser:
        htmlfile = os.path.splitext(os.path.basename(sourcefile))[0] + '.html'
        with open(htmlfile, 'w') as f:
            f.write(encoded)
        webbrowser.open('file://' + os.path.abspath(htmlfile))
    else:
        sys.stdout.write(encoded)

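The highlighting functions above can also be used programmatically rather than through the command line; a minimal sketch, assuming the file is importable as a module named "highlight" on sys.path:

# Minimal programmatic use of the analyzer plus the ANSI formatter.
from highlight import analyze_python, ansi_highlight

source = "def greet(name):\n    return 'hello ' + name\n"
print(ansi_highlight(analyze_python(source)))
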
112
Tools/scripts/ifdef.py
Normal file
@@ -0,0 +1,112 @@
#! /usr/bin/env python3

# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options.  On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols.  The #if(n)def/#else/#else
# lines themselves (if the #if(n)def tests for one of the mentioned
# names) are removed as well.

# Features: Arbitrary nesting of recognized and unrecognized
# preprocessor statements works correctly.  Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little.  It does accept whitespace around the '#' character.

# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines.  The effect of #define/#undef commands in the input
# file or in included files is not taken into account.  Tests using
# #if and the defined() pseudo function are not recognized.  The #elif
# command is not recognized.  Improperly nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.

import sys
import getopt

defs = []
undefs = []

def main():
    opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
    for o, a in opts:
        if o == '-D':
            defs.append(a)
        if o == '-U':
            undefs.append(a)
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            process(sys.stdin, sys.stdout)
        else:
            f = open(filename, 'r')
            process(f, sys.stdout)
            f.close()

def process(fpi, fpo):
    keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
    ok = 1
    stack = []
    while 1:
        line = fpi.readline()
        if not line: break
        while line[-2:] == '\\\n':
            nextline = fpi.readline()
            if not nextline: break
            line = line + nextline
        tmp = line.strip()
        if tmp[:1] != '#':
            if ok: fpo.write(line)
            continue
        tmp = tmp[1:].strip()
        words = tmp.split()
        keyword = words[0]
        if keyword not in keywords:
            if ok: fpo.write(line)
            continue
        if keyword in ('ifdef', 'ifndef') and len(words) == 2:
            if keyword == 'ifdef':
                ko = 1
            else:
                ko = 0
            word = words[1]
            if word in defs:
                stack.append((ok, ko, word))
                if not ko: ok = 0
            elif word in undefs:
                stack.append((ok, not ko, word))
                if ko: ok = 0
            else:
                stack.append((ok, -1, word))
                if ok: fpo.write(line)
        elif keyword == 'if':
            stack.append((ok, -1, ''))
            if ok: fpo.write(line)
        elif keyword == 'else' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                if ok: fpo.write(line)
            else:
                s_ko = not s_ko
                ok = s_ok
                if not s_ko: ok = 0
                stack[-1] = s_ok, s_ko, s_word
        elif keyword == 'endif' and stack:
            s_ok, s_ko, s_word = stack[-1]
            if s_ko < 0:
                if ok: fpo.write(line)
            del stack[-1]
            ok = s_ok
        else:
            sys.stderr.write('Unknown keyword %s\n' % keyword)
    if stack:
        sys.stderr.write('stack: %s\n' % stack)

if __name__ == '__main__':
    main()

37
Tools/scripts/import_diagnostics.py
Normal file
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
"""Miscellaneous diagnostics for the import system"""

import sys
import argparse
from pprint import pprint

def _dump_state(args):
    print(sys.version)
    for name in args.attributes:
        print("sys.{}:".format(name))
        pprint(getattr(sys, name))

def _add_dump_args(cmd):
    cmd.add_argument("attributes", metavar="ATTR", nargs="+",
                     help="sys module attribute to display")

COMMANDS = (
    ("dump", "Dump import state", _dump_state, _add_dump_args),
)

def _make_parser():
    parser = argparse.ArgumentParser()
    sub = parser.add_subparsers(title="Commands")
    for name, description, implementation, add_args in COMMANDS:
        cmd = sub.add_parser(name, help=description)
        cmd.set_defaults(command=implementation)
        add_args(cmd)
    return parser

def main(args):
    parser = _make_parser()
    args = parser.parse_args(args)
    return args.command(args)

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))

24
Tools/scripts/lfcr.py
Normal file
@@ -0,0 +1,24 @@
#! /usr/bin/env python3

"Replace LF with CRLF in argument files.  Print names of changed files."

import sys, re, os

def main():
    for filename in sys.argv[1:]:
        if os.path.isdir(filename):
            print(filename, "Directory!")
            continue
        with open(filename, "rb") as f:
            data = f.read()
        if b'\0' in data:
            print(filename, "Binary!")
            continue
        newdata = re.sub(b"\r?\n", b"\r\n", data)
        if newdata != data:
            print(filename)
            with open(filename, "wb") as f:
                f.write(newdata)

if __name__ == '__main__':
    main()

80
Tools/scripts/linktree.py
Normal file
@@ -0,0 +1,80 @@
#! /usr/bin/env python3

# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree

import sys, os

LINK = '.LINK' # Name of special symlink at the top.

debug = 0

def main():
    if not 3 <= len(sys.argv) <= 4:
        print('usage:', sys.argv[0], 'oldtree newtree [linkto]')
        return 2
    oldtree, newtree = sys.argv[1], sys.argv[2]
    if len(sys.argv) > 3:
        link = sys.argv[3]
        link_may_fail = 1
    else:
        link = LINK
        link_may_fail = 0
    if not os.path.isdir(oldtree):
        print(oldtree + ': not a directory')
        return 1
    try:
        os.mkdir(newtree, 0o777)
    except OSError as msg:
        print(newtree + ': cannot mkdir:', msg)
        return 1
    linkname = os.path.join(newtree, link)
    try:
        os.symlink(os.path.join(os.pardir, oldtree), linkname)
    except OSError as msg:
        if not link_may_fail:
            print(linkname + ': cannot symlink:', msg)
            return 1
        else:
            print(linkname + ': warning: cannot symlink:', msg)
    linknames(oldtree, newtree, link)
    return 0

def linknames(old, new, link):
    if debug: print('linknames', (old, new, link))
    try:
        names = os.listdir(old)
    except OSError as msg:
        print(old + ': warning: cannot listdir:', msg)
        return
    for name in names:
        if name not in (os.curdir, os.pardir):
            oldname = os.path.join(old, name)
            linkname = os.path.join(link, name)
            newname = os.path.join(new, name)
            if debug > 1: print(oldname, newname, linkname)
            if os.path.isdir(oldname) and \
               not os.path.islink(oldname):
                try:
                    os.mkdir(newname, 0o777)
                    ok = 1
                except:
                    print(newname + \
                          ': warning: cannot mkdir:', msg)
                    ok = 0
                if ok:
                    linkname = os.path.join(os.pardir,
                                            linkname)
                    linknames(oldname, newname, linkname)
            else:
                os.symlink(linkname, newname)

if __name__ == '__main__':
    sys.exit(main())

28
Tools/scripts/lll.py
Normal file
@@ -0,0 +1,28 @@
#! /usr/bin/env python3

# Find symbolic links and show where they point to.
# Arguments are directories to search; default is current directory.
# No recursion.
# (This is a totally different program from "findsymlinks.py"!)

import sys, os

def lll(dirname):
    for name in os.listdir(dirname):
        if name not in (os.curdir, os.pardir):
            full = os.path.join(dirname, name)
            if os.path.islink(full):
                print(name, '->', os.readlink(full))
def main():
    args = sys.argv[1:]
    if not args: args = [os.curdir]
    first = 1
    for arg in args:
        if len(args) > 1:
            if not first: print()
            first = 0
            print(arg + ':')
        lll(arg)

if __name__ == '__main__':
    main()

246
Tools/scripts/mailerdaemon.py
Normal file
@@ -0,0 +1,246 @@
#!/usr/bin/env python3
"""Classes to parse mailer-daemon messages."""

import calendar
import email.message
import re
import os
import sys


class Unparseable(Exception):
    pass


class ErrorMessage(email.message.Message):
    def __init__(self):
        email.message.Message.__init__(self)
        self.sub = ''

    def is_warning(self):
        sub = self.get('Subject')
        if not sub:
            return 0
        sub = sub.lower()
        if sub.startswith('waiting mail'):
            return 1
        if 'warning' in sub:
            return 1
        self.sub = sub
        return 0

    def get_errors(self):
        for p in EMPARSERS:
            self.rewindbody()
            try:
                return p(self.fp, self.sub)
            except Unparseable:
                pass
        raise Unparseable

# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address.  The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's.  The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses.  The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
emparse_list_list = [
    'error: (?P<reason>unresolvable): (?P<email>.+)',
    ('----- The following addresses had permanent fatal errors -----\n',
     '(?P<email>[^ \n].*)\n( .*\n)?'),
    'remote execution.*\n.*rmail (?P<email>.+)',
    ('The following recipients did not receive your message:\n\n',
     ' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
    '------- Failure Reasons  --------\n\n(?P<reason>.*)\n(?P<email>.*)',
    '^<(?P<email>.*)>:\n(?P<reason>.*)',
    '^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
    '^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
    '^Original-Recipient: rfc822;(?P<email>.*)',
    '^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
    '^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
    '^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
    '^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
    '^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
    ]
# compile the re's in the list and store them in-place.
for i in range(len(emparse_list_list)):
    x = emparse_list_list[i]
    if type(x) is type(''):
        x = re.compile(x, re.MULTILINE)
    else:
        xl = []
        for x in x:
            xl.append(re.compile(x, re.MULTILINE))
        x = tuple(xl)
        del xl
    emparse_list_list[i] = x
    del x
del i

# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order.  After the first match,
# no more expressions are searched for.  So, order is important.
emparse_list_reason = [
    r'^5\d{2} <>\.\.\. (?P<reason>.*)',
    r'<>\.\.\. (?P<reason>.*)',
    re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
    re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
    re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
    ]
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
    data = fp.read()
    res = emparse_list_from.search(data)
    if res is None:
        from_index = len(data)
    else:
        from_index = res.start(0)
    errors = []
    emails = []
    reason = None
    for regexp in emparse_list_list:
        if type(regexp) is type(()):
            res = regexp[0].search(data, 0, from_index)
            if res is not None:
                try:
                    reason = res.group('reason')
                except IndexError:
                    pass
                while 1:
                    res = regexp[1].match(data, res.end(0), from_index)
                    if res is None:
                        break
                    emails.append(res.group('email'))
                break
        else:
            res = regexp.search(data, 0, from_index)
            if res is not None:
                emails.append(res.group('email'))
                try:
                    reason = res.group('reason')
                except IndexError:
                    pass
                break
    if not emails:
        raise Unparseable
    if not reason:
        reason = sub
        if reason[:15] == 'returned mail: ':
            reason = reason[15:]
        for regexp in emparse_list_reason:
            if type(regexp) is type(''):
                for i in range(len(emails)-1,-1,-1):
                    email = emails[i]
                    exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
                    res = exp.search(data)
                    if res is not None:
                        errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
                        del emails[i]
                continue
            res = regexp.search(data)
            if res is not None:
                reason = res.group('reason')
                break
    for email in emails:
        errors.append(' '.join((email.strip()+': '+reason).split()))
    return errors

EMPARSERS = [emparse_list]

def sort_numeric(a, b):
    a = int(a)
    b = int(b)
    if a < b:
        return -1
    elif a > b:
        return 1
    else:
        return 0

def parsedir(dir, modify):
    os.chdir(dir)
    pat = re.compile('^[0-9]*$')
    errordict = {}
    errorfirst = {}
    errorlast = {}
    nok = nwarn = nbad = 0

    # find all numeric file names and sort them
    files = list(filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.')))
    files.sort(sort_numeric)

    for fn in files:
        # Lets try to parse the file.
        fp = open(fn)
        m = email.message_from_file(fp, _class=ErrorMessage)
        sender = m.getaddr('From')
        print('%s\t%-40s\t'%(fn, sender[1]), end=' ')

        if m.is_warning():
            fp.close()
            print('warning only')
            nwarn = nwarn + 1
            if modify:
                os.rename(fn, ','+fn)
##              os.unlink(fn)
            continue

        try:
            errors = m.get_errors()
        except Unparseable:
            print('** Not parseable')
            nbad = nbad + 1
            fp.close()
            continue
        print(len(errors), 'errors')

        # Remember them
        for e in errors:
            try:
                mm, dd = m.getdate('date')[1:1+2]
                date = '%s %02d' % (calendar.month_abbr[mm], dd)
            except:
                date = '??????'
            if e not in errordict:
                errordict[e] = 1
                errorfirst[e] = '%s (%s)' % (fn, date)
            else:
                errordict[e] = errordict[e] + 1
            errorlast[e] = '%s (%s)' % (fn, date)

        fp.close()
        nok = nok + 1
        if modify:
            os.rename(fn, ','+fn)
##          os.unlink(fn)

    print('--------------')
    print(nok, 'files parsed,',nwarn,'files warning-only,', end=' ')
    print(nbad,'files unparseable')
    print('--------------')
    list = []
    for e in errordict.keys():
        list.append((errordict[e], errorfirst[e], errorlast[e], e))
    list.sort()
    for num, first, last, e in list:
        print('%d %s - %s\t%s' % (num, first, last, e))

def main():
    modify = 0
    if len(sys.argv) > 1 and sys.argv[1] == '-d':
        modify = 1
        del sys.argv[1]
    if len(sys.argv) > 1:
        for folder in sys.argv[1:]:
            parsedir(folder, modify)
    else:
        parsedir('/ufs/jack/Mail/errorsinbox', modify)

if __name__ == '__main__' or sys.argv[0] == __name__:
    main()

94
Tools/scripts/make_ctype.py
Normal file
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
"""Script that generates the ctype.h-replacement in stringobject.c."""

NAMES = ("LOWER", "UPPER", "ALPHA", "DIGIT", "XDIGIT", "ALNUM", "SPACE")

print("""
#define FLAG_LOWER  0x01
#define FLAG_UPPER  0x02
#define FLAG_ALPHA  (FLAG_LOWER|FLAG_UPPER)
#define FLAG_DIGIT  0x04
#define FLAG_ALNUM  (FLAG_ALPHA|FLAG_DIGIT)
#define FLAG_SPACE  0x08
#define FLAG_XDIGIT 0x10

static unsigned int ctype_table[256] = {""")

for i in range(128):
    c = chr(i)
    flags = []
    for name in NAMES:
        if name in ("ALPHA", "ALNUM"):
            continue
        if name == "XDIGIT":
            method = lambda: c.isdigit() or c.upper() in "ABCDEF"
        else:
            method = getattr(c, "is" + name.lower())
        if method():
            flags.append("FLAG_" + name)
    rc = repr(c)
    if c == '\v':
        rc = "'\\v'"
    elif c == '\f':
        rc = "'\\f'"
    if not flags:
        print("    0, /* 0x%x %s */" % (i, rc))
    else:
        print("    %s, /* 0x%x %s */" % ("|".join(flags), i, rc))

for i in range(128, 256, 16):
    print("    %s," % ", ".join(16*["0"]))

print("};")
print("")

for name in NAMES:
    print("#define IS%s(c) (ctype_table[Py_CHARMASK(c)] & FLAG_%s)" %
          (name, name))

print("")

for name in NAMES:
    name = "is" + name.lower()
    print("#undef %s" % name)
    print("#define %s(c) undefined_%s(c)" % (name, name))

print("""
static unsigned char ctype_tolower[256] = {""")

for i in range(0, 256, 8):
    values = []
    for i in range(i, i+8):
        if i < 128:
            c = chr(i)
            if c.isupper():
                i = ord(c.lower())
        values.append("0x%02x" % i)
    print("    %s," % ", ".join(values))

print("};")

print("""
static unsigned char ctype_toupper[256] = {""")

for i in range(0, 256, 8):
    values = []
    for i in range(i, i+8):
        if i < 128:
            c = chr(i)
            if c.islower():
                i = ord(c.upper())
        values.append("0x%02x" % i)
    print("    %s," % ", ".join(values))

print("};")

print("""
#define TOLOWER(c) (ctype_tolower[Py_CHARMASK(c)])
#define TOUPPER(c) (ctype_toupper[Py_CHARMASK(c)])

#undef tolower
#define tolower(c) undefined_tolower(c)
#undef toupper
#define toupper(c) undefined_toupper(c)
""")

93
Tools/scripts/md5sum.py
Normal file
@@ -0,0 +1,93 @@
#! /usr/bin/env python3

"""Python utility to print MD5 checksums of argument files.
"""


bufsize = 8096
fnfilter = None
rmode = 'rb'

usage = """
usage: md5sum.py [-b] [-t] [-l] [-s bufsize] [file ...]
-b        : read files in binary mode (default)
-t        : read files in text mode (you almost certainly don't want this!)
-l        : print last pathname component only
-s bufsize: read buffer size (default %d)
file ...  : files to sum; '-' or no files means stdin
""" % bufsize

import io
import sys
import os
import getopt
from hashlib import md5

def sum(*files):
    sts = 0
    if files and isinstance(files[-1], io.IOBase):
        out, files = files[-1], files[:-1]
    else:
        out = sys.stdout
    if len(files) == 1 and not isinstance(files[0], str):
        files = files[0]
    for f in files:
        if isinstance(f, str):
            if f == '-':
                sts = printsumfp(sys.stdin, '<stdin>', out) or sts
            else:
                sts = printsum(f, out) or sts
        else:
            sts = sum(f, out) or sts
    return sts

def printsum(filename, out=sys.stdout):
    try:
        fp = open(filename, rmode)
    except IOError as msg:
        sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
        return 1
    if fnfilter:
        filename = fnfilter(filename)
    sts = printsumfp(fp, filename, out)
    fp.close()
    return sts

def printsumfp(fp, filename, out=sys.stdout):
    m = md5()
    try:
        while 1:
            data = fp.read(bufsize)
            if not data:
                break
            if isinstance(data, str):
                data = data.encode(fp.encoding)
            m.update(data)
    except IOError as msg:
        sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
        return 1
    out.write('%s %s\n' % (m.hexdigest(), filename))
    return 0

def main(args = sys.argv[1:], out=sys.stdout):
    global fnfilter, rmode, bufsize
    try:
        opts, args = getopt.getopt(args, 'blts:')
    except getopt.error as msg:
        sys.stderr.write('%s: %s\n%s' % (sys.argv[0], msg, usage))
        return 2
    for o, a in opts:
        if o == '-l':
            fnfilter = os.path.basename
        elif o == '-b':
            rmode = 'rb'
        elif o == '-t':
            rmode = 'r'
        elif o == '-s':
            bufsize = int(a)
    if not args:
        args = ['-']
    return sum(args, out)

if __name__ == '__main__' or __name__ == sys.argv[0]:
    sys.exit(main(sys.argv[1:], sys.stdout))

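A quick in-memory sanity check of the hashing loop used by printsumfp() can be sketched as follows; the buffer contents and the "<buffer>" label are arbitrary:

import io
from hashlib import md5

# Mirror printsumfp(): read in chunks, update the digest, print "digest name".
buf = io.BytesIO(b"hello world\n")
m = md5()
for chunk in iter(lambda: buf.read(8096), b""):
    m.update(chunk)
print('%s %s' % (m.hexdigest(), '<buffer>'))
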
66
Tools/scripts/mkreal.py
Normal file
@@ -0,0 +1,66 @@
#! /usr/bin/env python3

# mkreal
#
# turn a symlink to a directory into a real directory

import sys
import os
from stat import *

join = os.path.join

error = 'mkreal error'

BUFSIZE = 32*1024

def mkrealfile(name):
    st = os.stat(name)          # Get the mode
    mode = S_IMODE(st[ST_MODE])
    linkto = os.readlink(name)  # Make sure again it's a symlink
    f_in = open(name, 'r')      # This ensures it's a file
    os.unlink(name)
    f_out = open(name, 'w')
    while 1:
        buf = f_in.read(BUFSIZE)
        if not buf: break
        f_out.write(buf)
    del f_out                   # Flush data to disk before changing mode
    os.chmod(name, mode)

def mkrealdir(name):
    st = os.stat(name)          # Get the mode
    mode = S_IMODE(st[ST_MODE])
    linkto = os.readlink(name)
    files = os.listdir(name)
    os.unlink(name)
    os.mkdir(name, mode)
    os.chmod(name, mode)
    linkto = join(os.pardir, linkto)
    #
    for filename in files:
        if filename not in (os.curdir, os.pardir):
            os.symlink(join(linkto, filename), join(name, filename))

def main():
    sys.stdout = sys.stderr
    progname = os.path.basename(sys.argv[0])
    if progname == '-c': progname = 'mkreal'
    args = sys.argv[1:]
    if not args:
        print('usage:', progname, 'path ...')
        sys.exit(2)
    status = 0
    for name in args:
        if not os.path.islink(name):
            print(progname+':', name+':', 'not a symlink')
            status = 1
        else:
            if os.path.isdir(name):
                mkrealdir(name)
            else:
                mkrealfile(name)
    sys.exit(status)

if __name__ == '__main__':
    main()

133
Tools/scripts/ndiff.py
Normal file
@@ -0,0 +1,133 @@
#! /usr/bin/env python3

# Module ndiff version 1.7.0
# Released to the public domain 08-Dec-2000,
# by Tim Peters (tim.one@home.com).

# Provided as-is; use at your own risk; no warranty; no promises; enjoy!

# ndiff.py is now simply a front-end to the difflib.ndiff() function.
# Originally, it contained the difflib.SequenceMatcher class as well.
# This completes the raiding of reusable code from this formerly
# self-contained script.

"""ndiff [-q] file1 file2
    or
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2

Print a human-friendly file difference report to stdout.  Both inter-
and intra-line differences are noted.  In the second form, recreate file1
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.

In the first form, if -q ("quiet") is not specified, the first two lines
of output are

-: file1
+: file2

Each remaining line begins with a two-letter code:

    "- "    line unique to file1
    "+ "    line unique to file2
    "  "    line common to both files
    "? "    line not present in either input file

Lines beginning with "? " attempt to guide the eye to intraline
differences, and were not present in either input file.  These lines can be
confusing if the source files contain tab characters.

The first file can be recovered by retaining only lines that begin with
"  " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.

The second file can be recovered similarly, but by retaining only "  " and
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
recovered by piping the output through

    sed -n '/^[+ ] /s/^..//p'
"""

__version__ = 1, 7, 0

import difflib, sys

def fail(msg):
    out = sys.stderr.write
    out(msg + "\n\n")
    out(__doc__)
    return 0

# open a file & return the file object; gripe and return 0 if it
# couldn't be opened
def fopen(fname):
    try:
        return open(fname)
    except IOError as detail:
        return fail("couldn't open " + fname + ": " + str(detail))

# open two files & spray the diff to stdout; return false iff a problem
def fcompare(f1name, f2name):
    f1 = fopen(f1name)
    f2 = fopen(f2name)
    if not f1 or not f2:
        return 0

    a = f1.readlines(); f1.close()
    b = f2.readlines(); f2.close()
    for line in difflib.ndiff(a, b):
        print(line, end=' ')

    return 1

# crack args (sys.argv[1:] is normal) & compare;
# return false iff a problem

def main(args):
    import getopt
    try:
        opts, args = getopt.getopt(args, "qr:")
    except getopt.error as detail:
        return fail(str(detail))
    noisy = 1
    qseen = rseen = 0
    for opt, val in opts:
        if opt == "-q":
            qseen = 1
            noisy = 0
        elif opt == "-r":
            rseen = 1
            whichfile = val
    if qseen and rseen:
        return fail("can't specify both -q and -r")
    if rseen:
        if args:
            return fail("no args allowed with -r option")
        if whichfile in ("1", "2"):
            restore(whichfile)
            return 1
        return fail("-r value must be 1 or 2")
    if len(args) != 2:
        return fail("need 2 filename args")
    f1name, f2name = args
    if noisy:
        print('-:', f1name)
        print('+:', f2name)
    return fcompare(f1name, f2name)

# read ndiff output from stdin, and print file1 (which=='1') or
# file2 (which=='2') to stdout

def restore(which):
    restored = difflib.restore(sys.stdin.readlines(), which)
    sys.stdout.writelines(restored)

if __name__ == '__main__':
    args = sys.argv[1:]
    if "-profile" in args:
        import profile, pstats
        args.remove("-profile")
        statf = "ndiff.pro"
        profile.run("main(args)", statf)
        stats = pstats.Stats(statf)
        stats.strip_dirs().sort_stats('time').print_stats()
    else:
        main(args)

103
Tools/scripts/nm2def.py
Normal file
@@ -0,0 +1,103 @@
#! /usr/bin/env python3
"""nm2def.py

Helpers to extract symbols from Unix libs and auto-generate
Windows definition files from them. Depends on nm(1). Tested
on Linux and Solaris only (-p option to nm is for Solaris only).

By Marc-Andre Lemburg, Aug 1998.

Additional notes: the output of nm is supposed to look like this:

acceler.o:
000001fd T PyGrammar_AddAccelerators
         U PyGrammar_FindDFA
00000237 T PyGrammar_RemoveAccelerators
         U _IO_stderr_
         U exit
         U fprintf
         U free
         U malloc
         U printf

grammar1.o:
00000000 T PyGrammar_FindDFA
00000034 T PyGrammar_LabelRepr
         U _PyParser_TokenNames
         U abort
         U printf
         U sprintf

...

Even if this isn't the default output of your nm, there is generally an
option to produce this format (since it is the original v7 Unix format).

"""
import os, sys

PYTHONLIB = 'libpython%d.%d.a' % sys.version_info[:2]
PC_PYTHONLIB = 'Python%d%d.dll' % sys.version_info[:2]
NM = 'nm -p -g %s'                      # For Linux, use "nm -g %s"

def symbols(lib=PYTHONLIB,types=('T','C','D')):

    lines = os.popen(NM % lib).readlines()
    lines = [s.strip() for s in lines]
    symbols = {}
    for line in lines:
        if len(line) == 0 or ':' in line:
            continue
        items = line.split()
        if len(items) != 3:
            continue
        address, type, name = items
        if type not in types:
            continue
        symbols[name] = address,type
    return symbols

def export_list(symbols):

    data = []
    code = []
    for name,(addr,type) in symbols.items():
        if type in ('C','D'):
            data.append('\t'+name)
        else:
            code.append('\t'+name)
    data.sort()
    data.append('')
    code.sort()
    return ' DATA\n'.join(data)+'\n'+'\n'.join(code)

# Definition file template
DEF_TEMPLATE = """\
EXPORTS
%s
"""

# Special symbols that have to be included even though they don't
# pass the filter
SPECIALS = (
    )

def filter_Python(symbols,specials=SPECIALS):

    for name in list(symbols.keys()):
        if name[:2] == 'Py' or name[:3] == '_Py':
            pass
        elif name not in specials:
            del symbols[name]

def main():

    s = symbols(PYTHONLIB)
    filter_Python(s)
    exports = export_list(s)
    f = sys.stdout # open('PC/python_nt.def','w')
    f.write(DEF_TEMPLATE % (exports))
    f.close()

if __name__ == '__main__':
    main()

210
Tools/scripts/objgraph.py
Normal file
@@ -0,0 +1,210 @@
#! /usr/bin/env python3

# objgraph
#
# Read "nm -o" input of a set of libraries or modules and print various
# interesting listings, such as:
#
# - which names are used but not defined in the set (and used where),
# - which names are defined in the set (and where),
# - which modules use which other modules,
# - which modules are used by which other modules.
#
# Usage: objgraph [-cdu] [file] ...
# -c: print callers per objectfile
# -d: print callees per objectfile
# -u: print usage of undefined symbols
# If none of -cdu is specified, all are assumed.
# Use "nm -o" to generate the input
# e.g.: nm -o /lib/libc.a | objgraph


import sys
import os
import getopt
import re

# Types of symbols.
#
definitions = 'TRGDSBAEC'
externals = 'UV'
ignore = 'Nntrgdsbavuc'

# Regular expression to parse "nm -o" output.
#
matcher = re.compile('(.*):\t?........ (.) (.*)$')

# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
    if key in dict:
        dict[key].append(item)
    else:
        dict[key] = [item]

# Return a flattened version of a list of strings: the concatenation
# of its elements with intervening spaces.
#
def flat(list):
    s = ''
    for item in list:
        s = s + ' ' + item
    return s[1:]

# Global variables mapping defined/undefined names to files and back.
#
file2undef = {}
def2file = {}
file2def = {}
undef2file = {}

# Read one input file and merge the data into the tables.
# Argument is an open file.
#
def readinput(fp):
    while 1:
        s = fp.readline()
        if not s:
            break
        # If you get any output from this line,
        # it is probably caused by an unexpected input line:
        if matcher.search(s) < 0: s; continue # Shouldn't happen
        (ra, rb), (r1a, r1b), (r2a, r2b), (r3a, r3b) = matcher.regs[:4]
        fn, name, type = s[r1a:r1b], s[r3a:r3b], s[r2a:r2b]
        if type in definitions:
            store(def2file, name, fn)
            store(file2def, fn, name)
        elif type in externals:
            store(file2undef, fn, name)
            store(undef2file, name, fn)
        elif not type in ignore:
            print(fn + ':' + name + ': unknown type ' + type)

# Print all names that were undefined in some module and where they are
# defined.
#
def printcallee():
    flist = sorted(file2undef.keys())
    for filename in flist:
        print(filename + ':')
        elist = file2undef[filename]
        elist.sort()
        for ext in elist:
            if len(ext) >= 8:
                tabs = '\t'
            else:
                tabs = '\t\t'
            if ext not in def2file:
                print('\t' + ext + tabs + ' *undefined')
            else:
                print('\t' + ext + tabs + flat(def2file[ext]))

# Print for each module the names of the other modules that use it.
#
def printcaller():
    files = sorted(file2def.keys())
    for filename in files:
        callers = []
        for label in file2def[filename]:
            if label in undef2file:
                callers = callers + undef2file[label]
        if callers:
            callers.sort()
            print(filename + ':')
            lastfn = ''
            for fn in callers:
                if fn != lastfn:
                    print('\t' + fn)
                lastfn = fn
        else:
            print(filename + ': unused')

# Print undefined names and where they are used.
#
def printundef():
    undefs = {}
    for filename in list(file2undef.keys()):
        for ext in file2undef[filename]:
            if ext not in def2file:
                store(undefs, ext, filename)
    elist = sorted(undefs.keys())
    for ext in elist:
        print(ext + ':')
        flist = sorted(undefs[ext])
        for filename in flist:
            print('\t' + filename)

# Print warning messages about names defined in more than one file.
#
def warndups():
    savestdout = sys.stdout
    sys.stdout = sys.stderr
    names = sorted(def2file.keys())
    for name in names:
        if len(def2file[name]) > 1:
            print('warning:', name, 'multiply defined:', end=' ')
            print(flat(def2file[name]))
    sys.stdout = savestdout

# Main program
#
def main():
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'cdu')
    except getopt.error:
        sys.stdout = sys.stderr
        print('Usage:', os.path.basename(sys.argv[0]), end=' ')
        print('[-cdu] [file] ...')
        print('-c: print callers per objectfile')
        print('-d: print callees per objectfile')
        print('-u: print usage of undefined symbols')
        print('If none of -cdu is specified, all are assumed.')
        print('Use "nm -o" to generate the input')
        print('e.g.: nm -o /lib/libc.a | objgraph')
        return 1
    optu = optc = optd = 0
    for opt, void in optlist:
        if opt == '-u':
            optu = 1
        elif opt == '-c':
            optc = 1
        elif opt == '-d':
            optd = 1
    if optu == optc == optd == 0:
        optu = optc = optd = 1
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            readinput(sys.stdin)
        else:
            readinput(open(filename, 'r'))
    #
    warndups()
    #
    more = (optu + optc + optd > 1)
    if optd:
        if more:
            print('---------------All callees------------------')
        printcallee()
    if optu:
        if more:
            print('---------------Undefined callees------------')
        printundef()
    if optc:
        if more:
            print('---------------All Callers------------------')
        printcaller()
    return 0

# Call the main program.
# Use its return value as exit status.
# Catch interrupts to avoid stack trace.
#
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        sys.exit(1)

105
Tools/scripts/parse_html5_entities.py
Normal file
@ -0,0 +1,105 @@
#!/usr/bin/env python3
|
||||
"""
|
||||
Utility for parsing HTML5 entity definitions available from:
|
||||
|
||||
http://dev.w3.org/html5/spec/entities.json
|
||||
|
||||
Written by Ezio Melotti and Iuliia Proskurnia.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
from urllib.request import urlopen
|
||||
from html.entities import html5
|
||||
|
||||
entities_url = 'http://dev.w3.org/html5/spec/entities.json'
|
||||
|
||||
def get_json(url):
|
||||
"""Download the json file from the url and returns a decoded object."""
|
||||
with urlopen(url) as f:
|
||||
data = f.read().decode('utf-8')
|
||||
return json.loads(data)
|
||||
|
||||
def create_dict(entities):
|
||||
"""Create the html5 dict from the decoded json object."""
|
||||
new_html5 = {}
|
||||
for name, value in entities.items():
|
||||
new_html5[name.lstrip('&')] = value['characters']
|
||||
return new_html5
|
||||
|
||||
def compare_dicts(old, new):
|
||||
"""Compare the old and new dicts and print the differences."""
|
||||
added = new.keys() - old.keys()
|
||||
if added:
|
||||
print('{} entitie(s) have been added:'.format(len(added)))
|
||||
for name in sorted(added):
|
||||
print(' {!r}: {!r}'.format(name, new[name]))
|
||||
removed = old.keys() - new.keys()
|
||||
if removed:
|
||||
print('{} entitie(s) have been removed:'.format(len(removed)))
|
||||
for name in sorted(removed):
|
||||
print(' {!r}: {!r}'.format(name, old[name]))
|
||||
changed = set()
|
||||
for name in (old.keys() & new.keys()):
|
||||
if old[name] != new[name]:
|
||||
changed.add((name, old[name], new[name]))
|
||||
if changed:
|
||||
print('{} entitie(s) have been modified:'.format(len(changed)))
|
||||
for item in sorted(changed):
|
||||
print(' {!r}: {!r} -> {!r}'.format(*item))
|
||||
|
||||
def write_items(entities, file=sys.stdout):
|
||||
"""Write the items of the dictionary in the specified file."""
|
||||
# The keys in the generated dictionary should be sorted
|
||||
# in a case-insensitive way, however, when two keys are equal,
|
||||
# the uppercase version should come first so that the result
|
||||
# looks like: ['Aacute', 'aacute', 'Aacute;', 'aacute;', ...]
|
||||
# To do this we first sort in a case-sensitive way (so all the
|
||||
# uppercase chars come first) and then sort with key=str.lower.
|
||||
# Since the sorting is stable the uppercase keys will eventually
|
||||
# be before their equivalent lowercase version.
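    # A minimal worked example of that two-pass sort (illustrative only):
    #
    #   >>> keys = ['aacute;', 'Aacute', 'aacute', 'Aacute;']
    #   >>> sorted(sorted(keys), key=str.lower)
    #   ['Aacute', 'aacute', 'Aacute;', 'aacute;']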
|
||||
keys = sorted(entities.keys())
|
||||
keys = sorted(keys, key=str.lower)
|
||||
print('html5 = {', file=file)
|
||||
for name in keys:
|
||||
print(' {!r}: {!a},'.format(name, entities[name]), file=file)
|
||||
print('}', file=file)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# without args print a diff between html.entities.html5 and new_html5
|
||||
# with --create print the new html5 dict
|
||||
# with --patch patch the Lib/html/entities.py file
|
||||
new_html5 = create_dict(get_json(entities_url))
|
||||
if '--create' in sys.argv:
|
||||
print('# map the HTML5 named character references to the '
|
||||
'equivalent Unicode character(s)')
|
||||
print('# Generated by {}. Do not edit manually.'.format(__file__))
|
||||
write_items(new_html5)
|
||||
elif '--patch' in sys.argv:
|
||||
fname = 'Lib/html/entities.py'
|
||||
temp_fname = fname + '.temp'
|
||||
with open(fname) as f1, open(temp_fname, 'w') as f2:
|
||||
skip = False
|
||||
for line in f1:
|
||||
if line.startswith('html5 = {'):
|
||||
write_items(new_html5, file=f2)
|
||||
skip = True
|
||||
continue
|
||||
if skip:
|
||||
# skip the old items until the }
|
||||
if line.startswith('}'):
|
||||
skip = False
|
||||
continue
|
||||
f2.write(line)
|
||||
os.remove(fname)
|
||||
os.rename(temp_fname, fname)
|
||||
else:
|
||||
if html5 == new_html5:
|
||||
print('The current dictionary is updated.')
|
||||
else:
|
||||
compare_dicts(html5, new_html5)
|
||||
print('Run "./python {0} --patch" to update Lib/html/entities.html '
|
||||
'or "./python {0} --create" to see the generated ' 'dictionary.'.format(__file__))
|
||||
62
Tools/scripts/parseentities.py
Normal file
@ -0,0 +1,62 @@
#!/usr/bin/env python3
|
||||
""" Utility for parsing HTML entity definitions available from:
|
||||
|
||||
http://www.w3.org/ as e.g.
|
||||
http://www.w3.org/TR/REC-html40/HTMLlat1.ent
|
||||
|
||||
Input is read from stdin, output is written to stdout in form of a
|
||||
Python snippet defining a dictionary "entitydefs" mapping literal
|
||||
entity name to character or numeric entity.
|
||||
|
||||
Marc-Andre Lemburg, mal@lemburg.com, 1999.
|
||||
Use as you like. NO WARRANTIES.
|
||||
|
||||
"""
|
||||
import re,sys
|
||||
|
||||
entityRE = re.compile(r'<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
|
||||
|
||||
def parse(text, pos=0, endpos=None):
|
||||
if endpos is None:
|
||||
endpos = len(text)
|
||||
d = {}
|
||||
while 1:
|
||||
m = entityRE.search(text,pos,endpos)
|
||||
if not m:
|
||||
break
|
||||
name,charcode,comment = m.groups()
|
||||
d[name] = charcode,comment
|
||||
pos = m.end()
|
||||
return d
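# Illustrative example (not part of the original script) of parsing a
# single, hypothetical ENTITY declaration:
#
#   >>> parse('<!ENTITY nbsp   CDATA "&#160;" -- no-break space -->')
#   {'nbsp': ('&#160;', 'no-break space')}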
|
||||
|
||||
def writefile(f,defs):
|
||||
|
||||
f.write("entitydefs = {\n")
|
||||
items = sorted(defs.items())
|
||||
for name, (charcode,comment) in items:
|
||||
if charcode[:2] == '&#':
|
||||
code = int(charcode[2:-1])
|
||||
if code < 256:
|
||||
charcode = r"'\%o'" % code
|
||||
else:
|
||||
charcode = repr(charcode)
|
||||
else:
|
||||
charcode = repr(charcode)
|
||||
comment = ' '.join(comment.split())
|
||||
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
|
||||
f.write('\n}\n')
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) > 1:
|
||||
infile = open(sys.argv[1])
|
||||
else:
|
||||
infile = sys.stdin
|
||||
if len(sys.argv) > 2:
|
||||
outfile = open(sys.argv[2],'w')
|
||||
else:
|
||||
outfile = sys.stdout
|
||||
text = infile.read()
|
||||
defs = parse(text)
|
||||
writefile(outfile,defs)
|
||||
285
Tools/scripts/patchcheck.py
Normal file
@ -0,0 +1,285 @@
#!/usr/bin/env python3
|
||||
"""Check proposed changes for common issues."""
|
||||
import re
|
||||
import sys
|
||||
import shutil
|
||||
import os.path
|
||||
import subprocess
|
||||
import sysconfig
|
||||
|
||||
import reindent
|
||||
import untabify
|
||||
|
||||
|
||||
# Excluded directories which are copies of external libraries:
|
||||
# don't check their coding style
|
||||
EXCLUDE_DIRS = [os.path.join('Modules', '_ctypes', 'libffi_osx'),
|
||||
os.path.join('Modules', '_ctypes', 'libffi_msvc'),
|
||||
os.path.join('Modules', '_decimal', 'libmpdec'),
|
||||
os.path.join('Modules', 'expat'),
|
||||
os.path.join('Modules', 'zlib')]
|
||||
SRCDIR = sysconfig.get_config_var('srcdir')
|
||||
|
||||
|
||||
def n_files_str(count):
|
||||
"""Return 'N file(s)' with the proper plurality on 'file'."""
|
||||
return "{} file{}".format(count, "s" if count != 1 else "")
|
||||
|
||||
|
||||
def status(message, modal=False, info=None):
|
||||
"""Decorator to output status info to stdout."""
|
||||
def decorated_fxn(fxn):
|
||||
def call_fxn(*args, **kwargs):
|
||||
sys.stdout.write(message + ' ... ')
|
||||
sys.stdout.flush()
|
||||
result = fxn(*args, **kwargs)
|
||||
if not modal and not info:
|
||||
print("done")
|
||||
elif info:
|
||||
print(info(result))
|
||||
else:
|
||||
print("yes" if result else "NO")
|
||||
return result
|
||||
return call_fxn
|
||||
return decorated_fxn
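# A hedged usage sketch of the decorator above; the function and message
# are made up for illustration (the real call sites follow below):
#
#   @status("Counting widgets", info=lambda n: "{} found".format(n))
#   def count_widgets():
#       return 3
#
# Calling count_widgets() prints "Counting widgets ... 3 found" and
# still returns 3.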
|
||||
|
||||
|
||||
def get_git_branch():
|
||||
"""Get the symbolic name for the current git branch"""
|
||||
cmd = "git rev-parse --abbrev-ref HEAD".split()
|
||||
try:
|
||||
return subprocess.check_output(cmd,
|
||||
stderr=subprocess.DEVNULL,
|
||||
cwd=SRCDIR)
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
|
||||
|
||||
def get_git_upstream_remote():
|
||||
"""Get the remote name to use for upstream branches
|
||||
|
||||
Uses "upstream" if it exists, "origin" otherwise
|
||||
"""
|
||||
cmd = "git remote get-url upstream".split()
|
||||
try:
|
||||
subprocess.check_output(cmd,
|
||||
stderr=subprocess.DEVNULL,
|
||||
cwd=SRCDIR)
|
||||
except subprocess.CalledProcessError:
|
||||
return "origin"
|
||||
return "upstream"
|
||||
|
||||
|
||||
@status("Getting base branch for PR",
|
||||
info=lambda x: x if x is not None else "not a PR branch")
|
||||
def get_base_branch():
|
||||
if not os.path.exists(os.path.join(SRCDIR, '.git')):
|
||||
# Not a git checkout, so there's no base branch
|
||||
return None
|
||||
version = sys.version_info
|
||||
if version.releaselevel == 'alpha':
|
||||
base_branch = "master"
|
||||
else:
|
||||
base_branch = "{0.major}.{0.minor}".format(version)
|
||||
this_branch = get_git_branch()
|
||||
if this_branch is None or this_branch == base_branch:
|
||||
# Not on a git PR branch, so there's no base branch
|
||||
return None
|
||||
upstream_remote = get_git_upstream_remote()
|
||||
return upstream_remote + "/" + base_branch
|
||||
|
||||
|
||||
@status("Getting the list of files that have been added/changed",
|
||||
info=lambda x: n_files_str(len(x)))
|
||||
def changed_files(base_branch=None):
|
||||
"""Get the list of changed or added files from git."""
|
||||
if os.path.exists(os.path.join(SRCDIR, '.git')):
|
||||
# We just use an existence check here as:
|
||||
# directory = normal git checkout/clone
|
||||
# file = git worktree directory
|
||||
if base_branch:
|
||||
cmd = 'git diff --name-status ' + base_branch
|
||||
else:
|
||||
cmd = 'git status --porcelain'
|
||||
filenames = []
|
||||
with subprocess.Popen(cmd.split(),
|
||||
stdout=subprocess.PIPE,
|
||||
cwd=SRCDIR) as st:
|
||||
for line in st.stdout:
|
||||
line = line.decode().rstrip()
|
||||
status_text, filename = line.split(maxsplit=1)
|
||||
status = set(status_text)
|
||||
# modified, added or unmerged files
|
||||
if not status.intersection('MAU'):
|
||||
continue
|
||||
if ' -> ' in filename:
|
||||
# file is renamed
|
||||
filename = filename.split(' -> ', 2)[1].strip()
|
||||
filenames.append(filename)
|
||||
else:
|
||||
sys.exit('need a git checkout to get modified files')
|
||||
|
||||
filenames2 = []
|
||||
for filename in filenames:
|
||||
# Normalize the path to be able to match using .startswith()
|
||||
filename = os.path.normpath(filename)
|
||||
if any(filename.startswith(path) for path in EXCLUDE_DIRS):
|
||||
# Exclude the file
|
||||
continue
|
||||
filenames2.append(filename)
|
||||
|
||||
return filenames2
|
||||
|
||||
|
||||
def report_modified_files(file_paths):
|
||||
count = len(file_paths)
|
||||
if count == 0:
|
||||
return n_files_str(count)
|
||||
else:
|
||||
lines = ["{}:".format(n_files_str(count))]
|
||||
for path in file_paths:
|
||||
lines.append(" {}".format(path))
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
@status("Fixing Python file whitespace", info=report_modified_files)
|
||||
def normalize_whitespace(file_paths):
|
||||
"""Make sure that the whitespace for .py files have been normalized."""
|
||||
reindent.makebackup = False # No need to create backups.
|
||||
fixed = [path for path in file_paths if path.endswith('.py') and
|
||||
reindent.check(os.path.join(SRCDIR, path))]
|
||||
return fixed
|
||||
|
||||
|
||||
@status("Fixing C file whitespace", info=report_modified_files)
|
||||
def normalize_c_whitespace(file_paths):
|
||||
"""Report if any C files """
|
||||
fixed = []
|
||||
for path in file_paths:
|
||||
abspath = os.path.join(SRCDIR, path)
|
||||
with open(abspath, 'r') as f:
|
||||
if '\t' not in f.read():
|
||||
continue
|
||||
untabify.process(abspath, 8, verbose=False)
|
||||
fixed.append(path)
|
||||
return fixed
|
||||
|
||||
|
||||
ws_re = re.compile(br'\s+(\r?\n)$')
|
||||
|
||||
@status("Fixing docs whitespace", info=report_modified_files)
|
||||
def normalize_docs_whitespace(file_paths):
|
||||
fixed = []
|
||||
for path in file_paths:
|
||||
abspath = os.path.join(SRCDIR, path)
|
||||
try:
|
||||
with open(abspath, 'rb') as f:
|
||||
lines = f.readlines()
|
||||
new_lines = [ws_re.sub(br'\1', line) for line in lines]
|
||||
if new_lines != lines:
|
||||
shutil.copyfile(abspath, abspath + '.bak')
|
||||
with open(abspath, 'wb') as f:
|
||||
f.writelines(new_lines)
|
||||
fixed.append(path)
|
||||
except Exception as err:
|
||||
print('Cannot fix %s: %s' % (path, err))
|
||||
return fixed
|
||||
|
||||
|
||||
@status("Docs modified", modal=True)
|
||||
def docs_modified(file_paths):
|
||||
"""Report if any file in the Doc directory has been changed."""
|
||||
return bool(file_paths)
|
||||
|
||||
|
||||
@status("Misc/ACKS updated", modal=True)
|
||||
def credit_given(file_paths):
|
||||
"""Check if Misc/ACKS has been changed."""
|
||||
return os.path.join('Misc', 'ACKS') in file_paths
|
||||
|
||||
|
||||
@status("Misc/NEWS.d updated with `blurb`", modal=True)
|
||||
def reported_news(file_paths):
|
||||
"""Check if Misc/NEWS.d has been changed."""
|
||||
return any(p.startswith(os.path.join('Misc', 'NEWS.d', 'next'))
|
||||
for p in file_paths)
|
||||
|
||||
@status("configure regenerated", modal=True, info=str)
|
||||
def regenerated_configure(file_paths):
|
||||
"""Check if configure has been regenerated."""
|
||||
if 'configure.ac' in file_paths:
|
||||
return "yes" if 'configure' in file_paths else "no"
|
||||
else:
|
||||
return "not needed"
|
||||
|
||||
@status("pyconfig.h.in regenerated", modal=True, info=str)
|
||||
def regenerated_pyconfig_h_in(file_paths):
|
||||
"""Check if pyconfig.h.in has been regenerated."""
|
||||
if 'configure.ac' in file_paths:
|
||||
return "yes" if 'pyconfig.h.in' in file_paths else "no"
|
||||
else:
|
||||
return "not needed"
|
||||
|
||||
def travis(pull_request):
|
||||
if pull_request == 'false':
|
||||
print('Not a pull request; skipping')
|
||||
return
|
||||
base_branch = get_base_branch()
|
||||
file_paths = changed_files(base_branch)
|
||||
python_files = [fn for fn in file_paths if fn.endswith('.py')]
|
||||
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
|
||||
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
|
||||
fn.endswith(('.rst', '.inc'))]
|
||||
fixed = []
|
||||
fixed.extend(normalize_whitespace(python_files))
|
||||
fixed.extend(normalize_c_whitespace(c_files))
|
||||
fixed.extend(normalize_docs_whitespace(doc_files))
|
||||
if not fixed:
|
||||
print('No whitespace issues found')
|
||||
else:
|
||||
print(f'Please fix the {len(fixed)} file(s) with whitespace issues')
|
||||
print('(on UNIX you can run `make patchcheck` to make the fixes)')
|
||||
sys.exit(1)
|
||||
|
||||
def main():
|
||||
base_branch = get_base_branch()
|
||||
file_paths = changed_files(base_branch)
|
||||
python_files = [fn for fn in file_paths if fn.endswith('.py')]
|
||||
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
|
||||
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
|
||||
fn.endswith(('.rst', '.inc'))]
|
||||
misc_files = {p for p in file_paths if p.startswith('Misc')}
|
||||
# PEP 8 whitespace rules enforcement.
|
||||
normalize_whitespace(python_files)
|
||||
# C rules enforcement.
|
||||
normalize_c_whitespace(c_files)
|
||||
# Doc whitespace enforcement.
|
||||
normalize_docs_whitespace(doc_files)
|
||||
# Docs updated.
|
||||
docs_modified(doc_files)
|
||||
# Misc/ACKS changed.
|
||||
credit_given(misc_files)
|
||||
# Misc/NEWS changed.
|
||||
reported_news(misc_files)
|
||||
# Regenerated configure, if necessary.
|
||||
regenerated_configure(file_paths)
|
||||
# Regenerated pyconfig.h.in, if necessary.
|
||||
regenerated_pyconfig_h_in(file_paths)
|
||||
|
||||
# Test suite run and passed.
|
||||
if python_files or c_files:
|
||||
end = " and check for refleaks?" if c_files else "?"
|
||||
print()
|
||||
print("Did you run the test suite" + end)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(description=__doc__)
|
||||
parser.add_argument('--travis',
|
||||
help='Perform pass/fail checks')
|
||||
args = parser.parse_args()
|
||||
if args.travis:
|
||||
travis(args.travis)
|
||||
else:
|
||||
main()
|
||||
177
Tools/scripts/pathfix.py
Normal file
@ -0,0 +1,177 @@
#!/usr/bin/env python3
|
||||
|
||||
# Change the #! line occurring in Python scripts. The new interpreter
|
||||
# pathname must be given with a -i option.
|
||||
#
|
||||
# Command line arguments are files or directories to be processed.
|
||||
# Directories are searched recursively for files whose name looks
|
||||
# like a python module.
|
||||
# Symbolic links are always ignored (except as explicit directory
|
||||
# arguments).
|
||||
# The original file is kept as a back-up (with a "~" attached to its name),
|
||||
# -n flag can be used to disable this.
|
||||
#
|
||||
# Undoubtedly you can do this using find and sed or perl, but this is
|
||||
# a nice example of Python code that recurses down a directory tree
|
||||
# and uses regular expressions. Also note several subtleties like
|
||||
# preserving the file's mode and avoiding to even write a temp file
|
||||
# when no changes are needed for a file.
|
||||
#
|
||||
# NB: by changing only the function fixfile() you can turn this
|
||||
# into a program for a different change to Python programs...
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
from stat import *
|
||||
import getopt
|
||||
|
||||
err = sys.stderr.write
|
||||
dbg = err
|
||||
rep = sys.stdout.write
|
||||
|
||||
new_interpreter = None
|
||||
preserve_timestamps = False
|
||||
create_backup = True
|
||||
|
||||
|
||||
def main():
|
||||
global new_interpreter
|
||||
global preserve_timestamps
|
||||
global create_backup
|
||||
usage = ('usage: %s -i /interpreter -p -n file-or-directory ...\n' %
|
||||
sys.argv[0])
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'i:pn')
|
||||
except getopt.error as msg:
|
||||
err(str(msg) + '\n')
|
||||
err(usage)
|
||||
sys.exit(2)
|
||||
for o, a in opts:
|
||||
if o == '-i':
|
||||
new_interpreter = a.encode()
|
||||
if o == '-p':
|
||||
preserve_timestamps = True
|
||||
if o == '-n':
|
||||
create_backup = False
|
||||
if not new_interpreter or not new_interpreter.startswith(b'/') or \
|
||||
not args:
|
||||
err('-i option or file-or-directory missing\n')
|
||||
err(usage)
|
||||
sys.exit(2)
|
||||
bad = 0
|
||||
for arg in args:
|
||||
if os.path.isdir(arg):
|
||||
if recursedown(arg): bad = 1
|
||||
elif os.path.islink(arg):
|
||||
err(arg + ': will not process symbolic links\n')
|
||||
bad = 1
|
||||
else:
|
||||
if fix(arg): bad = 1
|
||||
sys.exit(bad)
|
||||
|
||||
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
|
||||
def ispython(name):
|
||||
return bool(ispythonprog.match(name))
|
||||
|
||||
def recursedown(dirname):
|
||||
dbg('recursedown(%r)\n' % (dirname,))
|
||||
bad = 0
|
||||
try:
|
||||
names = os.listdir(dirname)
|
||||
except OSError as msg:
|
||||
err('%s: cannot list directory: %r\n' % (dirname, msg))
|
||||
return 1
|
||||
names.sort()
|
||||
subdirs = []
|
||||
for name in names:
|
||||
if name in (os.curdir, os.pardir): continue
|
||||
fullname = os.path.join(dirname, name)
|
||||
if os.path.islink(fullname): pass
|
||||
elif os.path.isdir(fullname):
|
||||
subdirs.append(fullname)
|
||||
elif ispython(name):
|
||||
if fix(fullname): bad = 1
|
||||
for fullname in subdirs:
|
||||
if recursedown(fullname): bad = 1
|
||||
return bad
|
||||
|
||||
def fix(filename):
|
||||
## dbg('fix(%r)\n' % (filename,))
|
||||
try:
|
||||
f = open(filename, 'rb')
|
||||
except IOError as msg:
|
||||
err('%s: cannot open: %r\n' % (filename, msg))
|
||||
return 1
|
||||
line = f.readline()
|
||||
fixed = fixline(line)
|
||||
if line == fixed:
|
||||
rep(filename+': no change\n')
|
||||
f.close()
|
||||
return
|
||||
head, tail = os.path.split(filename)
|
||||
tempname = os.path.join(head, '@' + tail)
|
||||
try:
|
||||
g = open(tempname, 'wb')
|
||||
except IOError as msg:
|
||||
f.close()
|
||||
err('%s: cannot create: %r\n' % (tempname, msg))
|
||||
return 1
|
||||
rep(filename + ': updating\n')
|
||||
g.write(fixed)
|
||||
BUFSIZE = 8*1024
|
||||
while 1:
|
||||
buf = f.read(BUFSIZE)
|
||||
if not buf: break
|
||||
g.write(buf)
|
||||
g.close()
|
||||
f.close()
|
||||
|
||||
# Finishing touch -- move files
|
||||
|
||||
mtime = None
|
||||
atime = None
|
||||
# First copy the file's mode to the temp file
|
||||
try:
|
||||
statbuf = os.stat(filename)
|
||||
mtime = statbuf.st_mtime
|
||||
atime = statbuf.st_atime
|
||||
os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
|
||||
except OSError as msg:
|
||||
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
|
||||
# Then make a backup of the original file as filename~
|
||||
if create_backup:
|
||||
try:
|
||||
os.rename(filename, filename + '~')
|
||||
except OSError as msg:
|
||||
err('%s: warning: backup failed (%r)\n' % (filename, msg))
|
||||
else:
|
||||
try:
|
||||
os.remove(filename)
|
||||
except OSError as msg:
|
||||
err('%s: warning: removing failed (%r)\n' % (filename, msg))
|
||||
# Now move the temp file to the original file
|
||||
try:
|
||||
os.rename(tempname, filename)
|
||||
except OSError as msg:
|
||||
err('%s: rename failed (%r)\n' % (filename, msg))
|
||||
return 1
|
||||
if preserve_timestamps:
|
||||
if atime and mtime:
|
||||
try:
|
||||
os.utime(filename, (atime, mtime))
|
||||
except OSError as msg:
|
||||
err('%s: reset of timestamp failed (%r)\n' % (filename, msg))
|
||||
return 1
|
||||
# Return success
|
||||
return 0
|
||||
|
||||
def fixline(line):
|
||||
if not line.startswith(b'#!'):
|
||||
return line
|
||||
if b"python" not in line:
|
||||
return line
|
||||
return b'#! ' + new_interpreter + b'\n'
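# Illustrative example (not part of the original script); it assumes the
# module is imported interactively and that new_interpreter has been set
# as it would be by "-i /usr/local/bin/python3":
#
#   >>> import pathfix                               # hypothetical use
#   >>> pathfix.new_interpreter = b'/usr/local/bin/python3'
#   >>> pathfix.fixline(b'#!/usr/bin/env python\n')
#   b'#! /usr/local/bin/python3\n'
#   >>> pathfix.fixline(b'#!/bin/sh\n')              # not Python: unchanged
#   b'#!/bin/sh\n'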
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
165
Tools/scripts/pdeps.py
Normal file
@ -0,0 +1,165 @@
#! /usr/bin/env python3
|
||||
|
||||
# pdeps
|
||||
#
|
||||
# Find dependencies between a bunch of Python modules.
|
||||
#
|
||||
# Usage:
|
||||
# pdeps file1.py file2.py ...
|
||||
#
|
||||
# Output:
|
||||
# Four tables separated by lines like '--- Closure ---':
|
||||
# 1) Direct dependencies, listing which module imports which other modules
|
||||
# 2) The inverse of (1)
|
||||
# 3) Indirect dependencies, or the closure of the above
|
||||
# 4) The inverse of (3)
|
||||
#
|
||||
# To do:
|
||||
# - command line options to select output type
|
||||
# - option to automatically scan the Python library for referenced modules
|
||||
# - option to limit output to particular modules
|
||||
|
||||
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
|
||||
|
||||
# Main program
|
||||
#
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
if not args:
|
||||
print('usage: pdeps file.py file.py ...')
|
||||
return 2
|
||||
#
|
||||
table = {}
|
||||
for arg in args:
|
||||
process(arg, table)
|
||||
#
|
||||
print('--- Uses ---')
|
||||
printresults(table)
|
||||
#
|
||||
print('--- Used By ---')
|
||||
inv = inverse(table)
|
||||
printresults(inv)
|
||||
#
|
||||
print('--- Closure of Uses ---')
|
||||
reach = closure(table)
|
||||
printresults(reach)
|
||||
#
|
||||
print('--- Closure of Used By ---')
|
||||
invreach = inverse(reach)
|
||||
printresults(invreach)
|
||||
#
|
||||
return 0
|
||||
|
||||
|
||||
# Compiled regular expressions to search for import statements
|
||||
#
|
||||
m_import = re.compile('^[ \t]*import[ \t]+([^#]+)')
m_from = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
|
||||
|
||||
|
||||
# Collect data from one file
|
||||
#
|
||||
def process(filename, table):
|
||||
fp = open(filename, 'r')
|
||||
mod = os.path.basename(filename)
|
||||
if mod[-3:] == '.py':
|
||||
mod = mod[:-3]
|
||||
table[mod] = list = []
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line: break
|
||||
while line[-1:] == '\\':
|
||||
nextline = fp.readline()
|
||||
if not nextline: break
|
||||
line = line[:-1] + nextline
|
||||
m_found = m_import.match(line) or m_from.match(line)
|
||||
if m_found:
|
||||
(a, b), (a1, b1) = m_found.regs[:2]
|
||||
else: continue
|
||||
words = line[a1:b1].split(',')
|
||||
# print '#', line, words
|
||||
for word in words:
|
||||
word = word.strip()
|
||||
if word not in list:
|
||||
list.append(word)
|
||||
fp.close()
|
||||
|
||||
|
||||
# Compute closure (this is in fact totally general)
|
||||
#
|
||||
def closure(table):
|
||||
modules = list(table.keys())
|
||||
#
|
||||
# Initialize reach with a copy of table
|
||||
#
|
||||
reach = {}
|
||||
for mod in modules:
|
||||
reach[mod] = table[mod][:]
|
||||
#
|
||||
# Iterate until no more change
|
||||
#
|
||||
change = 1
|
||||
while change:
|
||||
change = 0
|
||||
for mod in modules:
|
||||
for mo in reach[mod]:
|
||||
if mo in modules:
|
||||
for m in reach[mo]:
|
||||
if m not in reach[mod]:
|
||||
reach[mod].append(m)
|
||||
change = 1
|
||||
#
|
||||
return reach
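# A small illustrative example (not in the original file): the closure
# of a three-module dependency table.
#
#   >>> closure({'a': ['b'], 'b': ['c'], 'c': []})
#   {'a': ['b', 'c'], 'b': ['c'], 'c': []}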
|
||||
|
||||
|
||||
# Invert a table (this is again totally general).
|
||||
# All keys of the original table are made keys of the inverse,
|
||||
# so there may be empty lists in the inverse.
|
||||
#
|
||||
def inverse(table):
|
||||
inv = {}
|
||||
for key in table.keys():
|
||||
if key not in inv:
|
||||
inv[key] = []
|
||||
for item in table[key]:
|
||||
store(inv, item, key)
|
||||
return inv
|
||||
|
||||
|
||||
# Store "item" in "dict" under "key".
|
||||
# The dictionary maps keys to lists of items.
|
||||
# If there is no list for the key yet, it is created.
|
||||
#
|
||||
def store(dict, key, item):
|
||||
if key in dict:
|
||||
dict[key].append(item)
|
||||
else:
|
||||
dict[key] = [item]
|
||||
|
||||
|
||||
# Tabulate results neatly
|
||||
#
|
||||
def printresults(table):
|
||||
modules = sorted(table.keys())
|
||||
maxlen = 0
|
||||
for mod in modules: maxlen = max(maxlen, len(mod))
|
||||
for mod in modules:
|
||||
list = sorted(table[mod])
|
||||
print(mod.ljust(maxlen), ':', end=' ')
|
||||
if mod in list:
|
||||
print('(*)', end=' ')
|
||||
for ref in list:
|
||||
print(ref, end=' ')
|
||||
print()
|
||||
|
||||
|
||||
# Call main and honor exit status
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
sys.exit(main())
|
||||
except KeyboardInterrupt:
|
||||
sys.exit(1)
|
||||
147
Tools/scripts/pickle2db.py
Normal file
@ -0,0 +1,147 @@
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
|
||||
|
||||
Read the given picklefile as a series of key/value pairs and write to a new
|
||||
database. If the database already exists, any contents are deleted. The
|
||||
optional flags indicate the type of the output database:
|
||||
|
||||
-a - open using dbm (open any supported format)
|
||||
-b - open as bsddb btree file
|
||||
-d - open as dbm.ndbm file
|
||||
-g - open as dbm.gnu file
|
||||
-h - open as bsddb hash file
|
||||
-r - open as bsddb recno file
|
||||
|
||||
The default is hash. If a pickle file is named it is opened for read
|
||||
access. If no pickle file is named, the pickle input is read from standard
|
||||
input.
|
||||
|
||||
Note that recno databases can only contain integer keys, so you can't dump a
|
||||
hash or btree database using db2pickle.py and reconstitute it to a recno
|
||||
database with %(prog)s unless your keys are integers.
|
||||
|
||||
"""
|
||||
|
||||
import getopt
|
||||
try:
|
||||
import bsddb
|
||||
except ImportError:
|
||||
bsddb = None
|
||||
try:
|
||||
import dbm.ndbm as dbm
|
||||
except ImportError:
|
||||
dbm = None
|
||||
try:
|
||||
import dbm.gnu as gdbm
|
||||
except ImportError:
|
||||
gdbm = None
|
||||
try:
|
||||
import dbm.ndbm as anydbm
|
||||
except ImportError:
|
||||
anydbm = None
|
||||
import sys
|
||||
import pickle
|
||||
|
||||
prog = sys.argv[0]
|
||||
|
||||
def usage():
|
||||
sys.stderr.write(__doc__ % globals())
|
||||
|
||||
def main(args):
|
||||
try:
|
||||
opts, args = getopt.getopt(args, "hbrdag",
|
||||
["hash", "btree", "recno", "dbm", "anydbm",
|
||||
"gdbm"])
|
||||
except getopt.error:
|
||||
usage()
|
||||
return 1
|
||||
|
||||
if len(args) == 0 or len(args) > 2:
|
||||
usage()
|
||||
return 1
|
||||
elif len(args) == 1:
|
||||
pfile = sys.stdin
|
||||
dbfile = args[0]
|
||||
else:
|
||||
try:
|
||||
pfile = open(args[0], 'rb')
|
||||
except IOError:
|
||||
sys.stderr.write("Unable to open %s\n" % args[0])
|
||||
return 1
|
||||
dbfile = args[1]
|
||||
|
||||
dbopen = None
|
||||
for opt, arg in opts:
|
||||
if opt in ("-h", "--hash"):
|
||||
try:
|
||||
dbopen = bsddb.hashopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-b", "--btree"):
|
||||
try:
|
||||
dbopen = bsddb.btopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-r", "--recno"):
|
||||
try:
|
||||
dbopen = bsddb.rnopen
|
||||
except AttributeError:
|
||||
sys.stderr.write("bsddb module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-a", "--anydbm"):
|
||||
try:
|
||||
dbopen = anydbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-g", "--gdbm"):
|
||||
try:
|
||||
dbopen = gdbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm.gnu module unavailable.\n")
|
||||
return 1
|
||||
elif opt in ("-d", "--dbm"):
|
||||
try:
|
||||
dbopen = dbm.open
|
||||
except AttributeError:
|
||||
sys.stderr.write("dbm.ndbm module unavailable.\n")
|
||||
return 1
|
||||
if dbopen is None:
|
||||
if bsddb is None:
|
||||
sys.stderr.write("bsddb module unavailable - ")
|
||||
sys.stderr.write("must specify dbtype.\n")
|
||||
return 1
|
||||
else:
|
||||
dbopen = bsddb.hashopen
|
||||
|
||||
try:
|
||||
db = dbopen(dbfile, 'c')
|
||||
except bsddb.error:
|
||||
sys.stderr.write("Unable to open %s. " % dbfile)
|
||||
sys.stderr.write("Check for format or version mismatch.\n")
|
||||
return 1
|
||||
else:
|
||||
for k in list(db.keys()):
|
||||
del db[k]
|
||||
|
||||
while 1:
|
||||
try:
|
||||
(key, val) = pickle.load(pfile)
|
||||
except EOFError:
|
||||
break
|
||||
db[key] = val
|
||||
|
||||
db.close()
|
||||
pfile.close()
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv[1:]))
|
||||
506
Tools/scripts/pindent.py
Normal file
@ -0,0 +1,506 @@
#! /usr/bin/env python3
|
||||
|
||||
# This file contains a class and a main program that perform three
|
||||
# related (though complementary) formatting operations on Python
|
||||
# programs. When called as "pindent -c", it takes a valid Python
|
||||
# program as input and outputs a version augmented with block-closing
|
||||
# comments. When called as "pindent -d", it assumes its input is a
|
||||
# Python program with block-closing comments and outputs a commentless
|
||||
# version. When called as "pindent -r" it assumes its input is a
|
||||
# Python program with block-closing comments but with its indentation
|
||||
# messed up, and outputs a properly indented version.
|
||||
|
||||
# A "block-closing comment" is a comment of the form '# end <keyword>'
|
||||
# where <keyword> is the keyword that opened the block. If the
|
||||
# opening keyword is 'def' or 'class', the function or class name may
|
||||
# be repeated in the block-closing comment as well. Here is an
|
||||
# example of a program fully augmented with block-closing comments:
|
||||
|
||||
# def foobar(a, b):
|
||||
# if a == b:
|
||||
# a = a+1
|
||||
# elif a < b:
|
||||
# b = b-1
|
||||
# if b > a: a = a-1
|
||||
# # end if
|
||||
# else:
|
||||
# print 'oops!'
|
||||
# # end if
|
||||
# # end def foobar
|
||||
|
||||
# Note that only the last part of an if...elif...else... block needs a
|
||||
# block-closing comment; the same is true for other compound
|
||||
# statements (e.g. try...except). Also note that "short-form" blocks
|
||||
# like the second 'if' in the example must be closed as well;
|
||||
# otherwise the 'else' in the example would be ambiguous (remember
|
||||
# that indentation is not significant when interpreting block-closing
|
||||
# comments).
|
||||
|
||||
# The operations are idempotent (i.e. applied to their own output
|
||||
# they yield an identical result). Running first "pindent -c" and
|
||||
# then "pindent -r" on a valid Python program produces a program that
|
||||
# is semantically identical to the input (though its indentation may
|
||||
# be different). Running "pindent -d" on that output produces a
|
||||
# program that only differs from the original in indentation.
|
||||
|
||||
# Other options:
|
||||
# -s stepsize: set the indentation step size (default 8)
|
||||
# -t tabsize : set the number of spaces a tab character is worth (default 8)
|
||||
# -e : expand TABs into spaces
|
||||
# file ... : input file(s) (default standard input)
|
||||
# The results always go to standard output
|
||||
|
||||
# Caveats:
|
||||
# - comments ending in a backslash will be mistaken for continued lines
|
||||
# - continuations using backslash are always left unchanged
|
||||
# - continuations inside parentheses are not extra indented by -r
|
||||
# but must be indented for -c to work correctly (this breaks
|
||||
# idempotency!)
|
||||
# - continued lines inside triple-quoted strings are totally garbled
|
||||
|
||||
# Secret feature:
|
||||
# - On input, a block may also be closed with an "end statement" --
|
||||
# this is a block-closing comment without the '#' sign.
|
||||
|
||||
# Possible improvements:
|
||||
# - check syntax based on transitions in 'next' table
|
||||
# - better error reporting
|
||||
# - better error recovery
|
||||
# - check identifier after class/def
|
||||
|
||||
# The following wishes need a more complete tokenization of the source:
|
||||
# - Don't get fooled by comments ending in backslash
|
||||
# - reindent continuation lines indicated by backslash
|
||||
# - handle continuation lines inside parentheses/braces/brackets
|
||||
# - handle triple quoted strings spanning lines
|
||||
# - realign comments
|
||||
# - optionally do much more thorough reformatting, a la C indent
|
||||
|
||||
# Defaults
|
||||
STEPSIZE = 8
|
||||
TABSIZE = 8
|
||||
EXPANDTABS = False
|
||||
|
||||
import io
|
||||
import re
|
||||
import sys
|
||||
|
||||
next = {}
|
||||
next['if'] = next['elif'] = 'elif', 'else', 'end'
|
||||
next['while'] = next['for'] = 'else', 'end'
|
||||
next['try'] = 'except', 'finally'
|
||||
next['except'] = 'except', 'else', 'finally', 'end'
|
||||
next['else'] = next['finally'] = next['with'] = \
|
||||
next['def'] = next['class'] = 'end'
|
||||
next['end'] = ()
|
||||
start = 'if', 'while', 'for', 'try', 'with', 'def', 'class'
|
||||
|
||||
class PythonIndenter:
|
||||
|
||||
def __init__(self, fpi = sys.stdin, fpo = sys.stdout,
|
||||
indentsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
self.fpi = fpi
|
||||
self.fpo = fpo
|
||||
self.indentsize = indentsize
|
||||
self.tabsize = tabsize
|
||||
self.lineno = 0
|
||||
self.expandtabs = expandtabs
|
||||
self._write = fpo.write
|
||||
self.kwprog = re.compile(
|
||||
r'^(?:\s|\\\n)*(?P<kw>[a-z]+)'
|
||||
r'((?:\s|\\\n)+(?P<id>[a-zA-Z_]\w*))?'
|
||||
r'[^\w]')
|
||||
self.endprog = re.compile(
|
||||
r'^(?:\s|\\\n)*#?\s*end\s+(?P<kw>[a-z]+)'
|
||||
r'(\s+(?P<id>[a-zA-Z_]\w*))?'
|
||||
r'[^\w]')
|
||||
self.wsprog = re.compile(r'^[ \t]*')
|
||||
# end def __init__
|
||||
|
||||
def write(self, line):
|
||||
if self.expandtabs:
|
||||
self._write(line.expandtabs(self.tabsize))
|
||||
else:
|
||||
self._write(line)
|
||||
# end if
|
||||
# end def write
|
||||
|
||||
def readline(self):
|
||||
line = self.fpi.readline()
|
||||
if line: self.lineno += 1
|
||||
# end if
|
||||
return line
|
||||
# end def readline
|
||||
|
||||
def error(self, fmt, *args):
|
||||
if args: fmt = fmt % args
|
||||
# end if
|
||||
sys.stderr.write('Error at line %d: %s\n' % (self.lineno, fmt))
|
||||
self.write('### %s ###\n' % fmt)
|
||||
# end def error
|
||||
|
||||
def getline(self):
|
||||
line = self.readline()
|
||||
while line[-2:] == '\\\n':
|
||||
line2 = self.readline()
|
||||
if not line2: break
|
||||
# end if
|
||||
line += line2
|
||||
# end while
|
||||
return line
|
||||
# end def getline
|
||||
|
||||
def putline(self, line, indent):
|
||||
tabs, spaces = divmod(indent*self.indentsize, self.tabsize)
|
||||
i = self.wsprog.match(line).end()
|
||||
line = line[i:]
|
||||
if line[:1] not in ('\n', '\r', ''):
|
||||
line = '\t'*tabs + ' '*spaces + line
|
||||
# end if
|
||||
self.write(line)
|
||||
# end def putline
|
||||
|
||||
def reformat(self):
|
||||
stack = []
|
||||
while True:
|
||||
line = self.getline()
|
||||
if not line: break # EOF
|
||||
# end if
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
kw = 'end'
|
||||
kw2 = m.group('kw')
|
||||
if not stack:
|
||||
self.error('unexpected end')
|
||||
elif stack.pop()[0] != kw2:
|
||||
self.error('unmatched end')
|
||||
# end if
|
||||
self.putline(line, len(stack))
|
||||
continue
|
||||
# end if
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
kw = m.group('kw')
|
||||
if kw in start:
|
||||
self.putline(line, len(stack))
|
||||
stack.append((kw, kw))
|
||||
continue
|
||||
# end if
|
||||
if kw in next and stack:
|
||||
self.putline(line, len(stack)-1)
|
||||
kwa, kwb = stack[-1]
|
||||
stack[-1] = kwa, kw
|
||||
continue
|
||||
# end if
|
||||
# end if
|
||||
self.putline(line, len(stack))
|
||||
# end while
|
||||
if stack:
|
||||
self.error('unterminated keywords')
|
||||
for kwa, kwb in stack:
|
||||
self.write('\t%s\n' % kwa)
|
||||
# end for
|
||||
# end if
|
||||
# end def reformat
|
||||
|
||||
def delete(self):
|
||||
begin_counter = 0
|
||||
end_counter = 0
|
||||
while True:
|
||||
line = self.getline()
|
||||
if not line: break # EOF
|
||||
# end if
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
end_counter += 1
|
||||
continue
|
||||
# end if
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
kw = m.group('kw')
|
||||
if kw in start:
|
||||
begin_counter += 1
|
||||
# end if
|
||||
# end if
|
||||
self.write(line)
|
||||
# end while
|
||||
if begin_counter - end_counter < 0:
|
||||
sys.stderr.write('Warning: input contained more end tags than expected\n')
|
||||
elif begin_counter - end_counter > 0:
|
||||
            sys.stderr.write('Warning: input contained fewer end tags than expected\n')
|
||||
# end if
|
||||
# end def delete
|
||||
|
||||
def complete(self):
|
||||
stack = []
|
||||
todo = []
|
||||
currentws = thisid = firstkw = lastkw = topid = ''
|
||||
while True:
|
||||
line = self.getline()
|
||||
i = self.wsprog.match(line).end()
|
||||
m = self.endprog.match(line)
|
||||
if m:
|
||||
thiskw = 'end'
|
||||
endkw = m.group('kw')
|
||||
thisid = m.group('id')
|
||||
else:
|
||||
m = self.kwprog.match(line)
|
||||
if m:
|
||||
thiskw = m.group('kw')
|
||||
if thiskw not in next:
|
||||
thiskw = ''
|
||||
# end if
|
||||
if thiskw in ('def', 'class'):
|
||||
thisid = m.group('id')
|
||||
else:
|
||||
thisid = ''
|
||||
# end if
|
||||
elif line[i:i+1] in ('\n', '#'):
|
||||
todo.append(line)
|
||||
continue
|
||||
else:
|
||||
thiskw = ''
|
||||
# end if
|
||||
# end if
|
||||
indentws = line[:i]
|
||||
indent = len(indentws.expandtabs(self.tabsize))
|
||||
current = len(currentws.expandtabs(self.tabsize))
|
||||
while indent < current:
|
||||
if firstkw:
|
||||
if topid:
|
||||
s = '# end %s %s\n' % (
|
||||
firstkw, topid)
|
||||
else:
|
||||
s = '# end %s\n' % firstkw
|
||||
# end if
|
||||
self.write(currentws + s)
|
||||
firstkw = lastkw = ''
|
||||
# end if
|
||||
currentws, firstkw, lastkw, topid = stack.pop()
|
||||
current = len(currentws.expandtabs(self.tabsize))
|
||||
# end while
|
||||
if indent == current and firstkw:
|
||||
if thiskw == 'end':
|
||||
if endkw != firstkw:
|
||||
self.error('mismatched end')
|
||||
# end if
|
||||
firstkw = lastkw = ''
|
||||
elif not thiskw or thiskw in start:
|
||||
if topid:
|
||||
s = '# end %s %s\n' % (
|
||||
firstkw, topid)
|
||||
else:
|
||||
s = '# end %s\n' % firstkw
|
||||
# end if
|
||||
self.write(currentws + s)
|
||||
firstkw = lastkw = topid = ''
|
||||
# end if
|
||||
# end if
|
||||
if indent > current:
|
||||
stack.append((currentws, firstkw, lastkw, topid))
|
||||
if thiskw and thiskw not in start:
|
||||
# error
|
||||
thiskw = ''
|
||||
# end if
|
||||
currentws, firstkw, lastkw, topid = \
|
||||
indentws, thiskw, thiskw, thisid
|
||||
# end if
|
||||
if thiskw:
|
||||
if thiskw in start:
|
||||
firstkw = lastkw = thiskw
|
||||
topid = thisid
|
||||
else:
|
||||
lastkw = thiskw
|
||||
# end if
|
||||
# end if
|
||||
for l in todo: self.write(l)
|
||||
# end for
|
||||
todo = []
|
||||
if not line: break
|
||||
# end if
|
||||
self.write(line)
|
||||
# end while
|
||||
# end def complete
|
||||
# end class PythonIndenter
|
||||
|
||||
# Simplified user interface
|
||||
# - xxx_filter(input, output): read and write file objects
|
||||
# - xxx_string(s): take and return string object
|
||||
# - xxx_file(filename): process file in place, return true iff changed
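# A hedged usage sketch of the string interface (not part of the
# original module); it assumes this file is importable as `pindent`:
#
#   >>> import pindent
#   >>> print(pindent.complete_string('if x:\n    pass\n'), end='')
#   if x:
#       pass
#   # end if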
|
||||
|
||||
def complete_filter(input = sys.stdin, output = sys.stdout,
|
||||
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.complete()
|
||||
# end def complete_filter
|
||||
|
||||
def delete_filter(input= sys.stdin, output = sys.stdout,
|
||||
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.delete()
|
||||
# end def delete_filter
|
||||
|
||||
def reformat_filter(input = sys.stdin, output = sys.stdout,
|
||||
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.reformat()
|
||||
# end def reformat_filter
|
||||
|
||||
def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
input = io.StringIO(source)
|
||||
output = io.StringIO()
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.complete()
|
||||
return output.getvalue()
|
||||
# end def complete_string
|
||||
|
||||
def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
input = io.StringIO(source)
|
||||
output = io.StringIO()
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.delete()
|
||||
return output.getvalue()
|
||||
# end def delete_string
|
||||
|
||||
def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
input = io.StringIO(source)
|
||||
output = io.StringIO()
|
||||
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
|
||||
pi.reformat()
|
||||
return output.getvalue()
|
||||
# end def reformat_string
|
||||
|
||||
def make_backup(filename):
|
||||
import os, os.path
|
||||
backup = filename + '~'
|
||||
if os.path.lexists(backup):
|
||||
try:
|
||||
os.remove(backup)
|
||||
except OSError:
|
||||
print("Can't remove backup %r" % (backup,), file=sys.stderr)
|
||||
# end try
|
||||
# end if
|
||||
try:
|
||||
os.rename(filename, backup)
|
||||
except OSError:
|
||||
print("Can't rename %r to %r" % (filename, backup), file=sys.stderr)
|
||||
# end try
|
||||
# end def make_backup
|
||||
|
||||
def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
with open(filename, 'r') as f:
|
||||
source = f.read()
|
||||
# end with
|
||||
result = complete_string(source, stepsize, tabsize, expandtabs)
|
||||
if source == result: return 0
|
||||
# end if
|
||||
make_backup(filename)
|
||||
with open(filename, 'w') as f:
|
||||
f.write(result)
|
||||
# end with
|
||||
return 1
|
||||
# end def complete_file
|
||||
|
||||
def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
with open(filename, 'r') as f:
|
||||
source = f.read()
|
||||
# end with
|
||||
result = delete_string(source, stepsize, tabsize, expandtabs)
|
||||
if source == result: return 0
|
||||
# end if
|
||||
make_backup(filename)
|
||||
with open(filename, 'w') as f:
|
||||
f.write(result)
|
||||
# end with
|
||||
return 1
|
||||
# end def delete_file
|
||||
|
||||
def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
|
||||
with open(filename, 'r') as f:
|
||||
source = f.read()
|
||||
# end with
|
||||
result = reformat_string(source, stepsize, tabsize, expandtabs)
|
||||
if source == result: return 0
|
||||
# end if
|
||||
make_backup(filename)
|
||||
with open(filename, 'w') as f:
|
||||
f.write(result)
|
||||
# end with
|
||||
return 1
|
||||
# end def reformat_file
|
||||
|
||||
# Test program when called as a script
|
||||
|
||||
usage = """
|
||||
usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ...
|
||||
-c : complete a correctly indented program (add #end directives)
|
||||
-d : delete #end directives
|
||||
-r : reformat a completed program (use #end directives)
|
||||
-s stepsize: indentation step (default %(STEPSIZE)d)
|
||||
-t tabsize : the worth in spaces of a tab (default %(TABSIZE)d)
|
||||
-e : expand TABs into spaces (default OFF)
|
||||
[file] ... : files are changed in place, with backups in file~
|
||||
If no files are specified or a single - is given,
|
||||
the program acts as a filter (reads stdin, writes stdout).
|
||||
""" % vars()
|
||||
|
||||
def error_both(op1, op2):
|
||||
sys.stderr.write('Error: You can not specify both '+op1+' and -'+op2[0]+' at the same time\n')
|
||||
sys.stderr.write(usage)
|
||||
sys.exit(2)
|
||||
# end def error_both
|
||||
|
||||
def test():
|
||||
import getopt
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'cdrs:t:e')
|
||||
except getopt.error as msg:
|
||||
sys.stderr.write('Error: %s\n' % msg)
|
||||
sys.stderr.write(usage)
|
||||
sys.exit(2)
|
||||
# end try
|
||||
action = None
|
||||
stepsize = STEPSIZE
|
||||
tabsize = TABSIZE
|
||||
expandtabs = EXPANDTABS
|
||||
for o, a in opts:
|
||||
if o == '-c':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'complete'
|
||||
elif o == '-d':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'delete'
|
||||
elif o == '-r':
|
||||
if action: error_both(o, action)
|
||||
# end if
|
||||
action = 'reformat'
|
||||
elif o == '-s':
|
||||
stepsize = int(a)
|
||||
elif o == '-t':
|
||||
tabsize = int(a)
|
||||
elif o == '-e':
|
||||
expandtabs = True
|
||||
# end if
|
||||
# end for
|
||||
if not action:
|
||||
sys.stderr.write(
|
||||
'You must specify -c(omplete), -d(elete) or -r(eformat)\n')
|
||||
sys.stderr.write(usage)
|
||||
sys.exit(2)
|
||||
# end if
|
||||
if not args or args == ['-']:
|
||||
action = eval(action + '_filter')
|
||||
action(sys.stdin, sys.stdout, stepsize, tabsize, expandtabs)
|
||||
else:
|
||||
action = eval(action + '_file')
|
||||
for filename in args:
|
||||
action(filename, stepsize, tabsize, expandtabs)
|
||||
# end for
|
||||
# end if
|
||||
# end def test
|
||||
|
||||
if __name__ == '__main__':
|
||||
test()
|
||||
# end if
|
||||
53
Tools/scripts/ptags.py
Normal file
@ -0,0 +1,53 @@
#! /usr/bin/env python3
|
||||
|
||||
# ptags
|
||||
#
|
||||
# Create a tags file for Python programs, usable with vi.
|
||||
# Tagged are:
|
||||
# - functions (even inside other defs or classes)
|
||||
# - classes
|
||||
# - filenames
|
||||
# Warns about files it cannot open.
|
||||
# No warnings about duplicate tags.
|
||||
|
||||
import sys, re, os
|
||||
|
||||
tags = [] # Modified global variable!
|
||||
|
||||
def main():
|
||||
args = sys.argv[1:]
|
||||
for filename in args:
|
||||
treat_file(filename)
|
||||
if tags:
|
||||
fp = open('tags', 'w')
|
||||
tags.sort()
|
||||
for s in tags: fp.write(s)
|
||||
|
||||
|
||||
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
|
||||
matcher = re.compile(expr)
|
||||
|
||||
def treat_file(filename):
|
||||
try:
|
||||
fp = open(filename, 'r')
|
||||
except:
|
||||
sys.stderr.write('Cannot open %s\n' % filename)
|
||||
return
|
||||
base = os.path.basename(filename)
|
||||
if base[-3:] == '.py':
|
||||
base = base[:-3]
|
||||
s = base + '\t' + filename + '\t' + '1\n'
|
||||
tags.append(s)
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
break
|
||||
m = matcher.match(line)
|
||||
if m:
|
||||
content = m.group(0)
|
||||
name = m.group(2)
|
||||
s = name + '\t' + filename + '\t/^' + content + '/\n'
|
||||
tags.append(s)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
5
Tools/scripts/pydoc3.py
Normal file
@ -0,0 +1,5 @@
#!/usr/bin/env python3

import pydoc
if __name__ == '__main__':
    pydoc.cli()
130
Tools/scripts/pysource.py
Normal file
@ -0,0 +1,130 @@
#!/usr/bin/env python3
|
||||
|
||||
"""\
|
||||
List python source files.
|
||||
|
||||
There are three functions to check whether a file is a Python source, listed
|
||||
here with increasing complexity:
|
||||
|
||||
- has_python_ext() checks whether a file name ends in '.py[w]'.
|
||||
- look_like_python() checks whether the file is not binary and either has
|
||||
the '.py[w]' extension or the first line contains the word 'python'.
|
||||
- can_be_compiled() checks whether the file can be compiled by compile().
|
||||
|
||||
The file also must be of appropriate size - not bigger than a megabyte.
|
||||
|
||||
walk_python_files() recursively lists all Python files under the given directories.
|
||||
"""
|
||||
__author__ = "Oleg Broytmann, Georg Brandl"
|
||||
|
||||
__all__ = ["has_python_ext", "looks_like_python", "can_be_compiled", "walk_python_files"]
|
||||
|
||||
|
||||
import os, re
|
||||
|
||||
binary_re = re.compile(br'[\x00-\x08\x0E-\x1F\x7F]')
|
||||
|
||||
debug = False
|
||||
|
||||
def print_debug(msg):
|
||||
if debug: print(msg)
|
||||
|
||||
|
||||
def _open(fullpath):
|
||||
try:
|
||||
size = os.stat(fullpath).st_size
|
||||
except OSError as err: # Permission denied - ignore the file
|
||||
print_debug("%s: permission denied: %s" % (fullpath, err))
|
||||
return None
|
||||
|
||||
if size > 1024*1024: # too big
|
||||
print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
|
||||
return None
|
||||
|
||||
try:
|
||||
return open(fullpath, "rb")
|
||||
except IOError as err: # Access denied, or a special file - ignore it
|
||||
print_debug("%s: access denied: %s" % (fullpath, err))
|
||||
return None
|
||||
|
||||
def has_python_ext(fullpath):
|
||||
return fullpath.endswith(".py") or fullpath.endswith(".pyw")
|
||||
|
||||
def looks_like_python(fullpath):
|
||||
infile = _open(fullpath)
|
||||
if infile is None:
|
||||
return False
|
||||
|
||||
with infile:
|
||||
line = infile.readline()
|
||||
|
||||
if binary_re.search(line):
|
||||
# file appears to be binary
|
||||
print_debug("%s: appears to be binary" % fullpath)
|
||||
return False
|
||||
|
||||
if fullpath.endswith(".py") or fullpath.endswith(".pyw"):
|
||||
return True
|
||||
elif b"python" in line:
|
||||
# disguised Python script (e.g. CGI)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def can_be_compiled(fullpath):
|
||||
infile = _open(fullpath)
|
||||
if infile is None:
|
||||
return False
|
||||
|
||||
with infile:
|
||||
code = infile.read()
|
||||
|
||||
try:
|
||||
compile(code, fullpath, "exec")
|
||||
except Exception as err:
|
||||
print_debug("%s: cannot compile: %s" % (fullpath, err))
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def walk_python_files(paths, is_python=looks_like_python, exclude_dirs=None):
|
||||
"""\
|
||||
Recursively yield all Python source files below the given paths.
|
||||
|
||||
paths: a list of files and/or directories to be checked.
|
||||
is_python: a function that takes a file name and checks whether it is a
|
||||
Python source file
|
||||
exclude_dirs: a list of directory base names that should be excluded in
|
||||
the search
|
||||
"""
|
||||
if exclude_dirs is None:
|
||||
exclude_dirs=[]
|
||||
|
||||
for path in paths:
|
||||
print_debug("testing: %s" % path)
|
||||
if os.path.isfile(path):
|
||||
if is_python(path):
|
||||
yield path
|
||||
elif os.path.isdir(path):
|
||||
print_debug(" it is a directory")
|
||||
for dirpath, dirnames, filenames in os.walk(path):
|
||||
for exclude in exclude_dirs:
|
||||
if exclude in dirnames:
|
||||
dirnames.remove(exclude)
|
||||
for filename in filenames:
|
||||
fullpath = os.path.join(dirpath, filename)
|
||||
print_debug("testing: %s" % fullpath)
|
||||
if is_python(fullpath):
|
||||
yield fullpath
|
||||
else:
|
||||
print_debug(" unknown type")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Two simple examples/tests
|
||||
for fullpath in walk_python_files(['.']):
|
||||
print(fullpath)
|
||||
print("----------")
|
||||
for fullpath in walk_python_files(['.'], is_python=can_be_compiled):
|
||||
print(fullpath)
|
||||
17
Tools/scripts/pyvenv.py
Normal file
@ -0,0 +1,17 @@
#!/usr/bin/env python3
if __name__ == '__main__':
    import sys
    import pathlib

    executable = pathlib.Path(sys.executable or 'python3').name
    print('WARNING: the pyvenv script is deprecated in favour of '
          f'`{executable} -m venv`', file=sys.stderr)

    rc = 1
    try:
        import venv
        venv.main()
        rc = 0
    except Exception as e:
        print('Error: %s' % e, file=sys.stderr)
    sys.exit(rc)
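Not part of the diff: the deprecation message points users at `python -m venv`. For scripts that used to shell out to pyvenv, the equivalent programmatic call in the standard library is roughly:

# Illustrative only -- venv.create() is the documented stdlib entry point.
import venv

venv.create('myenv', with_pip=True)   # same effect as `python -m venv myenv`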
14
Tools/scripts/reindent-rst.py
Normal file
@ -0,0 +1,14 @@
#!/usr/bin/env python3

# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.

import sys

import patchcheck

def main(argv=sys.argv):
    patchcheck.normalize_docs_whitespace(argv[1:])

if __name__ == '__main__':
    sys.exit(main())
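Not part of the diff: the heavy lifting here is delegated to patchcheck.normalize_docs_whitespace(), which is not shown in this commit. As a rough, self-contained sketch of the same idea (strip trailing whitespace in place), one might write something like the following; it is an illustration, not the patchcheck implementation.

# Standalone sketch of the trailing-whitespace cleanup.
import sys

def strip_trailing_whitespace(path):
    with open(path, encoding='utf-8') as f:
        lines = f.readlines()
    cleaned = [line.rstrip() + '\n' for line in lines]
    if cleaned != lines:
        with open(path, 'w', encoding='utf-8') as f:
            f.writelines(cleaned)
        print('fixed', path)

if __name__ == '__main__':
    for name in sys.argv[1:]:
        strip_trailing_whitespace(name)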
333
Tools/scripts/reindent.py
Normal file
@ -0,0 +1,333 @@
#! /usr/bin/env python3

# Released to the public domain, by Tim Peters, 03 October 2000.

"""reindent [-d][-r][-v] [ path ... ]

-d (--dryrun)   Dry run.   Analyze, but don't make any changes to, files.
-r (--recurse)  Recurse.   Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose)  Verbose.   Print informative msgs; else no output.
   (--newline)  Newline.   Specify the newline character to use (CRLF, LF).
                           Default is the same as the original file.
-h (--help)     Help.      Print this usage information and exit.

Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files.  Also ensure the last line ends with a newline.

If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output.  In this case, the -d, -r and -v flags are
ignored.

You can pass one or more file and/or directory paths.  When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.

If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension.  If it finds nothing to
change, the file is left alone.  If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).

The hard part of reindenting is figuring out what to do with comment
lines.  So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.

The backup file is a copy of the one that is being reindented.  The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer.  You can always use the --nobackup option to prevent this.
"""

__version__ = "1"

import tokenize
import os
import shutil
import sys

verbose = False
recurse = False
dryrun = False
makebackup = True
# A specified newline to be used in the output (set by --newline option)
spec_newline = None


def usage(msg=None):
    if msg is None:
        msg = __doc__
    print(msg, file=sys.stderr)


def errprint(*args):
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")

def main():
    import getopt
    global verbose, recurse, dryrun, makebackup, spec_newline
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                                   ["dryrun", "recurse", "nobackup", "verbose", "newline=", "help"])
    except getopt.error as msg:
        usage(msg)
        return
    for o, a in opts:
        if o in ('-d', '--dryrun'):
            dryrun = True
        elif o in ('-r', '--recurse'):
            recurse = True
        elif o in ('-n', '--nobackup'):
            makebackup = False
        elif o in ('-v', '--verbose'):
            verbose = True
        elif o in ('--newline',):
            if not a.upper() in ('CRLF', 'LF'):
                usage()
                return
            spec_newline = dict(CRLF='\r\n', LF='\n')[a.upper()]
        elif o in ('-h', '--help'):
            usage()
            return
    if not args:
        r = Reindenter(sys.stdin)
        r.run()
        r.write(sys.stdout)
        return
    for arg in args:
        check(arg)


def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("listing directory", file)
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return

    if verbose:
        print("checking", file, "...", end=' ')
    with open(file, 'rb') as f:
        try:
            encoding, _ = tokenize.detect_encoding(f.readline)
        except SyntaxError as se:
            errprint("%s: SyntaxError: %s" % (file, str(se)))
            return
    try:
        with open(file, encoding=encoding) as f:
            r = Reindenter(f)
    except IOError as msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return

    newline = spec_newline if spec_newline else r.newlines
    if isinstance(newline, tuple):
        errprint("%s: mixed newlines detected; cannot continue without --newline" % file)
        return

    if r.run():
        if verbose:
            print("changed.")
            if dryrun:
                print("But this is a dry run, so leaving it alone.")
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print("backed up", file, "to", bak)
            with open(file, "w", encoding=encoding, newline=newline) as f:
                r.write(f)
            if verbose:
                print("wrote new", file)
        return True
    else:
        if verbose:
            print("unchanged.")
        return False


def _rstrip(line, JUNK='\n \t'):
    """Return line stripped of trailing spaces, tabs, newlines.

    Note that line.rstrip() instead also strips sundry control characters,
    but at least one known Emacs user expects to keep junk like that, not
    mentioning Barry by name or anything <wink>.
    """

    i = len(line)
    while i > 0 and line[i - 1] in JUNK:
        i -= 1
    return line[:i]


class Reindenter:

    def __init__(self, f):
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0      # current indent level

        # Raw file lines.
        self.raw = f.readlines()

        # File lines, rstripped & tab-expanded.  Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line

        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line.  indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []

        # Save the newlines found in the file so they can be used to
        # create output without mutating the newlines.
        self.newlines = f.newlines

    def run(self):
        tokens = tokenize.generate_tokens(self.getline)
        for _token in tokens:
            self.tokeneater(*_token)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i + 1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line.  If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in range(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:            # Maybe it's a hanging
                                            # comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in range(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + (getlspace(after[jline - 1]) -
                                               getlspace(lines[jline]))
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after

    def write(self, f):
        f.writelines(self.after)

    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line

    # Line-eater for tokenize.
    def tokeneater(self, type, token, slinecol, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):

        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            # (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((slinecol[0], -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((slinecol[0], self.level))


# Count number of leading blanks.
def getlspace(line):
    i, n = 0, len(line)
    while i < n and line[i] == " ":
        i += 1
    return i


if __name__ == '__main__':
    main()
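Not part of the diff: besides the command-line interface, the Reindenter class above can be driven directly from Python. A small illustrative run, assuming the file is importable as reindent:

# Illustrative use of the Reindenter class on an in-memory file.
import io
from reindent import Reindenter   # assumes reindent.py is on sys.path

source = "def f():\n\tif True:\n\t\treturn 1\n"
r = Reindenter(io.StringIO(source))
r.run()                  # returns True if anything changed
out = io.StringIO()
r.write(out)
print(out.getvalue())    # tab indents rewritten as 4-space indents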
66
Tools/scripts/rgrep.py
Normal file
@ -0,0 +1,66 @@
#! /usr/bin/env python3

"""Reverse grep.

Usage: rgrep [-i] pattern file
"""

import sys
import re
import getopt


def main():
    bufsize = 64 * 1024
    reflags = 0
    opts, args = getopt.getopt(sys.argv[1:], "i")
    for o, a in opts:
        if o == '-i':
            reflags = reflags | re.IGNORECASE
    if len(args) < 2:
        usage("not enough arguments")
    if len(args) > 2:
        usage("exactly one file argument required")
    pattern, filename = args
    try:
        prog = re.compile(pattern, reflags)
    except re.error as msg:
        usage("error in regular expression: %s" % msg)
    try:
        f = open(filename)
    except IOError as msg:
        usage("can't open %r: %s" % (filename, msg), 1)
    f.seek(0, 2)
    pos = f.tell()
    leftover = None
    while pos > 0:
        size = min(pos, bufsize)
        pos = pos - size
        f.seek(pos)
        buffer = f.read(size)
        lines = buffer.split("\n")
        del buffer
        if leftover is None:
            if not lines[-1]:
                del lines[-1]
        else:
            lines[-1] = lines[-1] + leftover
        if pos > 0:
            leftover = lines[0]
            del lines[0]
        else:
            leftover = None
        for line in reversed(lines):
            if prog.search(line):
                print(line)


def usage(msg, code=2):
    sys.stdout = sys.stderr
    print(msg)
    print(__doc__)
    sys.exit(code)


if __name__ == '__main__':
    main()
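Not part of the diff: the interesting part of rgrep is the backwards, block-at-a-time scan that stitches lines split across block boundaries back together via the leftover variable. The same idea, factored into a standalone generator purely for illustration:

# Standalone sketch of reading a text file's lines in reverse, block by block.
def reverse_lines(f, bufsize=64 * 1024):
    f.seek(0, 2)
    pos = f.tell()
    leftover = None
    while pos > 0:
        size = min(pos, bufsize)
        pos -= size
        f.seek(pos)
        lines = f.read(size).split("\n")
        if leftover is None:
            if not lines[-1]:
                del lines[-1]          # drop the empty piece after a trailing newline
        else:
            lines[-1] += leftover      # rejoin a line split across two blocks
        if pos > 0:
            leftover = lines.pop(0)    # the first piece may still be incomplete
        else:
            leftover = None
        yield from reversed(lines)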
53
Tools/scripts/run_tests.py
Normal file
@ -0,0 +1,53 @@
"""Run Python's test suite in a fast, rigorous way.
|
||||
|
||||
The defaults are meant to be reasonably thorough, while skipping certain
|
||||
tests that can be time-consuming or resource-intensive (e.g. largefile),
|
||||
or distracting (e.g. audio and gui). These defaults can be overridden by
|
||||
simply passing a -u option to this script.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import test.support
|
||||
|
||||
|
||||
def is_multiprocess_flag(arg):
|
||||
return arg.startswith('-j') or arg.startswith('--multiprocess')
|
||||
|
||||
|
||||
def is_resource_use_flag(arg):
|
||||
return arg.startswith('-u') or arg.startswith('--use')
|
||||
|
||||
|
||||
def main(regrtest_args):
|
||||
args = [sys.executable,
|
||||
'-u', # Unbuffered stdout and stderr
|
||||
'-W', 'default', # Warnings set to 'default'
|
||||
'-bb', # Warnings about bytes/bytearray
|
||||
'-E', # Ignore environment variables
|
||||
]
|
||||
# Allow user-specified interpreter options to override our defaults.
|
||||
args.extend(test.support.args_from_interpreter_flags())
|
||||
|
||||
args.extend(['-m', 'test', # Run the test suite
|
||||
'-r', # Randomize test order
|
||||
'-w', # Re-run failed tests in verbose mode
|
||||
])
|
||||
if sys.platform == 'win32':
|
||||
args.append('-n') # Silence alerts under Windows
|
||||
if not any(is_multiprocess_flag(arg) for arg in regrtest_args):
|
||||
args.extend(['-j', '0']) # Use all CPU cores
|
||||
if not any(is_resource_use_flag(arg) for arg in regrtest_args):
|
||||
args.extend(['-u', 'all,-largefile,-audio,-gui'])
|
||||
args.extend(regrtest_args)
|
||||
print(' '.join(args))
|
||||
if sys.platform == 'win32':
|
||||
from subprocess import call
|
||||
sys.exit(call(args))
|
||||
else:
|
||||
os.execv(sys.executable, args)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv[1:])
|
||||
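Not part of the diff: the two helper predicates decide whether the script's own -j/-u defaults should be suppressed in favour of what the user passed. A quick illustration of the effect, assuming the file is importable as run_tests:

# With these arguments, the script keeps the user's choices instead of
# appending its own -j 0 / -u all,-largefile,-audio,-gui defaults.
import run_tests

regrtest_args = ['-j', '2', '-u', 'all', 'test_os']
print(any(run_tests.is_multiprocess_flag(a) for a in regrtest_args))  # True
print(any(run_tests.is_resource_use_flag(a) for a in regrtest_args))  # True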
35
Tools/scripts/serve.py
Normal file
@ -0,0 +1,35 @@
#!/usr/bin/env python3
'''
Small wsgiref based web server. Takes a path to serve from and an
optional port number (defaults to 8000), then tries to serve files.
Mime types are guessed from the file names, 404 errors are raised
if the file is not found. Used for the make serve target in Doc.
'''
import sys
import os
import mimetypes
from wsgiref import simple_server, util

def app(environ, respond):

    fn = os.path.join(path, environ['PATH_INFO'][1:])
    if '.' not in fn.split(os.path.sep)[-1]:
        fn = os.path.join(fn, 'index.html')
    type = mimetypes.guess_type(fn)[0]

    if os.path.exists(fn):
        respond('200 OK', [('Content-Type', type)])
        return util.FileWrapper(open(fn, "rb"))
    else:
        respond('404 Not Found', [('Content-Type', 'text/plain')])
        return [b'not found']

if __name__ == '__main__':
    path = sys.argv[1]
    port = int(sys.argv[2]) if len(sys.argv) > 2 else 8000
    httpd = simple_server.make_server('', port, app)
    print("Serving {} on port {}, control-C to stop".format(path, port))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("\b\bShutting down.")
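Not part of the diff: a quick client-side smoke test for the server above, assuming it was started with something like `serve.py Doc/build/html` on the default port; the /index.html path is only an example.

# Hypothetical check from another process while the server is running.
from urllib.request import urlopen

with urlopen('http://localhost:8000/index.html') as resp:
    print(resp.status, resp.headers['Content-Type'])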
78
Tools/scripts/smelly.py
Normal file
@ -0,0 +1,78 @@
#!/usr/bin/env python
# Script checking that all symbols exported by libpython start with Py or _Py

import subprocess
import sys
import sysconfig


def get_exported_symbols():
    LIBRARY = sysconfig.get_config_var('LIBRARY')
    if not LIBRARY:
        raise Exception("failed to get LIBRARY")

    args = ('nm', '-p', LIBRARY)
    print("+ %s" % ' '.join(args))
    proc = subprocess.run(args, stdout=subprocess.PIPE, universal_newlines=True)
    if proc.returncode:
        sys.stdout.write(proc.stdout)
        sys.exit(proc.returncode)

    stdout = proc.stdout.rstrip()
    if not stdout:
        raise Exception("command output is empty")
    return stdout


def get_smelly_symbols(stdout):
    symbols = []
    ignored_symtypes = set()
    for line in stdout.splitlines():
        # Split line '0000000000001b80 D PyTextIOWrapper_Type'
        if not line:
            continue

        parts = line.split(maxsplit=2)
        if len(parts) < 3:
            continue

        symtype = parts[1].strip()
        # Ignore private symbols.
        #
        # If lowercase, the symbol is usually local; if uppercase, the symbol
        # is global (external). There are however a few lowercase symbols that
        # are shown for special global symbols ("u", "v" and "w").
        if symtype.islower() and symtype not in "uvw":
            ignored_symtypes.add(symtype)
            continue

        symbol = parts[-1]
        if symbol.startswith(('Py', '_Py')):
            continue
        symbol = '%s (type: %s)' % (symbol, symtype)
        symbols.append(symbol)

    if ignored_symtypes:
        print("Ignored symbol types: %s" % ', '.join(sorted(ignored_symtypes)))
        print()
    return symbols


def main():
    nm_output = get_exported_symbols()
    symbols = get_smelly_symbols(nm_output)

    if not symbols:
        print("OK: no smelly symbol found")
        sys.exit(0)

    symbols.sort()
    for symbol in symbols:
        print("Smelly symbol: %s" % symbol)
    print()
    print("ERROR: Found %s smelly symbols!" % len(symbols))
    sys.exit(1)


if __name__ == "__main__":
    main()
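Not part of the diff: get_smelly_symbols() works on plain nm output, so it can be exercised without a real libpython. The addresses and symbol names below are invented for illustration, and the import assumes the file is importable as smelly.

# Illustrative input; the symbols are made up.
from smelly import get_smelly_symbols

fake_nm_output = (
    "0000000000001b80 D PyTextIOWrapper_Type\n"
    "0000000000002000 T totally_private_helper\n"
    "0000000000002040 t local_symbol"
)
print(get_smelly_symbols(fake_nm_output))
# ['totally_private_helper (type: T)'] -- Py*/_Py* names and lowercase (local) symbols are skipped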
26
Tools/scripts/suff.py
Normal file
@ -0,0 +1,26 @@
#! /usr/bin/env python3

# suff
#
# show different suffixes amongst arguments

import sys


def main():
    files = sys.argv[1:]
    suffixes = {}
    for filename in files:
        suff = getsuffix(filename)
        suffixes.setdefault(suff, []).append(filename)
    for suff, filenames in sorted(suffixes.items()):
        print(repr(suff), len(filenames))


def getsuffix(filename):
    name, sep, suff = filename.rpartition('.')
    return sep + suff if sep else ''


if __name__ == '__main__':
    main()
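Not part of the diff: a few sample values for getsuffix(), to make its contract explicit (import assumes the file is importable as suff).

# Behaviour of getsuffix() on typical names.
from suff import getsuffix

print(getsuffix('archive.tar.gz'))   # '.gz'     -- only the last suffix
print(getsuffix('Makefile'))         # ''        -- no dot, no suffix
print(getsuffix('.bashrc'))          # '.bashrc'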
2075
Tools/scripts/texi2html.py
Normal file
File diff suppressed because it is too large
55
Tools/scripts/untabify.py
Normal file
@ -0,0 +1,55 @@
#! /usr/bin/env python3

"Replace tabs with spaces in argument files. Print names of changed files."

import os
import sys
import getopt
import tokenize

def main():
    tabsize = 8
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t:")
        if not args:
            raise getopt.error("At least one file argument required")
    except getopt.error as msg:
        print(msg)
        print("usage:", sys.argv[0], "[-t tabwidth] file ...")
        return
    for optname, optvalue in opts:
        if optname == '-t':
            tabsize = int(optvalue)

    for filename in args:
        process(filename, tabsize)


def process(filename, tabsize, verbose=True):
    try:
        with tokenize.open(filename) as f:
            text = f.read()
            encoding = f.encoding
    except IOError as msg:
        print("%r: I/O error: %s" % (filename, msg))
        return
    newtext = text.expandtabs(tabsize)
    if newtext == text:
        return
    backup = filename + "~"
    try:
        os.unlink(backup)
    except OSError:
        pass
    try:
        os.rename(filename, backup)
    except OSError:
        pass
    with open(filename, "w", encoding=encoding) as f:
        f.write(newtext)
    if verbose:
        print(filename)


if __name__ == '__main__':
    main()
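Not part of the diff: the whole transformation is str.expandtabs(), whose column-aware behaviour is worth seeing once. Each tab is padded to the next multiple of tabsize (8 by default), not replaced by a fixed number of spaces.

# str.expandtabs pads each tab out to the next tab stop.
line = "a\tbb\tc"
print(repr(line.expandtabs(8)))   # 'a       bb      c'
print(repr(line.expandtabs(4)))   # 'a   bb  c'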
28
Tools/scripts/update_file.py
Normal file
@ -0,0 +1,28 @@
"""
|
||||
A script that replaces an old file with a new one, only if the contents
|
||||
actually changed. If not, the new file is simply deleted.
|
||||
|
||||
This avoids wholesale rebuilds when a code (re)generation phase does not
|
||||
actually change the in-tree generated code.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def main(old_path, new_path):
|
||||
with open(old_path, 'rb') as f:
|
||||
old_contents = f.read()
|
||||
with open(new_path, 'rb') as f:
|
||||
new_contents = f.read()
|
||||
if old_contents != new_contents:
|
||||
os.replace(new_path, old_path)
|
||||
else:
|
||||
os.unlink(new_path)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) != 3:
|
||||
print("Usage: %s <path to be updated> <path with new contents>" % (sys.argv[0],))
|
||||
sys.exit(1)
|
||||
main(sys.argv[1], sys.argv[2])
|
||||
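Not part of the diff: a typical build-system use of the script, regenerating into a temporary file and only touching the checked-in copy when it really changed. The file names here are only an example.

# Hypothetical driver for update_file.main(); generated.c.new is a freshly
# regenerated candidate that should replace generated.c only on real changes.
import update_file

update_file.main('generated.c', 'generated.c.new')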
Some files were not shown because too many files have changed in this diff.