From 0e60db07ffc0bf33a704e1784b39351915c768d2 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Wed, 25 Jan 2017 19:47:41 -0600
Subject: [PATCH 01/31] Working on mangled entity

---
 src/entity.py |  6 ++++--
 src/util.py   | 13 +++++++++----
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/src/entity.py b/src/entity.py
index adc9eb4..1d49957 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -167,6 +167,7 @@ class Entity(object):
         if not self.is_main:
             result = []
         else:
+            from util import mangled
             name = self.name
             result = [ \
             "subroutine writer_%s(irp_num)"%(name),
@@ -184,7 +185,7 @@ class Entity(object):
             "  if (.not.%s_is_built) then"%(self.same_as),
             "    call provide_%s"%(self.same_as),
             "  endif" ]
-            result += map(lambda x: "  call writer_%s(irp_num)" % (x), self.needs)
+            result += map(lambda x: "  call writer_%s(irp_num)" % (x), mangled(self.needs))
             result += [ \
             "  irp_is_open = .True.",
             "  irp_iunit = 9",
@@ -213,6 +214,7 @@ class Entity(object):
         if not self.is_main:
             result = []
         else:
+            from util import mangled
             name = self.name
             result = [ \
             "subroutine reader_%s(irp_num)"%(name),
@@ -226,7 +228,7 @@ class Entity(object):
             result += [\
             "  character*(%d) :: irp_here = 'reader_%s'"%(length,name),
             "  call irp_enter(irp_here)" ]
-            result += map(lambda x: "  call reader_%s(irp_num)" % (x), self.needs)
+            result += map(lambda x: "  call reader_%s(irp_num)" % (x), mangled(self.needs))
             result += [ \
             "  irp_is_open = .True.",
             "  irp_iunit = 9",
diff --git a/src/util.py b/src/util.py
index c3e1656..7072a3a 100644
--- a/src/util.py
+++ b/src/util.py
@@ -285,6 +285,10 @@ def build_dim(l_dim, colons=False):
     return "(%s)" % (",".join(l_dim_colons))
 
+def mangled(l_ent, d_ent):
+    # (List, Dict[str,Entity]) -> list
+    '''Create a unique list of providers'''
+    return OrderedUniqueList(d_ent[name].same_as for name in l_ent)
 
 def build_use(l_ent, d_ent):
     # (List, Dict[str,Entity]) -> list
@@ -294,12 +298,13 @@ def build_call_provide(l_ent, d_ent):
     # (List, Dict[str,Entity]) -> list
     '''Construct the fortran 90 call the provider needed by the list of entity'''
-    def fun(x):
+
+    # Get the correct name (in the case of multiple provider lines)
+    l_same_as = mangled(l_ent,d_ent)
+    def bld_f90(x):
         return [ "  if (.not.%s_is_built) then" % x,
                  "    call provide_%s" % x,
                  "  endif"]
-    # Get the correct name (in the case of multiple provider lines)
-    l_same_as = OrderedUniqueList(d_ent[x].same_as for x in l_ent)
-    return flatten(map(fun, l_same_as))
+    return flatten(map(bld_f90, l_same_as))

From 590a1ceef54834bdffc6963dcdc4b95318fd757e Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 26 Jan 2017 10:54:07 -0600
Subject: [PATCH 02/31] Fix pertinent children in graph

---
 src/entity.py      |  5 +++++
 src/irpy_files.py  |  4 ++--
 src/parsed_text.py | 20 +++++++++++++-------
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/src/entity.py b/src/entity.py
index 1d49957..98b8bef 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -691,6 +691,11 @@ class Entity(object):
         #Set by parsed_text.build_needs(...)
         raise AttributeError
 
+    @irpy.lazy_property_mutable
+    def needed_by(self):
+        #Set by parsed_text.build_needs(...)
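+        # Defaults to an empty list, so leaf entities (with no dependents)
+        # stay valid before build_needs() fills this mutable property in.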
+        return []
+
 
     @irpy.lazy_property
     def children(self):
 
diff --git a/src/irpy_files.py b/src/irpy_files.py
index 3632a2d..dc0a19b 100644
--- a/src/irpy_files.py
+++ b/src/irpy_files.py
@@ -114,7 +114,7 @@ class Irpy_comm_world(object):
             Ent_part = partial(Entity,buf,icount,comm_world=self)
 
             ent = Ent_part()
-            l_ent += [ent] + [Ent_part(other) for other in ent.others_entity_name]
+            l_ent += [ent] + [Ent_part(name) for name in ent.others_entity_name]
 
         # O(n^2) but who cares
         l_duplicate = [x for x in l_ent if l_ent.count(x) > 1]
@@ -227,7 +227,7 @@ class Irpy_comm_world(object):
         d_routine = self.d_routine
 
         import parsed_text
-        vtuple = [(v, s.same_as, s.regexp) for v, s in d_entity.iteritems()]
+        vtuple = [(v, s.regexp) for v, s in d_entity.iteritems()]
         def worker_parsed(filename_text):
             filename, text = filename_text
             return parsed_text.get_parsed_text(filename, text, d_entity, d_routine, vtuple)
diff --git a/src/parsed_text.py b/src/parsed_text.py
index be9dd7c..0731797 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -34,7 +34,7 @@ regexps_re_string_sub = regexps.re_string.sub
 
 def find_variables_in_line(line, vtuple):
     line_lower = regexps_re_string_sub('', line.lower)
-    return [same_as for v, same_as, regexp in vtuple if v in line_lower and regexp(line_lower)]
+    return [v for v, regexp in vtuple if v in line_lower and regexp(line_lower)]
 
 
 def find_funcs_in_line(line, stuple):
@@ -96,7 +96,7 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple):
                     varlist.append(v)
 
                 variable_list = find_variables_in_line(line, vtuple)
-                variable_list.remove(variables[v].same_as)
+                variable_list.remove(v)
 
                 append(Parsed_text(variable_list, line))
 
@@ -459,6 +459,7 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     # Needs and to_provide
     # ~#~#~#~#~#
 
+    # Loop over the main Entities
     for filename, text in parsed_text:
         l_begin = [i for i, (_, line) in enumerate(text) if isinstance(line, Begin_provider)]
@@ -485,6 +486,8 @@ def build_needs(parsed_text, subroutines, stuple, variables):
 
         entity.needs = uniquify(l_needs)
 
+
+    # Now do the other Entities
     for v in variables:
         main = variables[v].same_as
         if main != v:
@@ -492,11 +495,15 @@ def build_needs(parsed_text, subroutines, stuple, variables):
             variables[v].to_provide = variables[main].to_provide
 
     # ~#~#~#~#~#
-    # Needs and to_provide
+    # Needs_by
     # ~#~#~#~#~#
 
-    for v in variables:
-        variables[v].needed_by = []
+    # This is some dark voodoo magic.
+    # The algo is:
+    #   - Initialise needed_by
+    #   - Create the pointer copy
+    #   - Add the value (so it also adds to the pointer reference...)
+
     for v in variables:
         main = variables[v].same_as
         if main != v:
@@ -508,8 +515,7 @@ def build_needs(parsed_text, subroutines, stuple, variables):
         for x in var.needs:
             variables[x].needed_by.append(var.same_as)
 
-    for v in variables:
-        var = variables[v]
+    for var in variables.values():
         var.needed_by = uniquify(var.needed_by)
 
 ######################################################################

From 99848583ea0809e30af25c0c419b5935cce5e30f Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 26 Jan 2017 11:13:23 -0600
Subject: [PATCH 03/31] Correct graphviz format

---
 src/irpf90.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/irpf90.py b/src/irpf90.py
index 99632f9..1fce99b 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -57,10 +57,10 @@ def main():
     if command_line.do_graph:
         comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
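+        # dot only accepts '->' edges inside a digraph; an undirected 'graph' uses '--'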
-        print 'graph { '
+        print 'digraph { '
         for name,entity in comm_world.d_entity.items():
             if entity.needs:
-                print '   {0} -> {1}'.format(name, ' '.join(entity.needs))
+                print '   %s -> { %s } ' % (name, ' ; '.join(entity.needs))
         print '}'
 
         return

From a380a1b835d30422041dc42d28227c9d395397ba Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 26 Jan 2017 12:02:45 -0600
Subject: [PATCH 04/31] Really amazing dot representation

---
 src/irpf90.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/src/irpf90.py b/src/irpf90.py
index 1fce99b..5f8b755 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -55,12 +55,23 @@ def main():
     comm_world = Irpy_comm_world()
 
     if command_line.do_graph:
+        # Create a dot representation of the dependency graph.
+        # Merge inside a subgraph the Entities provided together
         comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
+        from util import mangled
 
         print 'digraph { '
+
+        from util import mangled
         for name,entity in comm_world.d_entity.items():
-            if entity.needs:
+            if entity.is_main:
+
+                if entity.needs:
                     print '   %s -> { %s } ' % (name, ' ; '.join(entity.needs))
+                if entity.others_entity_name:
+                    print '   subgraph cluster%s {' % name
+                    print '       %s ' % ' '.join([entity.name] + entity.others_entity_name)
+                    print '   }'
         print '}'
 
         return

From 6f48bc966c2ab2d61466389c738819e02494fdbb Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 26 Jan 2017 12:18:43 -0600
Subject: [PATCH 05/31] Yapf!

---
 src/irpy_files.py  | 260 +++++++++++++++++++++++----------------------
 src/parsed_text.py |   5 +-
 2 files changed, 139 insertions(+), 126 deletions(-)

diff --git a/src/irpy_files.py b/src/irpy_files.py
index dc0a19b..2131878 100644
--- a/src/irpy_files.py
+++ b/src/irpy_files.py
@@ -1,10 +1,10 @@
-from util import parmap,lazy_write_file
+from util import parmap, lazy_write_file
 from util import flatten, listdir
 
 try:
-  import irpy
+    import irpy
 except:
-  import lib_irpy as irpy
+    import lib_irpy as irpy
 
 import os
 import irpf90_t
@@ -14,67 +14,67 @@ import sys
 from command_line import command_line
 from util import logger
 
+
 class Irpy_comm_world(object):
     '''Maestro.'''
 
-  def __init__(self,l_dir=None, l_file=None):
-    # (Iter, Iter) -> None
+    def __init__(self, l_dir=None, l_file=None):
+        # (Iter, Iter) -> None
 
         # Create directories
         from itertools import ifilterfalse
         i_folder = ifilterfalse(os.path.exists, (irpf90_t.irpdir, irpf90_t.mandir))
-        map(os.mkdir,i_folder)
+        map(os.mkdir, i_folder)
 
-    # List file
-
-    l_dir =l_dir if l_dir else (command_line.include_dir+['.'])
-    l_not_dir = [d for d in l_dir if not (os.path.exists(d) and os.path.isdir(d))]
-    if l_not_dir:
-        logger.error('Try to include no existing directory: [%s]' % ','.join(l_not_dir))
-        sys.exit(1)
-
-    # Create folder in IRPDIR
-    i_folder = ifilterfalse(os.path.exists, (os.path.join(irpf90_t.irpdir,d) for d in l_dir))
-    map(os.mkdir, i_folder)
+        # List file
+        l_dir = l_dir if l_dir else (command_line.include_dir + ['.'])
+        l_not_dir = [d for d in l_dir if not (os.path.exists(d) and os.path.isdir(d))]
+        if l_not_dir:
+            logger.error('Try to include no existing directory: [%s]' % ','.join(l_not_dir))
+            sys.exit(1)
 
+        # Create folder in IRPDIR
+        i_folder = ifilterfalse(os.path.exists, (os.path.join(irpf90_t.irpdir, d) for d in l_dir))
+        map(os.mkdir, i_folder)
 
        s_folder_abs = set(os.path.abspath(path) for path in l_dir)
 
        s_file_folder_all = set(flatten(listdir(path, abspath=True) for path in s_folder_abs))
 
        # Take everything!
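        # i.e. keep only regular, non-hidden files; the .irp.f filter comes later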
        s_file_folder = filter(lambda f: os.path.isfile(f) and not f.startswith("."),
                               s_file_folder_all)

        s_file_tot = set(l_file) if l_file else set()
        s_file_tot.update(s_file_folder)

        s_file_rel = set(os.path.relpath(f, self.cwd) for f in s_file_tot)

        # Lazy Copy file
        for f in s_file_rel:
            src = os.path.join(self.cwd, f)
            text_ref = open(src, 'rb').read()

            dest = os.path.join(self.cwd, irpf90_t.irpdir, f)
            lazy_write_file(dest, text_ref)

        if command_line.do_codelet:
            s_file_tot.update(command_line.codelet[3])

        # Now filter the irpf90 files
        self.irpf90_files_ordered = sorted(filter(lambda f: f.endswith(".irp.f"), s_file_rel))

    @irpy.lazy_property
    def cwd(self):
        return os.getcwd()

    @irpy.lazy_property
    def t_filename_preprocessed_text(self):
        '''Tuple (filename, preprocessed_text)'''
        from preprocessed_text import Preprocess_text

        def worker_preprocess(filename):
            return (filename, Preprocess_text(filename).preprocessed_text)

        return parmap(worker_preprocess, self.irpf90_files_ordered)

    @irpy.lazy_property
    def l_preprocessed_text(self):
        # (None) -> List[Line]
        '''List preprocessed_text'''

        return [line for _, text in self.t_filename_preprocessed_text for line in text]

    @irpy.lazy_property
    def d_entity(self):
        # None -> Dict[Str,Entity]
        '''An entity is a collection of lines between BEGIN_PROVIDER and END_PROVIDER'''
        from irpf90_t import Begin_provider, End_provider
        from entity import Entity
        l_begin = [i for i, line in self.d_type_lines[Begin_provider]]
        l_end = [i for i, line in self.d_type_lines[End_provider]]
        l_provider = [self.l_preprocessed_text[begin:end] for begin, end in zip(l_begin, l_end)]

        l_ent = []
        for icount, buf in enumerate(l_provider):
            from functools import partial
            Ent_part = partial(Entity, buf, icount, comm_world=self)

            ent = Ent_part()
            l_ent += [ent] + [Ent_part(name) for name in ent.others_entity_name]

        # O(n^2) but who cares
        l_duplicate = [x for x in l_ent if l_ent.count(x) > 1]
        if l_duplicate:
            from util import logger
            logger.error('You have duplicate PROVIDER: %s' % ' '.join([e.name for e in l_duplicate]))
            import sys
            sys.exit(1)

        # Python 2.6 doesn't allow dict comprehensions
        d_ent = dict()
        for e in l_ent:
            d_ent[e.name] = e

        #
        # Second pass
        #
        # Modify parameters of variables

        # Touch Softouch
        def find_variable(line):
            from util import logger
            import sys

            l_var = line.lower.split()[1:]
            if len(l_var) < 1:
                logger.error("Syntax error: %s", line)
                import sys
                sys.exit(1)

            try:
                e = next(v for v in l_var if v not in d_ent)
            except StopIteration:
                pass
            else:
                logger.error("Entity %s unknown: %s" % (e, line))
                import sys
                sys.exit(1)

            return l_var

        d_modif = dict()
        from irpf90_t import Touch, SoftTouch, Free
        from util import flatten
        for cmd, l_type in [('is_self_touched', [Touch, SoftTouch]), ('is_free', [Free])]:

            l_line = flatten([self.d_type_lines[type_] for type_ in l_type])
            l_name = flatten([find_variable(line) for _, line in l_line])
            d_modif[cmd] = l_name

        # Read and Write

    @irpy.lazy_property
    def d_routine(self):
        '''
        Routine is a collection of lines between Subroutine / Function
        '''

        # ~#~#~#~#~#
        # Create the dict
        # ~#~#~#~#~#
        from irpf90_t import Subroutine, Function, Program, End

        d_type = self.d_type_lines
        l_begin = sorted(i for type_ in (Subroutine, Function, Program) for i, _ in d_type[type_])
        l_end = [i for i, _ in d_type[End]]

        from routine import Routine
        text = self.l_preprocessed_text
        l_rou = [
            Routine(text[b:e]) for b, e in zip(l_begin, l_end) if not isinstance(text[b], Program)
        ]

        # Now we can create a dict and add to it
        d_rou = dict()
            for x in entity.calls:
                d_called_by[x].add(name)

        from util import uniquify
        for routine in d_rou.values():
            for x in routine.calls:
                d_called_by[x].add(routine.name)

        for routine in d_rou.values():
            routine.called_by = sorted(d_called_by[routine.name])

            l_set = [d_rou[name].touches_my_self for name in routine.calls if name in d_rou]
            routine.touches_ancestor = set().union(*l_set)

        return d_rou

    @irpy.lazy_property
    def t_filename_parsed_text(self):
        '''(filename,parsed_text)'''
        d_entity = self.d_entity
        d_routine = self.d_routine

        import parsed_text
-        vtuple = [(v, s.regexp) for v, s in d_entity.iteritems()]
+        vtuple = [(v, s.regexp) for v, s in d_entity.iteritems()]
+
        def worker_parsed(filename_text):
            filename, text = filename_text
            return parsed_text.get_parsed_text(filename, text, d_entity, d_routine, vtuple)

        parsed_text_0 = parmap(worker_parsed, self.t_filename_preprocessed_text)

        from irpf90_t import NoDep, Declaration, Implicit, Use, Cont_provider

        def moved_to_top_l(ptext):
            l = [NoDep, Declaration, Implicit, Use, Cont_provider]
            for _, text in ptext:
                parsed_text.move_to_top_list(text, l)

        #Touch routine
        parsed_text.build_sub_needs(parsed_text_0, d_routine)
        moved_to_top_l(parsed_text_0)

        parsed_text_1 = parsed_text.add_subroutine_needs(parsed_text_0, d_routine)
        parsed_text_1 = parsed_text.move_variables(parsed_text_1)

        moved_to_top_l(parsed_text_1)

        parsed_text.check_opt(parsed_text_1)
        parsed_text_1 = parsed_text.perform_loop_substitutions(parsed_text_1)

        #touch entity
        stuple = [(s, v.regexp) for s, v in d_routine.iteritems() if v.is_function]
        parsed_text.build_needs(parsed_text_1, d_routine, stuple, d_entity)

        return parsed_text_1

    @irpy.lazy_property
    def d_module(self):
        from module import Fmodule
        result = dict()
        for filename, text in self.t_filename_parsed_text:
            result[filename] = Fmodule(text, filename, self.d_entity)

        return result

        for m in self.d_module.values():
            # Module data
            if m.has_irp_module:
                filename = os.path.join(irpdir, '%s.irp.module.F90' % m.filename)
                text = '\n'.join(m.header + m.head)
                lazy_write_file(filename, '%s\n' % text)

            # Subroutines
            filename = os.path.join(irpdir, '%s.irp.F90' % m.filename)

        import irp_stack
        irp_stack.create()

    def create_buildfile(self, ninja):
        import build_file
        build_file.run(self.d_module, ninja)

    def create_touches(self):
        import touches

    def create_man(self):
        import create_man as c_man
        c_man.run(self.d_entity, self.d_routine)

    def create_lock(self):
        from util import lazy_write_file
        l = sorted(self.d_entity.keys())

        out = []
        for v in l:
            out += self.d_entity[v].locker

        out += ["subroutine irp_init_locks_%s()" % (irpf90_t.irp_id), "  implicit none"]
        for v in l:
            out += ["  call irp_lock_%s(.True.)" % v]
            out += ["  call irp_lock_%s(.False.)" % v]
        out += ["end subroutine", ""]
""] + filename = os.path.join(irpf90_t.irpdir, 'irp_locks.irp.F90') + lazy_write_file(filename, '\n'.join(out)) diff --git a/src/parsed_text.py b/src/parsed_text.py index 0731797..32fb04c 100644 --- a/src/parsed_text.py +++ b/src/parsed_text.py @@ -109,7 +109,8 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): l = filter(lambda x: x not in varlist, l) for v in l: if v not in variables: - error.fail(line, "Variable %s is unknown" % (v)) + logger.error("Variable %s is unknown (%s)" % (v,line)) + sys.exit(1) append(Parsed_text(l, Provide(line.i, "", line.filename))) append(Parsed_text(l, Simple_line(line.i, "!%s" % (line.text), line.filename))) @@ -119,6 +120,8 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): for v in l: if v not in variables: error.fail(line, "Variable %s is unknown" % (v)) + sys.exit(1) + l = map(lambda x: "-%s" % (x), l) append(Parsed_text(l, Simple_line(line.i, "!%s" % (line.text), line.filename))) elif type(line) in [Touch, SoftTouch]: From cb7838d068d1de078aee313ba7e5e2aaab5c3ce1 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Tue, 31 Jan 2017 09:11:38 -0600 Subject: [PATCH 06/31] Add vitali reduction in -G --- src/irpf90.py | 11 +++++++++-- src/util.py | 23 +++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/src/irpf90.py b/src/irpf90.py index 5f8b755..456b941 100644 --- a/src/irpf90.py +++ b/src/irpf90.py @@ -58,7 +58,7 @@ def main(): # Create a dot reprenstion of the dependency graph. # Merge inside a subgraph the Entity provided together comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know. - from util import mangled + from util import mangled, l_dummy_entity print 'digraph { ' @@ -72,7 +72,14 @@ def main(): print ' subgraph cluster%s {' % name print ' %s ' % ' '.join([entity.name] + entity.others_entity_name) print ' }' - print '}' + + for i,s in enumerate(l_dummy_entity(comm_world.d_entity)): + print ' subgraph cluster%s {' % i + print ' %s ' % ' '.join(s) + print ' color = blue' + print ' }' + print '}' + return diff --git a/src/util.py b/src/util.py index 7072a3a..8717408 100644 --- a/src/util.py +++ b/src/util.py @@ -308,3 +308,26 @@ def build_call_provide(l_ent, d_ent): return flatten(map(bld_f90, l_same_as)) +def che_merge(sets): + #(List[Set] -> List[Set] + """Merge a list of set is they are not disjoint. + Note: + This will destry sets + """ + results = [] + upd, isd, pop = set.update, set.isdisjoint, sets.pop + while sets: + if not [upd(sets[0],pop(i)) for i in range(len(sets)-1,0,-1) if not isd(sets[0],sets[i])]: + results.append(pop(0)) + return results + + +def l_dummy_entity(d_entity): + from itertools import combinations + l_candidate_botom = [ (i,j) for i,j in combinations(d_entity.keys(),2) if d_entity[i].children == d_entity[j].children] + l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].parents == d_entity[j].parents] + + return che_merge(l_dummy) + + + From 93b2aca745f4aa3f24fa5150176ea86c05ced0a3 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Tue, 31 Jan 2017 14:17:23 -0600 Subject: [PATCH 07/31] Rework needed_by --- src/irpf90.py | 92 ++++++++++++++++++++++++++++++++++++++-------- src/parsed_text.py | 14 +++---- src/util.py | 22 ++++++++--- 3 files changed, 99 insertions(+), 29 deletions(-) diff --git a/src/irpf90.py b/src/irpf90.py index 456b941..f786c87 100644 --- a/src/irpf90.py +++ b/src/irpf90.py @@ -57,29 +57,89 @@ def main(): if command_line.do_graph: # Create a dot reprenstion of the dependency graph. 
        # Merge inside a subgraph the Entities provided together
        comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
-        from util import mangled
+        from util import mangled, l_dummy_entity
 
         print 'digraph { '
@@ -72,6 +72,13 @@ def main():
                     print '   subgraph cluster%s {' % name
                     print '       %s ' % ' '.join([entity.name] + entity.others_entity_name)
                     print '   }'
+
+        for i,s in enumerate(l_dummy_entity(comm_world.d_entity)):
+            print '   subgraph cluster%s {' % i
+            print '       %s ' % ' '.join(s)
+            print '       color = blue'
+            print '   }'
         print '}'
 
         return

diff --git a/src/util.py b/src/util.py
index 7072a3a..8717408 100644
--- a/src/util.py
+++ b/src/util.py
@@ -308,3 +308,26 @@ def build_call_provide(l_ent, d_ent):
     return flatten(map(bld_f90, l_same_as))
 
+def che_merge(sets):
+    #(List[Set] -> List[Set]
+    """Merge a list of sets if they are not disjoint.
+    Note:
+        This will destroy sets
+    """
+    results = []
+    upd, isd, pop = set.update, set.isdisjoint, sets.pop
+    while sets:
+        if not [upd(sets[0],pop(i)) for i in range(len(sets)-1,0,-1) if not isd(sets[0],sets[i])]:
+            results.append(pop(0))
+    return results
+
+
+def l_dummy_entity(d_entity):
+    from itertools import combinations
+    l_candidate_botom = [ (i,j) for i,j in combinations(d_entity.keys(),2) if d_entity[i].children == d_entity[j].children]
+    l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].parents == d_entity[j].parents]
+
+    return che_merge(l_dummy)

From 93b2aca745f4aa3f24fa5150176ea86c05ced0a3 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Tue, 31 Jan 2017 14:17:23 -0600
Subject: [PATCH 07/31] Rework needed_by

---
 src/irpf90.py      | 92 ++++++++++++++++++++++++++++++++++--------
 src/parsed_text.py | 14 +++----
 src/util.py        | 22 ++++++++---
 3 files changed, 99 insertions(+), 29 deletions(-)

diff --git a/src/irpf90.py b/src/irpf90.py
index 456b941..f786c87 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -57,29 +57,89 @@ def main():
     if command_line.do_graph:
         # Create a dot representation of the dependency graph.
        # Merge inside a subgraph the Entities provided together
 
        comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
-        from util import mangled, l_dummy_entity
+        from util import l_dummy_entity, split_l_set
+        #print len(comm_world.d_entity)
+        #print sum(len(i.needs) for i in comm_world.d_entity.values())
+        #print l_dummy_entity(comm_world.d_entity)
+        #print len(l_dummy_entity(comm_world.d_entity))
+        #print sum(len(i) for i in l_dummy_entity(comm_world.d_entity))
 
-        print 'digraph { '
 
-        from util import mangled
-        for name,entity in comm_world.d_entity.items():
-            if entity.is_main:
-
-                if entity.needs:
-                    print '   %s -> { %s } ' % (name, ' ; '.join(entity.needs))
-                if entity.others_entity_name:
-                    print '   subgraph cluster%s {' % name
-                    print '       %s ' % ' '.join([entity.name] + entity.others_entity_name)
-                    print '   }'
+        l_main_usr = set([entity for entity in comm_world.d_entity.values() if entity.is_main])
+        l_main_head_usr = set([entity for entity in l_main_usr if entity.others_entity_name])
+        l_main_atomic_usr = l_main_usr - l_main_head_usr
 
-        for i,s in enumerate(l_dummy_entity(comm_world.d_entity)):
-            print '   subgraph cluster%s {' % i
-            print '       %s ' % ' '.join(s)
-            print '       color = blue'
-            print '   }'
-        print '}'
+        print 'digraph Full { '
+        for e in comm_world.d_entity.values():
+            if e.needs:
+                print '   %s -> { %s } ' % (e.name, ' '.join(e.needs))
+
+        print '}'
+        print ''
+
+        print 'digraph Small { '
+        print '  graph [ordering="out"];'
+        for e in l_main_head_usr:
+            print '   subgraph cluster%s {' % e.name
+            print '       %s ' % ' '.join([e.name] + e.others_entity_name)
+            print '   }'
+
+
+        l_set_dummy_name= l_dummy_entity(comm_world.d_entity)
+        for i,s in enumerate(l_set_dummy_name):
+            print '   subgraph cluster%s {' % i
+            print '       %s ' % ' '.join(s)
+            print '       color = blue'
+            print '   }'
+
+
+        # We do exactly like the multi-provider.
+        l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name)
+        from util import flatten
+        l_dummy_name = flatten(l_set_dummy_name)
+        l_main_head_dummy = [comm_world.d_entity[name] for name in l_main_dummy_name]
+
+
+        # Optimisation
+        # 1) We merge the dependencies of the multi-providers. All entities inside a multi-provider are the same.
+        # 2) For the automatic ones, we draw only the arrow for one parent.
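+        # e.g. with {b,c} always provided together, a single blue edge a -> b
+        # stands in for both a -> b and a -> c.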
+
+        for e in (e for e in l_main_atomic_usr if e.needs and e.name not in l_dummy_name):
+            needs_filter = set(e.needs) - s_exculde_dummy_name
+            if set(e.needs) != needs_filter:
+                needs_filter = set(e.needs) - s_exculde_dummy_name
+                for s in needs_filter:
+                    if s in l_dummy_name:
+                        print '   %s -> { %s } [color=blue, penwidth=2]' % (e.name, s)
+
+                    else:
+                        print '   %s -> { %s }' % (e.name, s)
+            else:
+                print '   %s -> { %s }' % (e.name, ' ; '.join(e.needs))
+
+        for e in (e for e in l_main_head_usr if e.needs and e.name not in l_dummy_name):
+            needs_filter = set(e.needs) - s_exculde_dummy_name
+            if set(e.needs) != needs_filter:
+                needs_filter = set(e.needs) - s_exculde_dummy_name
+                for s in needs_filter:
+                    if s in l_dummy_name:
+                        print '   %s -> { %s } [color=blue, penwidth=2]' % (e.name, s)
+
+                    else:
+                        print '   %s -> { %s } [penwidth=2]' % (e.name, s)
+            else:
+                print '   %s -> { %s } [penwidth=2]' % (e.name, ' ; '.join(e.needs))
+
+        for e in (e for e in l_main_head_dummy if e.needs):
+            print '   %s -> { %s } [color=blue, penwidth=2]' % (e.name, ' ; '.join(e.needs))
 
+        print '}'
 
         return

diff --git a/src/parsed_text.py b/src/parsed_text.py
index 32fb04c..0e68117 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -507,16 +507,16 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     #   - Create the pointer copy
     #   - Add the value (so it also adds to the pointer reference...)
 
-    for v in variables:
-        main = variables[v].same_as
-        if main != v:
-            variables[v].needed_by = variables[main].needed_by
+#    for v in variables:
+#        main = variables[v].same_as
+#        if main != v:
+#            variables[v].needed_by = variables[main].needed_by
 
     for v in variables:
         var = variables[v]
-        if var.is_main:
-            for x in var.needs:
+#        if var.is_main:
+        for x in var.needs:
             variables[x].needed_by.append(var.name)
 
     for var in variables.values():
         var.needed_by = uniquify(var.needed_by)

diff --git a/src/util.py b/src/util.py
index 8717408..95c6a8c 100644
--- a/src/util.py
+++ b/src/util.py
@@ -256,7 +256,7 @@ def OrderedUniqueList(l):
     return sorted(set(l))
 
 def flatten(l_2d):
-    # (List [ List[Any] ]) -> List
+    # (List [ Iter[Any] ]) -> List
     '''Construct a copy of the 2d list collapsed into one dimension.
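    e.g. flatten([[1, 2], [3]]) -> [1, 2, 3]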
 
     Note:
 
@@ -287,7 +287,7 @@ def build_dim(l_dim, colons=False):
 
 def mangled(l_ent, d_ent):
     # (List, Dict[str,Entity]) -> list
-    '''Create a unique list of providers'''
+    '''Create a unique list of providers (merging the multi-providers)'''
     return OrderedUniqueList(d_ent[name].same_as for name in l_ent)
 
 def build_use(l_ent, d_ent):
     # (List, Dict[str,Entity]) -> list
@@ -323,11 +323,21 @@ def che_merge(sets):
 
 
 def l_dummy_entity(d_entity):
+    # Dict[str:Entity] -> List[set]
     from itertools import combinations
-    l_candidate_botom = [ (i,j) for i,j in combinations(d_entity.keys(),2) if d_entity[i].children == d_entity[j].children]
-    l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].parents == d_entity[j].parents]
+    l_candidate_botom = [ (i,j) for i,j in combinations(d_entity.keys(),2) if d_entity[i].needs == d_entity[j].needs]
+    l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].needed_by == d_entity[j].needed_by]
 
-    return che_merge(l_dummy)
+    return che_merge(l_dummy)
+    l_merge = che_merge(l_dummy)
+    return [l_set for l_set in l_merge if all(d_entity[e].is_main for e in l_set)]
 
+def split_l_set(l_set_org):
+    #(List[set] -> (List, Set)
+    '''Split the list of sets into a list of heads and the concatenation of all the tails.
+    Note: head and tail are not defined for sets. Head is one element of the set, and tail the rest.
+    '''
+    l_set = [set(s) for s in l_set_org]
+    l_main = [ s.pop() for s in l_set]
+    return l_main, set(flatten(l_set))

From 24e968de7086813b4068b4b621b44b80cee83542 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Tue, 31 Jan 2017 14:22:32 -0600
Subject: [PATCH 08/31] Fix toucher

---
 src/entity.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/entity.py b/src/entity.py
index 98b8bef..0f0fd17 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -400,8 +400,9 @@ class Entity(object):
         # Only one by EntityCollection
         if not self.is_main:
             return []
-
-        parents = self.parents
+
+        from util import mangled
+        parents = mangled(self.parents)
         name = self.name
 
         result = ["subroutine touch_%s" % (name)]

From 3084774791942e72b455c28963257d5ac98b9d6c Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Tue, 31 Jan 2017 14:25:06 -0600
Subject: [PATCH 09/31] Fix toucher 2.

---
 src/entity.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/entity.py b/src/entity.py
index 0f0fd17..1a9e2d3 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -402,7 +402,7 @@ class Entity(object):
             return []
 
         from util import mangled
-        parents = mangled(self.parents)
+        parents = mangled(self.parents,self.d_entity)
         name = self.name

From bab16c1e34d7c9d2892cda6e9cbe45d0d8dfa9a3 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Tue, 31 Jan 2017 14:35:55 -0600
Subject: [PATCH 10/31] Fix toucher 2.

---
 src/parsed_text.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/parsed_text.py b/src/parsed_text.py
index 0e68117..2907d29 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -507,15 +507,15 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     #   - Create the pointer copy
     #   - Add the value (so it also adds to the pointer reference...)
 
-#    for v in variables:
-#        main = variables[v].same_as
-#        if main != v:
-#            variables[v].needed_by = variables[main].needed_by
+    for v in variables:
+        main = variables[v].same_as
+        if main != v:
+            variables[v].needed_by = variables[main].needed_by
 
     for v in variables:
         var = variables[v]
-#        if var.is_main:
-        for x in var.needs:
+        if var.is_main:
+            for x in var.needs:
                 variables[x].needed_by.append(var.name)

From 602fb3cf29e72b90720eb02b9e61c5de87b01be0 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Tue, 31 Jan 2017 14:39:46 -0600
Subject: [PATCH 11/31] Fix toucher 3.

---
 src/parsed_text.py | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/src/parsed_text.py b/src/parsed_text.py
index 2907d29..955e7af 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -501,22 +501,10 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     # Needs_by
     # ~#~#~#~#~#
 
-    # This is some dark voodoo magic.
-    # The algo is:
-    #   - Initialise needed_by
-    #   - Create the pointer copy
-    #   - Add the value (so it also adds to the pointer reference...)
-
-    for v in variables:
-        main = variables[v].same_as
-        if main != v:
-            variables[v].needed_by = variables[main].needed_by
-
     for v in variables:
         var = variables[v]
-        if var.is_main:
-            for x in var.needs:
+        for x in var.needs:
             variables[x].needed_by.append(var.name)
 
     for var in variables.values():
         var.needed_by = uniquify(var.needed_by)

From 87584fb3cc916742ee91147ab7706c908e94ed73 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Wed, 1 Feb 2017 12:19:36 -0600
Subject: [PATCH 12/31] Guess is ok

---
 src/irpy_files.py  | 20 +++++------
 src/parsed_text.py | 89 ++++++++++++++++++++++++++++++++++++++++++----
 src/touches.py     |  1 -
 3 files changed, 91 insertions(+), 19 deletions(-)

diff --git a/src/irpy_files.py b/src/irpy_files.py
index 2131878..69d7725 100644
--- a/src/irpy_files.py
+++ b/src/irpy_files.py
@@ -115,7 +115,7 @@ class Irpy_comm_world(object):
             ent = Ent_part()
             l_ent += [ent] + [Ent_part(name) for name in ent.others_entity_name]
 
-# O(n^2) but who cares
+        # O(n^2) but who cares
         l_duplicate = [x for x in l_ent if l_ent.count(x) > 1]
         if l_duplicate:
             from util import logger
@@ -124,18 +124,15 @@ class Irpy_comm_world(object):
             import sys
             sys.exit(1)
 
-# Python 2.6 doesn't allow dict comprehensions
+        # Python 2.6 doesn't allow dict comprehensions
         d_ent = dict()
         for e in l_ent:
             d_ent[e.name] = e
 
-#
-# Second pass
-#
-# Modify parameters of variables
-
-# Touch Softouch
+        #
+        # Second pass
+        # Modify parameters of variables
+        # Touch Softouch
         def find_variable(line):
@@ -235,7 +232,7 @@ class Irpy_comm_world(object):
         d_routine = self.d_routine
 
         import parsed_text
-        vtuple = [(v, s.regexp) for v, s in d_entity.iteritems()]
+        vtuple = [(v,s.same_as, s.regexp) for v, s in d_entity.iteritems()]
 
         def worker_parsed(filename_text):
             filename, text = filename_text
@@ -250,8 +247,7 @@ class Irpy_comm_world(object):
             for _, text in ptext:
                 parsed_text.move_to_top_list(text, l)
 
-        #Touch routine
-
+        #Touch routine
         parsed_text.build_sub_needs(parsed_text_0, d_routine)
         moved_to_top_l(parsed_text_0)

diff --git a/src/parsed_text.py b/src/parsed_text.py
index 955e7af..e4cd2f7 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -34,7 +34,8 @@ regexps_re_string_sub = regexps.re_string.sub
 
 def find_variables_in_line(line, vtuple):
     line_lower = regexps_re_string_sub('', line.lower)
-    return [v for v, regexp in vtuple if v in line_lower and regexp(line_lower)]
+    #return [same_as for v,same_as, regexp in vtuple if v in line_lower and regexp(line_lower)]
+    return [v for v,same_as, regexp in vtuple if v in line_lower and regexp(line_lower)]
 
 
 def find_funcs_in_line(line, stuple):
@@ -96,7 +97,8 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple):
 
                 variable_list = find_variables_in_line(line, vtuple)
                 variable_list.remove(v)
+#                variable_list.remove(variables[v].same_as)
 
                 append(Parsed_text(variable_list, line))
 
@@ -110,6 +112,7 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple):
                 for v in l:
                     if v not in variables:
                         logger.error("Variable %s is unknown (%s)" % (v,line))
+                        import sys
                         sys.exit(1)
 
                 append(Parsed_text(l, Provide(line.i, "", line.filename)))
@@ -332,6 +335,52 @@ def add_subroutine_needs(parsed_text, subroutines):
 
 ######################################################################
+def raise_entity(text):
+    #(List[ Tuple[List[Entity], Tuple[int,List[Line]] ]]
+    '''Work in progress'''
+    l_token = []
+    d_level_var = dict()
+    d_level_var[0] = []
+
+    skip_interface = False
+    lvl = 0
+
+
+    for i,(e, line) in enumerate(text):
+        type_ = type(line)
+
+        if type_ in [Interface, End_interface]:
+            skip_interface = not skip_interface
+
+        if skip_interface:
+            continue
+
+        if type_ in [Begin_provider, Program, Subroutine, Function,If]:
+            l_token.append(i)
+            lvl += 1
+            d_level_var[lvl] = e[:]
+
+        elif type_ in [End_provider, End, Endif]:
+            i = l_token.pop()
+            text[i] = ( d_level_var[lvl],text[i][1])
+
+            lvl += -1
+
+        elif type_ in [Else,Elseif]:
+            i = l_token.pop()
+            text[i] = ( d_level_var[lvl],text[i][1])
+
+            assert (type(text[i][1]) == If)
+
+            l_token.append(i)
+            d_level_var[lvl] = e[:]
+
+        else:
+            d_level_var[lvl] += e[:]
+            text[i] = ([],line)
+
+    assert(lvl==0)
+
 def move_variables(parsed_text):
     #(List[ Tuple[List[Entity], Tuple[int,List[Line]] ]]
     '''Move variables into the top of the declaration'''
@@ -409,6 +458,10 @@ def move_variables(parsed_text):
 
     result.reverse()
 
+    #print '@@@@@@@@@@@@@'
+    #for i in text:
+    #    print i
+
     # 2nd pass
     text = result
     result = []
@@ -450,7 +503,14 @@ def move_variables(parsed_text):
 
     main_result = []
     for filename, text in parsed_text:
+        #for i in text:
+        #    print i
         main_result.append((filename, func(filename, text)))
 
+        #print '==========='
+        #for i in main_result[-1][1]:
+        #    print i
+
     return main_result
@@ -502,12 +562,29 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     # ~#~#~#~#~#
 
     for v in variables:
-        var = variables[v]
-        for x in var.needs:
-            variables[x].needed_by.append(var.name)
+        variables[v].needed_by = []
 
-    for var in variables.values():
-        var.needed_by = uniquify(var.needed_by)
+    for v in variables:
+        main = variables[v].same_as
+        if main != v:
+            variables[v].needed_by = variables[main].needed_by
+
+    for v in variables:
+        var = variables[v]
+        if var.is_main:
+            for x in var.needs:
+                variables[x].needed_by.append(var.same_as)
+
+    for v in variables:
+        var = variables[v]
+        var.needed_by = uniquify(var.needed_by)
+
+#    for v in variables:
+#        var = variables[v]
+#        for x in var.needs:
+#            variables[x].needed_by.append(var.name)
+#
+#    for var in variables.values():
+#        var.needed_by = uniquify(var.needed_by)
 
 ######################################################################
 from command_line import command_line
diff --git a/src/touches.py b/src/touches.py
index b2533bf..c3a6382 100644
--- a/src/touches.py
+++ b/src/touches.py
@@ -43,7 +43,6 @@ def create(modules,variables):
 
     for v,var in variables.iteritems():
         if var.fmodule not in main_modules_name:
-            #if var.is_self_touched:
             out += var.toucher
             if var.dim:
                 finalize += "    if (allocated(%s)) then\n"%v

From 1c9b661b49412b2fc839579415a72bd995ab99fa Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Wed, 1 Feb 2017 15:27:26 -0600
Subject: [PATCH 13/31] Fix touch

---
 src/entity.py      | 10 +++-------
 src/parsed_text.py | 46 ++++++++++++++++++++++++++++------------------
 2 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/src/entity.py b/src/entity.py
index 1a9e2d3..ba1bdf2 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -716,19 +716,15 @@ class Entity(object):
     ##########################################################
     @irpy.lazy_property
     def parents(self):
-        if not self.is_main:
-            return []
-
         result = []
         for x in self.needed_by:
             result.append(x)
-            try:
-                result += self.d_entity[x].parents
-            except RuntimeError:
-                pass  # Exception will be checked after
+            result += self.d_entity[x].parents
 
         result = OrderedUniqueList(result)
         if self.name in result:
             error.fail(self.prototype, "Cyclic dependencies:\n%s" % (str(self._parents)))
         return result

diff --git a/src/parsed_text.py b/src/parsed_text.py
index e4cd2f7..672c682 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -64,10 +64,12 @@ def check_touch(variables, line, vars, main_vars):
     vars.sort()
     for x, y in zip(vars, all_others):
         if x != y:
-            message = "The following entities should be touched:\n"
+            message = "The following entities should be touched:"
             message = "\n".join([message] + map(lambda x: "- %s" % (x, ), all_others))
-            error.fail(line, message)
-
+            from util import logger
+            logger.error("%s (%s)" % (message,line))
+            import sys
+            sys.exit(1)
 
 from collections import namedtuple
 Parsed_text = namedtuple('Parsed_text', ['varlist', 'line'])
@@ -383,7 +385,10 @@ def raise_entity(text):
 
 def move_variables(parsed_text):
     #(List[ Tuple[List[Entity], Tuple[int,List[Line]] ]]
-    '''Move variables into the top of the declaration'''
+    '''Move variables into the top of the declaration.
+
+    This needs to be optimised to handle the fact that we can have multi-providers
+    '''
 
     def func(filename, text):
@@ -560,29 +565,36 @@ def build_needs(parsed_text, subroutines, stuple, variables):
     # ~#~#~#~#~#
     # Needs_by
     # ~#~#~#~#~#
+    from collections import defaultdict
 
+    d_needed_by = defaultdict(list)
+
+    d_needed_by2 = defaultdict(list)
 
     for v in variables:
-        variables[v].needed_by = []
+        var = variables[v]
+        for x in var.needs:
+            d_needed_by2[x].append(var.name)
+
 
     for v in variables:
         main = variables[v].same_as
         if main != v:
-            variables[v].needed_by = variables[main].needed_by
+            d_needed_by[v] = d_needed_by[main]
 
     for v in variables:
         var = variables[v]
         if var.is_main:
             for x in var.needs:
-                variables[x].needed_by.append(var.same_as)
+                d_needed_by[x].append(var.same_as)
 
-    for v in variables:
-        var = variables[v]
-        var.needed_by = uniquify(var.needed_by)
+    from util import mangled
+    for v in d_needed_by:
+        d_needed_by[v] = uniquify(d_needed_by[v])
+        d_needed_by2[v] = uniquify(d_needed_by2[v])
+
+    for v in d_needed_by:
+        variables[v].needed_by = d_needed_by2[v]
+        variables[v].needed_by2 = d_needed_by2[v]
 
 ######################################################################
 from command_line import command_line

From aaebcf8025cf20a420aa805603986ed0f00e6ec8 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Wed, 1 Feb 2017 18:35:43 -0600
Subject: [PATCH 14/31] Vitali reduction working

---
 src/irpf90.py      | 128 +++++++++++++++++++++++----------------------
 src/parsed_text.py |  29 ++--------
 src/util.py        |  14 +++--
 3 files changed, 80 insertions(+), 91 deletions(-)

diff --git a/src/irpf90.py b/src/irpf90.py
index f786c87..12b654d 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -54,92 +54,96 @@ def main():
 
     comm_world = Irpy_comm_world()
 
+
     if command_line.do_graph:
         # Create a dot representation of the dependency graph.
         # Merge inside a subgraph the Entities provided together
+        def print_full_diagram(l_entity):
+
+            l_entity_not_leaf= [e for e in l_entity if e.needs]
+            print 'digraph Full { '
+            for e in l_entity_not_leaf:
+                print '   %s -> { %s } ' % (e.name, ' '.join(e.needs))
+            print '}'
+
+
+        def print_subgraph(l_tuple,name,color):
+            for i,s in enumerate(l_tuple):
+                print '   subgraph cluster_%s_%s {' % (name,i)
+                print '       %s ' % ' '.join(s)
+                print '       color = %s ' % color
+                print '   }'
 
         comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
 
-        from util import l_dummy_entity, split_l_set
-        #print len(comm_world.d_entity)
-        #print sum(len(i.needs) for i in comm_world.d_entity.values())
-        #print l_dummy_entity(comm_world.d_entity)
-        #print len(l_dummy_entity(comm_world.d_entity))
-        #print sum(len(i) for i in l_dummy_entity(comm_world.d_entity))
 
+        print 'digraph Compact { '
+        print '  graph [ordering="out" splines=true overlap=false];'
 
         l_main_usr = set([entity for entity in comm_world.d_entity.values() if entity.is_main])
         l_main_head_usr = set([entity for entity in l_main_usr if entity.others_entity_name])
-        l_main_atomic_usr = l_main_usr - l_main_head_usr
+        l_set_main_head_name = [ set([e.name]+e.others_entity_name) for e in l_main_head_usr]
 
-        print 'digraph Full { '
-        for e in comm_world.d_entity.values():
-            if e.needs:
-                print '   %s -> { %s } ' % (e.name, ' '.join(e.needs))
-
-        print '}'
-        print ''
-
-        print 'digraph Small { '
-        print '  graph [ordering="out"];'
-        for e in l_main_head_usr:
-            print '   subgraph cluster%s {' % e.name
-            print '       %s ' % ' '.join([e.name] + e.others_entity_name)
-            print '   }'
+        print_subgraph(l_set_main_head_name,'usr',color='blue')
 
+        from util import l_dummy_entity
 
         l_set_dummy_name= l_dummy_entity(comm_world.d_entity)
-        for i,s in enumerate(l_set_dummy_name):
-            print '   subgraph cluster%s {' % i
-            print '       %s ' % ' '.join(s)
-            print '       color = blue'
-            print '   }'
+        print_subgraph(l_set_dummy_name,'dummy',color='red')
 
+        #~=~=~=~=
+        # Create the list of unique nodes
+        #~=~=~=~=
 
-        # We do exactly like the multi-provider.
-        l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name)
-        from util import flatten
-        l_dummy_name = flatten(l_set_dummy_name)
-        l_main_head_dummy = [comm_world.d_entity[name] for name in l_main_dummy_name]
+        from util import split_l_set, flatten
+        l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name)
+        l_name_dummy_name_flatten = flatten(l_set_dummy_name)
 
+        l_main_head_dummy = set([comm_world.d_entity[name] for name in l_name_dummy_name_flatten])
+        s_exculde_dummy = set([comm_world.d_entity[name] for name in s_exculde_dummy_name])
 
+        l_node_uniq = (l_main_usr | l_main_head_dummy) - s_exculde_dummy
 
+        #~=~=~=~=
+        # Create all edges
+        #~=~=~=~=
+        # We need to remove the spurious edges caused by the dummy multiple providers
+        d_need = dict()
+        for e in l_node_uniq:
+            d_need[e.name] = set(e.needs)
 
+        #~=~=~=~=
+        # Create all edges
+        #~=~=~=~=
+        # Draw the edges
+        # If a bold arrow arrives at a multiple provider, it means it uses all the entities inside it.
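+        # i.e. one bold edge into a member of a cluster stands for an edge into every member of that cluster.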
- for e in (e for e in l_main_atomic_usr if e.needs and e.name not in l_dummy_name): - needs_filter = set(e.needs) - s_exculde_dummy_name - if set(e.needs) != needs_filter: - needs_filter = set(e.needs) - s_exculde_dummy_name - for s in needs_filter: - if s in l_dummy_name: - print ' %s -> { %s } [color=blue, penwidth=2]' % (e.name, s) + from util import uniquify + l_set_multiple = uniquify(l_set_dummy_name + l_set_main_head_name) - else: - print ' %s -> { %s }' % (e.name, s) - else: - print ' %s -> { %s }' % (e.name, ' ; '.join(e.needs)) + l_name_usr = [e.name for e in l_main_head_usr] + for source,l_target in d_need.items(): - for e in (e for e in l_main_head_usr if e.needs and e.name not in l_dummy_name): - needs_filter = set(e.needs) - s_exculde_dummy_name - if set(e.needs) != needs_filter: - needs_filter = set(e.needs) - s_exculde_dummy_name - for s in needs_filter: - if s in l_dummy_name: - print ' %s -> { %s } [color=blue, penwidth=2]' % (e.name, s) + if source in l_name_usr: + color = 'blue' + elif source in l_name_dummy_name_flatten: + color = 'red' + else: + color = 'black' - else: - print ' %s -> { %s } [penwidth=2]' % (e.name, s) - else: - print ' %s -> { %s } [penwidth=2]' % (e.name, ' ; '.join(e.needs)) + for s in l_set_multiple: + if s.issubset(l_target): + print ' %s -> %s [color="%s", penwidth=2]' %(source,sorted(s).pop(), color) + l_target = l_target - s - for e in (e for e in l_main_head_dummy if e.needs): - print ' %s -> { %s } [color=blue, penwidth=2]' % (e.name, ' ; '.join(e.needs)) + if l_target: + print ' %s -> { %s } [color="%s"]'% (source,' '.join(l_target), color) - print '}' - - + print ' }' return diff --git a/src/parsed_text.py b/src/parsed_text.py index 672c682..a5a3d3a 100644 --- a/src/parsed_text.py +++ b/src/parsed_text.py @@ -566,35 +566,14 @@ def build_needs(parsed_text, subroutines, stuple, variables): # Needs_by # ~#~#~#~#~# from collections import defaultdict + d_needed_by = defaultdict(list) - - d_needed_by2 = defaultdict(list) - - for v in variables: - var = variables[v] + for var in variables.values(): for x in var.needs: - d_needed_by2[x].append(var.name) + d_needed_by[x].append(var.name) - - for v in variables: - main = variables[v].same_as - if main != v: - d_needed_by[v] = d_needed_by[main] - - for v in variables: - var = variables[v] - if var.is_main: - for x in var.needs: - d_needed_by[x].append(var.same_as) - - from util import mangled for v in d_needed_by: - d_needed_by[v] = uniquify(d_needed_by[v]) - d_needed_by2[v] = uniquify(d_needed_by2[v]) - - for v in d_needed_by: - variables[v].needed_by = d_needed_by2[v] - variables[v].needed_by2 = d_needed_by2[v] + variables[v].needed_by = uniquify(d_needed_by[v]) ###################################################################### from command_line import command_line diff --git a/src/util.py b/src/util.py index 95c6a8c..f562d0c 100644 --- a/src/util.py +++ b/src/util.py @@ -243,8 +243,16 @@ def check_output(*popenargs, **kwargs): def uniquify(l,sort=False): # (Iter, bool) -> List[Any] - '''Uniquify a immutable iterable. Don't preserve the order''' - r = list(set(l)) + '''Uniquify a immutable iterable. Don't preserve the order. Or maybe.''' + + + #Be carefull that element in Iter can be unshable. 
+    try:
+        r = list(set(l))
+    except TypeError:
+        used = list()
+        r = [x for x in l if x not in used and (used.append(x) or True)]
+
     if not sort:
         return r
     else:
@@ -329,8 +337,6 @@ def l_dummy_entity(d_entity):
     l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].needed_by == d_entity[j].needed_by]
 
     return che_merge(l_dummy)
-    l_merge = che_merge(l_dummy)
-    return [l_set for l_set in l_merge if all(d_entity[e].is_main for e in l_set)]
 
 def split_l_set(l_set_org):

From f1ce16195f03566276997c4985070d276b3d3abc Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Fri, 3 Feb 2017 15:57:59 -0600
Subject: [PATCH 15/31] others_entity_name -> l_others_name

---
 src/create_man.py  |  2 +-
 src/entity.py      | 20 ++++++++++++--------
 src/irpf90.py      |  4 ++--
 src/irpy_files.py  |  2 +-
 src/parsed_text.py |  2 +-
 5 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/src/create_man.py b/src/create_man.py
index 5479dc0..b6c7bc0 100644
--- a/src/create_man.py
+++ b/src/create_man.py
@@ -56,7 +56,7 @@ def process_deps(l):
 def process_types(entity_input, d_entity):
     assert type(entity_input) == Entity
 
-    l_name = [entity_input.name] + entity_input.others_entity_name
+    l_name = entity_input.l_name
     l_entity = [d_entity[name] for name in l_name]
 
     l = [ "{0}\t:: {1}\t{2}".format(entity.type, name, build_dim(entity.dim) )

diff --git a/src/entity.py b/src/entity.py
index ba1bdf2..88afb7c 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -130,13 +130,17 @@ class Entity(object):
         d = self.d_type_lines
         return next(line for _,line in d[Begin_provider]+d[Cont_provider]
                     if line.filename[1] == self.name)
 
+    @irpy.lazy_property
+    def l_name(self):
+        # () -> List[str]
+        d = self.d_type_lines
+        return [line.filename[1] for _,line in d[Begin_provider]+d[Cont_provider] ]
 
     @irpy.lazy_property
-    def others_entity_name(self):
+    def l_others_name(self):
         # () -> List[str]
         '''Extract the other entity-names defined'''
-        d = self.d_type_lines
-        return [line.filename[1] for _,line in d[Begin_provider]+d[Cont_provider] if not line.filename[1] == self.name]
+        return [name for name in self.l_name if not name == self.name]
 
     @irpy.lazy_property
@@ -193,7 +197,7 @@ class Entity(object):
             "   irp_iunit = irp_iunit+1",
             "   inquire(unit=irp_iunit,opened=irp_is_open)",
             "  enddo" ]
-            for n in [name] + self.others_entity_name:
+            for n in self.l_name:
                 result += [\
                 "  open(unit=irp_iunit,file='irpf90_%s_'//trim(irp_num),form='FORMATTED',status='UNKNOWN',action='WRITE')"%(n),
                 "  write(irp_iunit,*) %s%s"%(n,build_dim(self.d_entity[n].dim,colons=True)),
@@ -224,7 +228,7 @@ class Entity(object):
             "  logical :: irp_is_open",
             "  integer :: irp_iunit" ]
         if command_line.do_debug:
-            length = len("reader_%s" % (self.name))
+            length = len("reader_%s" % (name))
             result += [\
             "  character*(%d) :: irp_here = 'reader_%s'"%(length,name),
             "  call irp_enter(irp_here)" ]
@@ -235,7 +239,7 @@ class Entity(object):
             "  do while (irp_is_open)",
             "   inquire(unit=irp_iunit,opened=irp_is_open)",
             "  enddo"]
-            for n in [name] + self.others:
+            for n in self.l_name:
                 result += [\
                 "  open(unit=irp_iunit,file='irpf90_%s_'//trim(irp_num),form='FORMATTED',status='OLD',action='READ')"%(n),
                 "  read(irp_iunit,*) %s%s"%(n,build_dim(self.cm_d_variable[n].dim,colons=True)),
@@ -325,7 +329,7 @@ class Entity(object):
             return []
         else:
             # We never go here
-            return [var for var in self.others_entity_name + [self.name] if self.d_entity[var].dim]
+            return [var for var in l_name if self.d_entity[var].dim]
 
     # ~ # ~ # ~
     # D e c l a r a t i o n
@@ -606,7 +610,7 @@ class Entity(object):
         if command_line.do_assert or command_line.do_debug:
             result.append(" call irp_enter(irp_here)")
         result += build_call_provide(self.to_provide, self.d_entity)
-        result += flatten(map(build_alloc, [self.same_as] + self.others_entity_name))
+        result += flatten(map(build_alloc, self.l_name))
         result += [ "  if (.not.%s_is_built) then" % (same_as),
                     "    call bld_%s" % (same_as),
                     "    %s_is_built = .True." % (same_as), ""

diff --git a/src/irpf90.py b/src/irpf90.py
index 12b654d..a9c8d3c 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -83,8 +83,8 @@ def main():
         l_main_usr = set([entity for entity in comm_world.d_entity.values() if entity.is_main])
-        l_main_head_usr = set([entity for entity in l_main_usr if entity.others_entity_name])
-        l_set_main_head_name = [ set([e.name]+e.others_entity_name) for e in l_main_head_usr]
+        l_main_head_usr = set([entity for entity in l_main_usr if entity.l_others_name])
+        l_set_main_head_name = [ set(e.l_name) for e in l_main_head_usr]
 
         print_subgraph(l_set_main_head_name,'usr',color='blue')

diff --git a/src/irpy_files.py b/src/irpy_files.py
index 69d7725..90518a9 100644
--- a/src/irpy_files.py
+++ b/src/irpy_files.py
@@ -113,7 +113,7 @@ class Irpy_comm_world(object):
             Ent_part = partial(Entity, buf, icount, comm_world=self)
 
             ent = Ent_part()
-            l_ent += [ent] + [Ent_part(name) for name in ent.others_entity_name]
+            l_ent += [ent] + [Ent_part(name) for name in ent.l_others_name]

diff --git a/src/parsed_text.py b/src/parsed_text.py
index a5a3d3a..cf57c92 100644
--- a/src/parsed_text.py
+++ b/src/parsed_text.py
@@ -57,7 +57,7 @@ def check_touch(variables, line, vars, main_vars):
         if main_var not in variables:
             error.fail(line, "Variable %s unknown" % (main_var, ))
         x = variables[main_var]
-        return [main_var] + x.others_entity_name
+        return [main_var] + x.l_others_name
 
     all_others = uniquify(flatten(map(fun, main_vars)))
     all_others.sort()

From 598587bcf36d145d0506e7a262a273e85a37048c Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Mon, 6 Feb 2017 12:16:40 -0600
Subject: [PATCH 16/31] Add ashes and // support

---
 src/ashes.py             | 2602 ++++++++++++++++++++++++++++++++++++++
 src/command_line.py      |    1 +
 src/entity.py            |  264 ++--
 src/preprocessed_text.py |   11 +
 4 files changed, 2772 insertions(+), 106 deletions(-)
 create mode 100644 src/ashes.py

diff --git a/src/ashes.py b/src/ashes.py
new file mode 100644
index 0000000..010471d
--- /dev/null
+++ b/src/ashes.py
@@ -0,0 +1,2602 @@
# -*- coding: utf-8 -*-

'''
Copyright (c) 2013, Mahmoud Hashemi

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.

    * The names of the contributors may not be used to endorse or
      promote products derived from this software without specific
      prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''


from __future__ import unicode_literals

import os
import re
import cgi
import sys
import json
import codecs
import pprint
import string
import fnmatch
import time


PY3 = (sys.version_info[0] == 3)
if PY3:
    unicode, string_types = str, (str, bytes)
else:
    string_types = (str, unicode)

__version__ = '15.1.1dev'
__author__ = 'Mahmoud Hashemi'
__contact__ = 'mahmoudrhashemi@gmail.com'
__url__ = 'https://github.com/mahmoud/ashes'
__license__ = 'BSD'


DEFAULT_EXTENSIONS = ('.dust', '.html', '.xml')
DEFAULT_IGNORED_PATTERNS = ('.#*',)


# need to add group for literals
# switch to using word boundary for params section
node_re = re.compile(r'({'
                     r'(?P<closing>\/)?'
                     r'(?:(?P<symbol>[\~\#\?\@\:\<\>\+\^\%])\s*)?'
                     r'(?P<refpath>[a-zA-Z0-9_\$\.]+|"[^"]+")'
                     r'(?:\:(?P<contpath>[a-zA-Z0-9\$\.]+))?'
                     r'(?P<filters>[\|a-z]+)*?'
                     r'(?P<params>(?:\s+\w+\=(("[^"]*?")|([$\w\.]+)))*)?'
                     r'\s*'
                     r'(?P<selfclosing>\/)?'
                     r'\})',
                     flags=re.MULTILINE)

key_re_str = '[a-zA-Z_$][0-9a-zA-Z_$]*'
key_re = re.compile(key_re_str)
path_re = re.compile('(' + key_re_str + ')?(\.' + key_re_str + ')+')
comment_re = re.compile(r'(\{!.+?!\})|(\{`.+?`\})', flags=re.DOTALL)


def get_path_or_key(pork):
    if pork == '.':
        pk = ['path', True, []]
    elif path_re.match(pork):
        f_local = pork.startswith('.')
        if f_local:
            pork = pork[1:]
        pk = ['path', f_local, pork.split('.')]
    elif key_re.match(pork):
        pk = ['key', pork]
    else:
        raise ValueError('expected a path or key, not %r' % pork)
    return pk


def split_leading(text):
    leading_stripped = text.lstrip()
    leading_ws = text[:len(text) - len(leading_stripped)]
    return leading_ws, leading_stripped


class Token(object):
    def __init__(self, text):
        self.text = text

    def get_line_count(self):
        # returns 0 if there's only one line, because the
        # token hasn't increased the number of lines.
        count = len(self.text.splitlines()) - 1
        if self.text[-1] in ('\n', '\r'):
            count += 1
        return count

    def __repr__(self):
        cn = self.__class__.__name__
        disp = self.text
        if len(disp) > 20:
            disp = disp[:17] + '...'
        return '%s(%r)' % (cn, disp)


class CommentToken(Token):
    def to_dust_ast(self):
        return [['comment', self.text]]


class RawToken(Token):
    def to_dust_ast(self):
        return [['raw', self.text]]


class BufferToken(Token):
    def to_dust_ast(self):
        # It is hard to simulate the PEG parsing in this case,
        # especially while supporting universal newlines.
        if not self.text:
            return []
        rev = []
        remaining_lines = self.text.splitlines()
        if self.text[-1] in ('\n', '\r'):
            # kind of a bug in splitlines if you ask me.
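            # e.g. 'a\n'.splitlines() == ['a'], so the trailing empty line is restored here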
+ remaining_lines.append('') + while remaining_lines: + line = remaining_lines.pop() + leading_ws, lstripped = split_leading(line) + if remaining_lines: + if lstripped: + rev.append(['buffer', lstripped]) + rev.append(['format', '\n', leading_ws]) + else: + if line: + rev.append(['buffer', line]) + ret = list(reversed(rev)) + return ret + + +ALL_ATTRS = ('closing', 'symbol', 'refpath', 'contpath', + 'filters', 'params', 'selfclosing') + + +class Tag(Token): + req_attrs = () + ill_attrs = () + + def __init__(self, text, **kw): + super(Tag, self).__init__(text) + self._attr_dict = kw + self.set_attrs(kw) + + @property + def param_list(self): + try: + return params_to_kv(self.params) + except AttributeError: + return [] + + @property + def name(self): + try: + return self.refpath.strip().lstrip('.') + except (AttributeError, TypeError): + return None + + def set_attrs(self, attr_dict, raise_exc=True): + cn = self.__class__.__name__ + all_attrs = getattr(self, 'all_attrs', ()) + if all_attrs: + req_attrs = [a for a in ALL_ATTRS if a in all_attrs] + ill_attrs = [a for a in ALL_ATTRS if a not in all_attrs] + else: + req_attrs = getattr(self, 'req_attrs', ()) + ill_attrs = getattr(self, 'ill_attrs', ()) + + opt_attrs = getattr(self, 'opt_attrs', ()) + if opt_attrs: + ill_attrs = [a for a in ill_attrs if a not in opt_attrs] + for attr in req_attrs: + if attr_dict.get(attr, None) is None: + raise ValueError('%s expected %s' % (cn, attr)) + for attr in ill_attrs: + if attr_dict.get(attr, None) is not None: + raise ValueError('%s does not take %s' % (cn, attr)) + + avail_attrs = [a for a in ALL_ATTRS if a not in ill_attrs] + for attr in avail_attrs: + setattr(self, attr, attr_dict.get(attr, '')) + return True + + @classmethod + def from_match(cls, match): + kw = dict([(str(k), v.strip()) + for k, v in match.groupdict().items() + if v is not None and v.strip()]) + obj = cls(text=match.group(0), **kw) + obj.orig_match = match + return obj + + +class ReferenceTag(Tag): + all_attrs = ('refpath',) + opt_attrs = ('filters',) + + def to_dust_ast(self): + pork = get_path_or_key(self.refpath) + filters = ['filters'] + if self.filters: + f_list = self.filters.split('|')[1:] + for f in f_list: + filters.append(f) + return [['reference', pork, filters]] + + +class SectionTag(Tag): + ill_attrs = ('closing') + + +class ClosingTag(Tag): + all_attrs = ('closing', 'refpath') + + +class SpecialTag(Tag): + all_attrs = ('symbol', 'refpath') + + def to_dust_ast(self): + return [['special', self.refpath]] + + +class BlockTag(Tag): + all_attrs = ('symbol', 'refpath') + + +class PartialTag(Tag): + req_attrs = ('symbol', 'refpath', 'selfclosing') + + def __init__(self, **kw): + super(PartialTag, self).__init__(**kw) + self.subtokens = parse_inline(self.refpath) + + def to_dust_ast(self): + """ + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + Adding in `params` so `partials` function like sections. 
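+
+ Returns a one-element list holding the ['partial', body, context,
+ params] node.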
+ """ + context = ['context'] + contpath = self.contpath + if contpath: + context.append(get_path_or_key(contpath)) + + params = ['params'] + param_list = self.param_list + if param_list: + try: + params.extend(params_to_dust_ast(param_list)) + except ParseError as pe: + pe.token = self + raise + + # tying to make this more standardized + inline_body = inline_to_dust_ast(self.subtokens) + return [['partial', + inline_body, + context, + params, + ]] + + +def parse_inline(source): + if not source: + raise ParseError('empty inline token') + if source.startswith('"') and source.endswith('"'): + source = source[1:-1] + if not source: + return [BufferToken("")] + tokens = tokenize(source, inline=True) + return tokens + + +def inline_to_dust_ast(tokens): + if tokens and all(isinstance(t, BufferToken) for t in tokens): + body = ['literal', ''.join(t.text for t in tokens)] + else: + body = ['body'] + for b in tokens: + body.extend(b.to_dust_ast()) + return body + + +def params_to_kv(params_str): + ret = [] + new_k, v = None, None + p_str = params_str.strip() + k, _, tail = p_str.partition('=') + while tail: + tmp, _, tail = tail.partition('=') + tail = tail.strip() + if not tail: + v = tmp + else: + v, new_k = tmp.split() + ret.append((k.strip(), v.strip())) + k = new_k + return ret + + +def params_to_dust_ast(param_kv): + ret = [] + for k, v in param_kv: + try: + v_body = get_path_or_key(v) + except ValueError: + v_body = inline_to_dust_ast(parse_inline(v)) + ret.append(['param', ['literal', k], v_body]) + return ret + + +def get_tag(match, inline=False): + groups = match.groupdict() + symbol = groups['symbol'] + closing = groups['closing'] + refpath = groups['refpath'] + if closing: + tag_type = ClosingTag + elif symbol is None and refpath is not None: + tag_type = ReferenceTag + elif symbol in '#?^<+@%': + tag_type = SectionTag + elif symbol == '~': + tag_type = SpecialTag + elif symbol == ':': + tag_type = BlockTag + elif symbol == '>': + tag_type = PartialTag + else: + raise ParseError('invalid tag symbol: %r' % symbol) + if inline and tag_type not in (ReferenceTag, SpecialTag): + raise ParseError('invalid inline tag') + return tag_type.from_match(match) + + +def tokenize(source, inline=False): + tokens = [] + com_nocom = comment_re.split(source) + line_counts = [1] + + def _add_token(t): + # i wish i had nonlocal so bad + t.start_line = sum(line_counts) + line_counts.append(t.get_line_count()) + t.end_line = sum(line_counts) + tokens.append(t) + for cnc in com_nocom: + if not cnc: + continue + elif cnc.startswith('{!') and cnc.endswith('!}'): + _add_token(CommentToken(cnc[2:-2])) + continue + elif cnc.startswith('{`') and cnc.endswith('`}'): + _add_token(RawToken(cnc[2:-2])) + continue + prev_end = 0 + start = None + end = None + for match in node_re.finditer(cnc): + start, end = match.start(1), match.end(1) + if prev_end < start: + _add_token(BufferToken(cnc[prev_end:start])) + prev_end = end + try: + _add_token(get_tag(match, inline)) + except ParseError as pe: + pe.line_no = sum(line_counts) + raise + tail = cnc[prev_end:] + if tail: + _add_token(BufferToken(tail)) + return tokens + +######### +# PARSING +######### + + +class Section(object): + def __init__(self, start_tag=None, blocks=None): + if start_tag is None: + refpath = None + name = '' + else: + refpath = start_tag.refpath + name = start_tag.name + + self.refpath = refpath + self.name = name + self.start_tag = start_tag + self.blocks = blocks or [] + + def add(self, obj): + if type(obj) == Block: + self.blocks.append(obj) + else: 
+ if not self.blocks: + self.blocks = [Block()] + self.blocks[-1].add(obj) + + def to_dict(self): + ret = {self.name: dict([(b.name, b.to_list()) for b in self.blocks])} + return ret + + def to_dust_ast(self): + symbol = self.start_tag.symbol + + pork = get_path_or_key(self.refpath) + + context = ['context'] + contpath = self.start_tag.contpath + if contpath: + context.append(get_path_or_key(contpath)) + + params = ['params'] + param_list = self.start_tag.param_list + if param_list: + try: + params.extend(params_to_dust_ast(param_list)) + except ParseError as pe: + pe.token = self + raise + + bodies = ['bodies'] + if self.blocks: + for b in reversed(self.blocks): + bodies.extend(b.to_dust_ast()) + + return [[symbol, + pork, + context, + params, + bodies]] + + +class Block(object): + def __init__(self, name='block'): + if not name: + raise ValueError('blocks need a name, not: %r' % name) + self.name = name + self.items = [] + + def add(self, item): + self.items.append(item) + + def to_list(self): + ret = [] + for i in self.items: + try: + ret.append(i.to_dict()) + except AttributeError: + ret.append(i) + return ret + + def _get_dust_body(self): + # for usage by root block in ParseTree + ret = [] + for i in self.items: + ret.extend(i.to_dust_ast()) + return ret + + def to_dust_ast(self): + name = self.name + body = ['body'] + dust_body = self._get_dust_body() + if dust_body: + body.extend(dust_body) + return [['param', + ['literal', name], + body]] + + +class ParseTree(object): + def __init__(self, root_block): + self.root_block = root_block + + def to_dust_ast(self): + ret = ['body'] + ret.extend(self.root_block._get_dust_body()) + return ret + + @classmethod + def from_tokens(cls, tokens): + root_sect = Section() + ss = [root_sect] # section stack + for token in tokens: + if type(token) == SectionTag: + new_s = Section(token) + ss[-1].add(new_s) + if not token.selfclosing: + ss.append(new_s) + elif type(token) == ClosingTag: + if len(ss) <= 1: + msg = 'closing tag before opening tag: %r' % token.text + raise ParseError(msg, token=token) + if token.name != ss[-1].name: + msg = ('improperly nested tags: %r does not close %r' % + (token.text, ss[-1].start_tag.text)) + raise ParseError(msg, token=token) + ss.pop() + elif type(token) == BlockTag: + if len(ss) <= 1: + msg = 'start block outside of a section: %r' % token.text + raise ParseError(msg, token=token) + new_b = Block(name=token.refpath) + ss[-1].add(new_b) + else: + ss[-1].add(token) + if len(ss) > 1: + raise ParseError('unclosed tag: %r' % ss[-1].start_tag.text, + token=ss[-1].start_tag) + return cls(root_sect.blocks[0]) + + @classmethod + def from_source(cls, src): + tokens = tokenize(src) + return cls.from_tokens(tokens) + + +############## +# Optimize AST +############## +DEFAULT_SPECIAL_CHARS = {'s': ' ', + 'n': '\n', + 'r': '\r', + 'lb': '{', + 'rb': '}'} + +DEFAULT_OPTIMIZERS = { + 'body': 'compact_buffers', + 'special': 'convert_special', + 'format': 'nullify', + 'comment': 'nullify'} + +for nsym in ('buffer', 'filters', 'key', 'path', 'literal', 'raw'): + DEFAULT_OPTIMIZERS[nsym] = 'noop' + +for nsym in ('#', '?', '^', '<', '+', '@', '%', 'reference', + 'partial', 'context', 'params', 'bodies', 'param'): + DEFAULT_OPTIMIZERS[nsym] = 'visit' + +UNOPT_OPTIMIZERS = dict(DEFAULT_OPTIMIZERS) +UNOPT_OPTIMIZERS.update({'format': 'noop', 'body': 'visit'}) + + +def escape(text, esc_func=json.dumps): + return esc_func(text) + + +class Optimizer(object): + def __init__(self, optimizers=None, special_chars=None): + if special_chars is 
None: + special_chars = DEFAULT_SPECIAL_CHARS + self.special_chars = special_chars + + if optimizers is None: + optimizers = DEFAULT_OPTIMIZERS + self.optimizers = dict(optimizers) + + def optimize(self, node): + # aka filter_node() + nsym = node[0] + optimizer_name = self.optimizers[nsym] + return getattr(self, optimizer_name)(node) + + def noop(self, node): + return node + + def nullify(self, node): + return None + + def convert_special(self, node): + return ['buffer', self.special_chars[node[1]]] + + def visit(self, node): + ret = [node[0]] + for n in node[1:]: + filtered = self.optimize(n) + if filtered: + ret.append(filtered) + return ret + + def compact_buffers(self, node): + ret = [node[0]] + memo = None + for n in node[1:]: + filtered = self.optimize(n) + if not filtered: + continue + if filtered[0] == 'buffer': + if memo is not None: + memo[1] += filtered[1] + else: + memo = filtered + ret.append(filtered) + else: + memo = None + ret.append(filtered) + return ret + + def __call__(self, node): + return self.optimize(node) + + +######### +# Compile +######### + + +ROOT_RENDER_TMPL = \ +'''def render(chk, ctx): + {body} + return {root_func_name}(chk, ctx) +''' + + +def _python_compile(source): + """ + Generates a Python `code` object (via `compile`). + + args: + source: (required) string of python code to be compiled + + this actually compiles the template to code + """ + try: + code = compile(source, '', 'single') + return code + except: + raise + + +def _python_exec(code, name, global_env=None): + """ + this loads a code object (generated via `_python_compile` + + args: + code: (required) code object (generate via `_python_compile`) + name: (required) the name of the function + + kwargs: + global_env: (default None): the environment + """ + if global_env is None: + global_env = {} + else: + global_env = dict(global_env) + if PY3: + exec(code, global_env) + else: + exec("exec code in global_env") + return global_env[name] + + +def python_string_to_code(python_string): + """ + utility function + used to compile python string functions to code object + + args: + ``python_string`` + """ + code = _python_compile(python_string) + return code + + +def python_string_to_function(python_string): + """ + utility function + used to compile python string functions for template loading/caching + + args: + ``python_string`` + """ + code = _python_compile(python_string) + function = _python_exec(code, name='render', global_env=None) + return function + + +class Compiler(object): + """ + Note: Compiler objects aren't really meant to be reused, + the class is just for namespacing and convenience. + """ + sections = {'#': 'section', + '?': 'exists', + '^': 'notexists'} + nodes = {'<': 'inline_partial', + '+': 'region', + '@': 'helper', + '%': 'pragma'} + + def __init__(self, env=None): + if env is None: + env = default_env + self.env = env + + self.bodies = {} + self.blocks = {} + self.block_str = '' + self.index = 0 + self.auto = self.env.autoescape_filter + + def compile(self, ast, name='render'): + python_source = self._gen_python(ast) + python_code = _python_compile(python_source) + python_func = _python_exec(python_code, name=name) + return (python_code, python_func) + + def _gen_python(self, ast): # ast to init? 
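+ # walking the AST fills self.bodies and self.blocks as a side effect;
+ # the collected bodies and block table are then rendered into
+ # ROOT_RENDER_TMPL around the root body function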
+ lines = [] + c_node = self._node(ast) + + block_str = self._root_blocks() + + bodies = self._root_bodies() + lines.extend(bodies.splitlines()) + if block_str: + lines.extend(['', block_str, '']) + body = '\n '.join(lines) + + ret = ROOT_RENDER_TMPL.format(body=body, + root_func_name=c_node) + self.python_source = ret + return ret + + def _root_blocks(self): + if not self.blocks: + self.block_str = '' + return '' + self.block_str = 'ctx = ctx.shift_blocks(blocks)\n ' + pairs = ['"' + name + '": ' + fn for name, fn in self.blocks.items()] + return 'blocks = {' + ', '.join(pairs) + '}' + + def _root_bodies(self): + max_body = max(self.bodies.keys()) + ret = [''] * (max_body + 1) + for i, body in self.bodies.items(): + ret[i] = ('\ndef body_%s(chk, ctx):\n %sreturn chk%s\n' + % (i, self.block_str, body)) + return ''.join(ret) + + def _convert_special(self, node): + return ['buffer', self.special_chars[node[1]]] + + def _node(self, node): + ntype = node[0] + if ntype in self.sections: + stype = self.sections[ntype] + return self._section(node, stype) + elif ntype in self.nodes: + ntype = self.nodes[ntype] + cfunc = getattr(self, '_' + ntype, None) + if not callable(cfunc): + raise TypeError('unsupported node type: "%r"', node[0]) + return cfunc(node) + + def _body(self, node): + index = self.index + self.index += 1 # make into property, equal to len of bodies? + name = 'body_%s' % index + self.bodies[index] = self._parts(node) + return name + + def _parts(self, body): + parts = [] + for part in body[1:]: + parts.append(self._node(part)) + return ''.join(parts) + + def _raw(self, node): + return '.write(%r)' % node[1] + + def _buffer(self, node): + return '.write(%s)' % escape(node[1]) + + def _format(self, node): + return '.write(%s)' % escape(node[1] + node[2]) + + def _reference(self, node): + return '.reference(%s,ctx,%s)' % (self._node(node[1]), + self._node(node[2])) + + def _section(self, node, cmd): + return '.%s(%s,%s,%s,%s)' % (cmd, + self._node(node[1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _inline_partial(self, node): + bodies = node[4] + for param in bodies[1:]: + btype = param[1][1] + if btype == 'block': + self.blocks[node[1][1]] = self._node(param[2]) + return '' + return '' + + def _region(self, node): + """aka the plus sign ('+') block""" + tmpl = '.block(ctx.get_block(%s),%s,%s,%s)' + return tmpl % (escape(node[1][1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _helper(self, node): + return '.helper(%s,%s,%s,%s)' % (escape(node[1][1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _pragma(self, node): + pr_name = node[1][1] + pragma = self.env.pragmas.get(pr_name) + if not pragma or not callable(pragma): + return '' # TODO: raise? + raw_bodies = node[4] + bodies = {} + for rb in raw_bodies[1:]: + bodies[rb[1][1]] = rb[2] + + raw_params = node[3] + params = {} + for rp in raw_params[1:]: + params[rp[1][1]] = rp[2][1] + + try: + ctx = node[2][1][1] + except (IndexError, AttributeError): + ctx = None + + return pragma(self, ctx, bodies, params) + + def _partial(self, node): + """ + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + Adding in `params` so `partials` function like sections. 
+ updating call to .partial() to include the kwargs + + dust.js reference : + compile.nodes = { + partial: function(context, node) { + return '.partial(' + + compiler.compileNode(context, node[1]) + + ',' + compiler.compileNode(context, node[2]) + + ',' + compiler.compileNode(context, node[3]) + ')'; + }, + """ + if node[0] == 'body': + body_name = self._node(node[1]) + return '.partial(' + body_name + ', %s)' % self._node(node[2]) + return '.partial(%s, %s, %s)' % (self._node(node[1]), + self._node(node[2]), + self._node(node[3])) + + def _context(self, node): + contpath = node[1:] + if contpath: + return 'ctx.rebase(%s)' % (self._node(contpath[0])) + return 'ctx' + + def _params(self, node): + parts = [self._node(p) for p in node[1:]] + if parts: + return '{' + ','.join(parts) + '}' + return 'None' + + def _bodies(self, node): + parts = [self._node(p) for p in node[1:]] + return '{' + ','.join(parts) + '}' + + def _param(self, node): + return ':'.join([self._node(node[1]), self._node(node[2])]) + + def _filters(self, node): + ret = '"%s"' % self.auto + f_list = ['"%s"' % f for f in node[1:]] # repr? + if f_list: + ret += ',[%s]' % ','.join(f_list) + return ret + + def _key(self, node): + return 'ctx.get(%r)' % node[1] + + def _path(self, node): + cur = node[1] + keys = node[2] or [] + return 'ctx.get_path(%s, %s)' % (cur, keys) + + def _literal(self, node): + return escape(node[1]) + + +######### +# Runtime +######### + + +class UndefinedValueType(object): + def __repr__(self): + return self.__class__.__name__ + '()' + + def __str__(self): + return '' + + +UndefinedValue = UndefinedValueType() + +# Prerequisites for escape_url_path + + +def _make_quote_map(allowed_chars): + ret = {} + for i in range(256): + c = chr(i) + esc_c = c if c in allowed_chars else '%{0:02X}'.format(i) + ret[i] = ret[c] = esc_c + return ret + +# The unreserved URI characters (per RFC 3986) +_UNRESERVED_CHARS = (frozenset(string.ascii_letters) + | frozenset(string.digits) + | frozenset('-._~')) +_RESERVED_CHARS = frozenset(":/?#[]@!$&'()*+,;=") # not used +_PATH_RESERVED_CHARS = frozenset("?#") # not used + +_PATH_QUOTE_MAP = _make_quote_map(_UNRESERVED_CHARS | set('/?=&:#')) + +# Escapes/filters + + +def escape_uri_path(text, to_bytes=True): + # actually meant to run on path + query args + fragment + text = to_unicode(text) + if not to_bytes: + return unicode().join([_PATH_QUOTE_MAP.get(c, c) for c in text]) + try: + bytestr = text.encode('utf-8') + except UnicodeDecodeError: + bytestr = text + except: + raise ValueError('expected text or UTF-8 encoded bytes, not %r' % text) + return ''.join([_PATH_QUOTE_MAP[b] for b in bytestr]) + + +def escape_uri_component(text): + return (escape_uri_path(text) # calls to_unicode for us + .replace('/', '%2F') + .replace('?', '%3F') + .replace('=', '%3D') + .replace('&', '%26')) + + +def escape_html(text): + text = to_unicode(text) + # TODO: dust.js doesn't use this, but maybe we should: + # .replace("'", '&squot;') + return cgi.escape(text, True) + + +def escape_js(text): + text = to_unicode(text) + return (text + .replace('\\', '\\\\') + .replace('"', '\\"') + .replace("'", "\\'") + .replace('\r', '\\r') + .replace('\u2028', '\\u2028') + .replace('\u2029', '\\u2029') + .replace('\n', '\\n') + .replace('\f', '\\f') + .replace('\t', '\\t')) + + +def comma_num(val): + try: + return '{0:,}'.format(val) + except ValueError: + return to_unicode(val) + + +def pp_filter(val): + try: + return pprint.pformat(val) + except: + try: + return repr(val) + except: + return 'unreprable object 
%s' % object.__repr__(val) + + +JSON_PP_INDENT = 2 + + +def ppjson_filter(val): + "A best-effort pretty-printing filter, based on the JSON module" + try: + return json.dumps(val, indent=JSON_PP_INDENT, sort_keys=True) + except TypeError: + return to_unicode(val) + + +# Helpers + +def first_helper(chunk, context, bodies, params=None): + if context.stack.index > 0: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def last_helper(chunk, context, bodies, params=None): + if context.stack.index < context.stack.of - 1: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def sep_helper(chunk, context, bodies, params=None): + if context.stack.index == context.stack.of - 1: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def idx_helper(chunk, context, bodies, params=None): + if 'block' in bodies: + return bodies['block'](chunk, context.push(context.stack.index)) + return chunk + + +def idx_1_helper(chunk, context, bodies, params=None): + if 'block' in bodies: + return bodies['block'](chunk, context.push(context.stack.index + 1)) + return chunk + + +def size_helper(chunk, context, bodies, params): + try: + key = params['key'] + return chunk.write(unicode(len(key))) + except (KeyError, TypeError): + return chunk + + +def _sort_iterate_items(items, sort_key, direction): + if not items: + return items + reverse = False + if direction == 'desc': + reverse = True + if not sort_key: + sort_key = 0 + elif sort_key[0] == '$': + sort_key = sort_key[1:] + if sort_key == 'key': + sort_key = 0 + elif sort_key == 'value': + sort_key = 1 + else: + try: + sort_key = int(sort_key) + except: + sort_key = 0 + return sorted(items, key=lambda x: x[sort_key], reverse=reverse) + + +def iterate_helper(chunk, context, bodies, params): + params = params or {} + body = bodies.get('block') + sort = params.get('sort') + sort_key = params.get('sort_key') + target = params.get('key') + if not body or not target: + context.env.log('warn', 'helper.iterate', 'empty block or target') + return chunk + try: + iter(target) + except: + context.env.log('warn', 'helper.iterate', 'non-iterable target') + return chunk + try: + items = target.items() + is_dict = True + except: + items = target + is_dict = False + if sort: + try: + items = _sort_iterate_items(items, sort_key, direction=sort) + except: + context.env.log('warn', 'helper.iterate', 'failed to sort target') + return chunk + if is_dict: + for key, value in items: + body(chunk, context.push({'$key': key, + '$value': value, + '$type': type(value).__name__, + '$0': key, + '$1': value})) + else: + # all this is for iterating over tuples and the like + for values in items: + try: + key = values[0] + except: + key, value = None, None + else: + try: + value = values[1] + except: + value = None + new_scope = {'$key': key, + '$value': value, + '$type': type(value).__name__} + try: + for i, value in enumerate(values): + new_scope['$%s' % i] = value + except TypeError: + context.env.log('warn', 'helper.iterate', + 'unable to enumerate values') + return chunk + else: + body(chunk, context.push(new_scope)) + return chunk + + +def _do_compare(chunk, context, bodies, params, cmp_op): + "utility function used by @eq, @gt, etc." 
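+ # resolve 'key' and 'value' against the context, coerce both to the
+ # declared (or inferred) type, then render the 'block' or 'else' body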
+ params = params or {} + try: + body = bodies['block'] + key = params['key'] + value = params['value'] + typestr = params.get('type') + except KeyError: + context.env.log('warn', 'helper.compare', + 'comparison missing key/value') + return chunk + rkey = _resolve_value(key, chunk, context) + if not typestr: + typestr = _COERCE_REV_MAP.get(type(rkey), 'string') + rvalue = _resolve_value(value, chunk, context) + crkey, crvalue = _coerce(rkey, typestr), _coerce(rvalue, typestr) + if isinstance(crvalue, type(crkey)) and cmp_op(crkey, crvalue): + return chunk.render(body, context) + elif 'else' in bodies: + return chunk.render(bodies['else'], context) + return chunk + + +def _resolve_value(item, chunk, context): + if not callable(item): + return item + try: + return chunk.tap_render(item, context) + except TypeError: + if getattr(context, 'is_strict', None): + raise + return item + + +_COERCE_MAP = { + 'number': float, + 'string': unicode, + 'boolean': bool, +} # Not implemented: date, context +_COERCE_REV_MAP = dict([(v, k) for k, v in _COERCE_MAP.items()]) +_COERCE_REV_MAP[int] = 'number' +try: + _COERCE_REV_MAP[long] = 'number' +except NameError: + pass + + +def _coerce(value, typestr): + coerce_type = _COERCE_MAP.get(typestr.lower()) + if not coerce_type or isinstance(value, coerce_type): + return value + if isinstance(value, string_types): + try: + value = json.loads(value) + except (TypeError, ValueError): + pass + try: + return coerce_type(value) + except (TypeError, ValueError): + return value + + +def _make_compare_helpers(): + from functools import partial + from operator import eq, ne, lt, le, gt, ge + CMP_MAP = {'eq': eq, 'ne': ne, 'gt': gt, 'lt': lt, 'gte': ge, 'lte': le} + ret = {} + for name, op in CMP_MAP.items(): + ret[name] = partial(_do_compare, cmp_op=op) + return ret + + +DEFAULT_HELPERS = {'first': first_helper, + 'last': last_helper, + 'sep': sep_helper, + 'idx': idx_helper, + 'idx_1': idx_1_helper, + 'size': size_helper, + 'iterate': iterate_helper} +DEFAULT_HELPERS.update(_make_compare_helpers()) + + +def make_base(env, stack, global_vars=None): + """`make_base( env, stack, global_vars=None )` + `env` and `stack` are required by the Python implementation. + `global_vars` is optional. set to global_vars. + + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + adding this to try and create compatibility with Dust + + this is used for the non-activated alternative approach of rendering a + partial with a custom context object + + dust.makeBase = function(global) { + return new Context(new Stack(), global); + }; + """ + return Context(env, stack, global_vars) + + +# Actual runtime objects + +class Context(object): + """\ + The context is a special object that handles variable lookups and + controls template behavior. It is the interface between your + application logic and your templates. The context can be + visualized as a stack of objects that grows as we descend into + nested sections. + + When looking up a key, Dust searches the context stack from the + bottom up. There is no need to merge helper functions into the + template data; instead, create a base context onto which you can + push your local template data. 
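+
+ A minimal illustrative sketch (the data here is made up):
+
+ ctx = Context.wrap(default_env, {'title': 'irpf90'})
+ ctx.get('title') # -> 'irpf90'
+ ctx.push({'title': 'local'}).get('title') # nearest frame wins -> 'local'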
+ """ + def __init__(self, env, stack, global_vars=None, blocks=None): + self.env = env + self.stack = stack + if global_vars is None: + global_vars = {} + self.globals = global_vars + self.blocks = blocks + + @classmethod + def wrap(cls, env, context): + if isinstance(context, cls): + return context + return cls(env, Stack(context)) + + def get(self, path, cur=False): + "Retrieves the value `path` as a key from the context stack." + if isinstance(path, (str, unicode)): + if path[0] == '.': + cur = True + path = path[1:] + path = path.split('.') + return self._get(cur, path) + + def get_path(self, cur, down): + return self._get(cur, down) + + def _get(self, cur, down): + # many thanks to jvanasco for his contribution -mh 2014 + """ + * Get a value from the context + * @method `_get` + * @param {boolean} `cur` Get only from the current context + * @param {array} `down` An array of each step in the path + * @private + * @return {string | object} + """ + ctx = self.stack + length = 0 if not down else len(down) # TODO: try/except? + + if not length: + # wants nothing? ok, send back the entire payload + return ctx.head + + first_path_element = down[0] + + value = UndefinedValue + + if cur and not length: + ctx = ctx.head + else: + if not cur: + # Search up the stack for the first_path_element value + while ctx: + if isinstance(ctx.head, dict): + if first_path_element in ctx.head: + value = ctx.head[first_path_element] + break + ctx = ctx.tail + if value is UndefinedValue: + if first_path_element in self.globals: + ctx = self.globals[first_path_element] + else: + ctx = UndefinedValue + else: + ctx = value + else: + # if scope is limited by a leading dot, don't search up tree + if first_path_element in ctx.head: + ctx = ctx.head[first_path_element] + else: + ctx = UndefinedValue + + i = 1 + while ctx and ctx is not UndefinedValue and i < length: + if down[i] in ctx: + ctx = ctx[down[i]] + else: + ctx = UndefinedValue + i += 1 + + if ctx is UndefinedValue: + return None + else: + return ctx + + def push(self, head, index=None, length=None): + """\ + Pushes an arbitrary value `head` onto the context stack and returns + a new `Context` instance. 
Specify `index` and/or `length` to enable + enumeration helpers.""" + return Context(self.env, + Stack(head, self.stack, index, length), + self.globals, + self.blocks) + + def rebase(self, head): + """\ + Returns a new context instance consisting only of the value at + `head`, plus any previously defined global object.""" + return Context(self.env, + Stack(head), + self.globals, + self.blocks) + + def current(self): + """Returns the head of the context stack.""" + return self.stack.head + + def get_block(self, key): + blocks = self.blocks + if not blocks: + return None + fn = None + for block in blocks[::-1]: + try: + fn = block[key] + if fn: + break + except KeyError: + continue + return fn + + def shift_blocks(self, local_vars): + blocks = self.blocks + if local_vars: + if blocks: + new_blocks = blocks + [local_vars] + else: + new_blocks = [local_vars] + return Context(self.env, self.stack, self.globals, new_blocks) + return self + + +class Stack(object): + def __init__(self, head, tail=None, index=None, length=None): + self.head = head + self.tail = tail + self.index = index or 0 + self.of = length or 1 + # self.is_object = is_scalar(head) + + def __repr__(self): + return 'Stack(%r, %r, %r, %r)' % (self.head, + self.tail, + self.index, + self.of) + + +class Stub(object): + def __init__(self, callback): + self.head = Chunk(self) + self.callback = callback + self._out = [] + + @property + def out(self): + return ''.join(self._out) + + def flush(self): + chunk = self.head + while chunk: + if chunk.flushable: + self._out.append(chunk.data) + elif chunk.error: + self.callback(chunk.error, '') + self.flush = lambda self: None + return + else: + return + self.head = chunk = chunk.next + self.callback(None, self.out) + + +class Stream(object): + def __init__(self): + self.head = Chunk(self) + self.events = {} + + def flush(self): + chunk = self.head + while chunk: + if chunk.flushable: + self.emit('data', chunk.data) + elif chunk.error: + self.emit('error', chunk.error) + self.flush = lambda self: None + return + else: + return + self.head = chunk = chunk.next + self.emit('end') + + def emit(self, etype, data=None): + try: + self.events[etype](data) + except KeyError: + pass + + def on(self, etype, callback): + self.events[etype] = callback + return self + + +def is_scalar(obj): + return not hasattr(obj, '__iter__') or isinstance(obj, string_types) + + +def is_empty(obj): + try: + return obj is None or obj is False or len(obj) == 0 + except TypeError: + return False + + +class Chunk(object): + """\ + A Chunk is a Dust primitive for controlling the flow of the + template. Depending upon the behaviors defined in the context, + templates may output one or more chunks during rendering. A + handler that writes to a chunk directly must return the modified + chunk. + """ + def __init__(self, root, next_chunk=None, taps=None): + self.root = root + self.next = next_chunk + self.taps = taps + self._data, self.data = [], '' + self.flushable = False + self.error = None + + def write(self, data): + "Writes data to this chunk's buffer" + if self.taps: + data = self.taps.go(data) + self._data.append(data) + return self + + def end(self, data=None): + """\ + Writes data to this chunk's buffer and marks it as flushable. This + method must be called on any chunks created via chunk.map. Do + not call this method on a handler's main chunk -- dust.render + and dust.stream take care of this for you. 
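+
+ Returns the chunk itself so calls can be chained.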
+ """ + if data: + self.write(data) + self.data = ''.join(self._data) + self.flushable = True + self.root.flush() + return self + + def map(self, callback): + """\ + Creates a new chunk and passes it to `callback`. Use map to wrap + asynchronous functions and to partition the template for + streaming. chunk.map tells Dust to manufacture a new chunk, + reserving a slot in the output stream before continuing on to + render the rest of the template. You must (eventually) call + chunk.end() on a mapped chunk to weave its content back into + the stream. + """ + cursor = Chunk(self.root, self.next, self.taps) + branch = Chunk(self.root, cursor, self.taps) + self.next = branch + self.data = ''.join(self._data) + self.flushable = True + callback(branch) + return cursor + + def tap(self, tap): + "Convenience methods for applying filters to a stream." + if self.taps: + self.taps = self.taps.push(tap) + else: + self.taps = Tap(tap) + return self + + def untap(self): + "Convenience methods for applying filters to a stream." + self.taps = self.taps.tail + return self + + def render(self, body, context): + """\ + Renders a template block, such as a default block or an else + block. Basically equivalent to body(chunk, context). + """ + return body(self, context) + + def tap_render(self, body, context): + output = [] + + def tmp_tap(data): + if data: + output.append(data) + return '' + self.tap(tmp_tap) + try: + self.render(body, context) + finally: + self.untap() + return ''.join(output) + + def reference(self, elem, context, auto, filters=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + if callable(elem): + # this whole callable thing is a quirky thing about dust + try: + elem = elem(self, context) + except TypeError: + if getattr(context, 'is_strict', None): + raise + elem = repr(elem) + else: + if isinstance(elem, Chunk): + return elem + if is_empty(elem): + return self + else: + filtered = context.env.apply_filters(elem, auto, filters) + return self.write(filtered) + + def section(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers.""" + if callable(elem): + try: + elem = elem(self, context, bodies, params) + except TypeError: + if getattr(context, 'is_strict', None): + raise + elem = repr(elem) + else: + if isinstance(elem, Chunk): + return elem + body = bodies.get('block') + else_body = bodies.get('else') + if params: + context = context.push(params) + if not elem and else_body and elem is not 0: + # breaks with dust.js; dust.js doesn't render else blocks + # on sections referencing empty lists. 
+ return else_body(self, context) + + if not body or elem is None: + return self + if elem is True: + return body(self, context) + elif isinstance(elem, dict) or is_scalar(elem): + return body(self, context.push(elem)) + else: + chunk = self + length = len(elem) + head = context.stack.head + for i, el in enumerate(elem): + new_ctx = context.push(el, i, length) + new_ctx.globals.update({'$len': length, + '$idx': i, + '$idx_1': i + 1}) + chunk = body(chunk, new_ctx) + return chunk + + def exists(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers.""" + if not is_empty(elem): + if bodies.get('block'): + return bodies['block'](self, context) + elif bodies.get('else'): + return bodies['else'](self, context) + return self + + def notexists(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + if is_empty(elem): + if bodies.get('block'): + return bodies['block'](self, context) + elif bodies.get('else'): + return bodies['else'](self, context) + return self + + def block(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + body = bodies.get('block') + if elem: + body = elem + if body: + body(self, context) + return self + + def partial(self, elem, context, params=None): + """These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers. + """ + if params: + context = context.push(params) + if callable(elem): + _env = context.env + cback = lambda name, chk: _env.load_chunk(name, chk, context).end() + return self.capture(elem, context, cback) + return context.env.load_chunk(elem, self, context) + + def helper(self, name, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + return context.env.helpers[name](self, context, bodies, params) + + def capture(self, body, context, callback): + def map_func(chunk): + def stub_cb(err, out): + if err: + chunk.set_error(err) + else: + callback(out, chunk) + stub = Stub(stub_cb) + body(stub.head, context).end() + return self.map(map_func) + + def set_error(self, error): + "Sets an error on this chunk and immediately flushes the output." 
+ self.error = error + self.root.flush() + return self + + +class Tap(object): + def __init__(self, head=None, tail=None): + self.head = head + self.tail = tail + + def push(self, tap): + return Tap(tap, self) + + def go(self, value): + tap = self + while tap: + value = tap.head(value) # TODO: type errors? + tap = tap.tail + return value + + def __repr__(self): + cn = self.__class__.__name__ + return '%s(%r, %r)' % (cn, self.head, self.tail) + + +def to_unicode(obj): + try: + return unicode(obj) + except UnicodeDecodeError: + return unicode(obj, encoding='utf8') + + +DEFAULT_FILTERS = { + 'h': escape_html, + 's': to_unicode, + 'j': escape_js, + 'u': escape_uri_path, + 'uc': escape_uri_component, + 'cn': comma_num, + 'pp': pp_filter, + 'ppjson': ppjson_filter} + + +######### +# Pragmas +######### + + +def esc_pragma(compiler, context, bodies, params): + old_auto = compiler.auto + if not context: + context = 'h' + if context == 's': + compiler.auto = '' + else: + compiler.auto = context + out = compiler._parts(bodies['block']) + compiler.auto = old_auto + return out + + +DEFAULT_PRAGMAS = { + 'esc': esc_pragma +} + + +########### +# Interface +########### + +def load_template_path(path, encoding='utf-8'): + """ + split off `from_path` so __init__ can use + returns a tuple of the source and adjusted absolute path + """ + abs_path = os.path.abspath(path) + if not os.path.isfile(abs_path): + raise TemplateNotFound(abs_path) + with codecs.open(abs_path, 'r', encoding) as f: + source = f.read() + return (source, abs_path) + + +class Template(object): + # no need to set defaults on __init__ + last_mtime = None + is_convertable = True + + def __init__(self, + name, + source, + source_file=None, + optimize=True, + keep_source=True, + env=None, + lazy=False, + ): + if not source and source_file: + (source, source_abs_path) = load_template_path(source_file) + self.name = name + self.source = source + self.source_file = source_file + self.time_generated = time.time() + if source_file: + self.last_mtime = os.path.getmtime(source_file) + self.optimized = optimize + if env is None: + env = default_env + self.env = env + + if lazy: # lazy is only for testing + self.render_func = None + return + (render_code, + self.render_func + ) = self._get_render_func(optimize) + if not keep_source: + self.source = None + + @classmethod + def from_path(cls, path, name=None, encoding='utf-8', **kw): + """classmethod. + Builds a template from a filepath. + args: + ``path`` + kwargs: + ``name`` default ``None``. + ``encoding`` default ``utf-8``. + """ + (source, abs_path) = load_template_path(path) + if not name: + name = path + return cls(name=name, source=source, source_file=abs_path, **kw) + + @classmethod + def from_ast(cls, ast, name=None, **kw): + """classmethod + Builds a template from an AST representation. + This is only provided as an invert to `to_ast` + args: + ``ast`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + (render_code, + render_func + ) = template._ast_to_render_func(ast) + template.render_func = render_func + template.is_convertable = False + return template + + @classmethod + def from_python_string(cls, python_string, name=None, **kw): + """classmethod + Builds a template from an python string representation. + This is only provided as an invert to `to_python_string` + args: + ``python_string`` + kwargs: + ``name`` default ``None``. 
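+
+ The string must define a 'render' function (as produced by
+ to_python_string); it is compiled and exec'd to recover that function.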
+ """ + template = cls(name=name, source='', lazy=True, **kw) + render_code = _python_compile(python_string) + template.render_func = _python_exec(render_code, name='render') + template.is_convertable = False + return template + + @classmethod + def from_python_code(cls, python_code, name=None, **kw): + """classmethod + Builds a template from python code object. + This is only provided as an invert to `to_python_code` + args: + ``python_code`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + template.render_func = _python_exec(python_code, name='render') + template.is_convertable = False + return template + + @classmethod + def from_python_func(cls, python_func, name=None, **kw): + """classmethod + Builds a template from an compiled python function. + This is only provided as an invert to `to_python_func` + args: + ``python_func`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + template.render_func = python_func + template.is_convertable = False + return template + + def to_ast(self, optimize=True, raw=False): + """Generates the AST for a given template. + This can be inverted with the classmethod `from_ast`. + + kwargs: + ``optimize`` default ``True``. + ``raw`` default ``False``. + + Note: this is just a public function for `_get_ast` + """ + if not self.is_convertable: + raise TemplateConversionException() + return self._get_ast(optimize=optimize, raw=raw) + + def to_python_string(self, optimize=True): + """Generates the Python string representation for a template. + This can be inverted with the classmethod `from_python_string`. + + kwargs: + ``optimize`` default ``True``. + + Note: this is just a public method for `_get_render_string` + """ + if not self.is_convertable: + raise TemplateConversionException() + python_string = self._get_render_string(optimize=optimize) + return python_string + + def to_python_code(self, optimize=True): + """Generates the Python code representation for a template. + This can be inverted with the classmethod `from_python_code`. + + kwargs: + ``optimize`` default ``True``. + + Note: this is just a public method for `_get_render_func` + """ + if not self.is_convertable: + raise TemplateConversionException() + (python_code, + python_string + ) = self._get_render_func(optimize=optimize) + return python_code + + def to_python_func(self, optimize=True): + """Makes the python render func available. + This can be inverted with the classmethod `from_python_func`. 
+ + Note: this is just a public method for `_get_render_func` + """ + if self.render_func: + return self.render_func + if not self.is_convertable: + raise TemplateConversionException() + (render_code, render_func) = self._get_render_func(optimize=optimize) + return render_func + + def render(self, model, env=None): + env = env or self.env + rendered = [] + + def tmp_cb(err, result): + # TODO: get rid of + if err: + print('Error on template %r: %r' % (self.name, err)) + raise RenderException(err) + else: + rendered.append(result) + return result + + chunk = Stub(tmp_cb).head + self.render_chunk(chunk, Context.wrap(env, model)).end() + return rendered[0] + + def render_chunk(self, chunk, context): + if not self.render_func: + # to support laziness for testing + (render_code, + self.render_func + ) = self._get_render_func() + return self.render_func(chunk, context) + + def _get_tokens(self): + if not self.source: + return None + return tokenize(self.source) + + def _get_ast(self, optimize=False, raw=False): + if not self.source: + return None + try: + dast = ParseTree.from_source(self.source).to_dust_ast() + except ParseError as pe: + pe.source_file = self.source_file + raise + if raw: + return dast + return self.env.filter_ast(dast, optimize) + + def _get_render_string(self, optimize=True): + """ + Uses `optimize=True` by default because it makes the output easier to + read and more like dust's docs + + This was previously `_get_render_func(..., ret_str=True)` + """ + ast = self._get_ast(optimize) + if not ast: + return None + # for testing/dev purposes + return Compiler(self.env)._gen_python(ast) + + def _get_render_func(self, optimize=True, ret_str=False): + """ + Uses `optimize=True` by default because it makes the output easier to + read and more like dust's docs + + split `ret_str=True` into `_get_render_string()` + + Note that this doesn't save the render_code/render_func. + It is compiled as needed. + """ + ast = self._get_ast(optimize) + if not ast: + return (None, None) + # consolidated the original code into _ast_to_render_func as-is below + (render_code, + render_func + ) = self._ast_to_render_func(ast) + return (render_code, render_func) + + def _ast_to_render_func(self, ast): + """this was part of ``_get_render_func`` but is better implemented + as an separate function so that AST can be directly loaded. 
+ """ + compiler = Compiler(self.env) + (python_code, + python_func + ) = compiler.compile(ast) + return (python_code, python_func) + + def __repr__(self): + cn = self.__class__.__name__ + name, source_file = self.name, self.source_file + if not source_file: + return '<%s name=%r>' % (cn, name) + return '<%s name=%r source_file=%r>' % (cn, name, source_file) + + +class AshesException(Exception): + pass + + +class TemplateNotFound(AshesException): + def __init__(self, name): + self.name = name + super(TemplateNotFound, self).__init__('could not find template: %r' + % name) + + +class RenderException(AshesException): + pass + + +class ParseError(AshesException): + token = None + source_file = None + + def __init__(self, message, line_no=None, token=None): + self.message = message + self.token = token + self._line_no = line_no + + super(ParseError, self).__init__(self.__str__()) + + @property + def line_no(self): + if self._line_no: + return self._line_no + if getattr(self.token, 'start_line', None) is not None: + return self.token.start_line + return None + + @line_no.setter + def set_line_no(self, val): + self._line_no = val + + def __str__(self): + msg = self.message + infos = [] + if self.source_file: + infos.append('in %s' % self.source_file) + if self.line_no is not None: + infos.append('line %s' % self.line_no) + if infos: + msg += ' (%s)' % ' - '.join(infos) + return msg + + +class TemplateConversionException(AshesException): + def __init__(self): + super(TemplateConversionException, self).__init__('only templates from source ' + 'are convertable') + + +class BaseAshesEnv(object): + template_type = Template + autoescape_filter = 'h' + + def __init__(self, + loaders=None, + helpers=None, + filters=None, + special_chars=None, + optimizers=None, + pragmas=None, + auto_reload=True): + self.templates = {} + self.loaders = list(loaders or []) + self.filters = dict(DEFAULT_FILTERS) + if filters: + self.filters.update(filters) + self.helpers = dict(DEFAULT_HELPERS) + if helpers: + self.helpers.update(helpers) + self.special_chars = dict(DEFAULT_SPECIAL_CHARS) + if special_chars: + self.special_chars.update(special_chars) + self.optimizers = dict(DEFAULT_OPTIMIZERS) + if optimizers: + self.optimizers.update(optimizers) + self.pragmas = dict(DEFAULT_PRAGMAS) + if pragmas: + self.pragmas.update(pragmas) + self.auto_reload = auto_reload + + def log(self, level, name, message): + return # print(level, '-', name, '-', message) + + def render(self, name, model): + tmpl = self.load(name) + return tmpl.render(model, self) + + def load(self, name): + """Loads a template. + + args: + ``name`` template name + """ + try: + template = self.templates[name] + except KeyError: + template = self._load_template(name) + self.register(template) + if self.auto_reload: + if not getattr(template, 'source_file', None): + return template + mtime = os.path.getmtime(template.source_file) + if mtime > template.last_mtime: + template = self._load_template(name) + self.register(template) + return self.templates[name] + + def _load_template(self, name): + for loader in self.loaders: + try: + source = loader.load(name, env=self) + except TemplateNotFound: + continue + else: + return source + raise TemplateNotFound(name) + + def load_all(self, do_register=True, **kw): + """Loads all templates. 
+ + args: + ``do_register`` default ``True` + """ + all_tmpls = [] + for loader in reversed(self.loaders): + # reversed so the first loader to have a template + # will take precendence on registration + if callable(getattr(loader, 'load_all', None)): + tmpls = loader.load_all(self, **kw) + all_tmpls.extend(tmpls) + if do_register: + for t in tmpls: + self.register(t) + return all_tmpls + + def register(self, template, name=None): + if name is None: + name = template.name + self.templates[name] = template + return + + def register_path(self, path, name=None, **kw): + """\ + Reads in, compiles, and registers a single template from a specific + path to a file containing the dust source code. + """ + kw['env'] = self + ret = self.template_type.from_path(path=path, name=name, **kw) + self.register(ret) + return ret + + def register_source(self, name, source, **kw): + """\ + Compiles and registers a single template from source code + string. Assumes caller already decoded the source string. + """ + kw['env'] = self + ret = self.template_type(name=name, source=source, **kw) + self.register(ret) + return ret + + def filter_ast(self, ast, optimize=True): + if optimize: + optimizers = self.optimizers + else: + optimizers = UNOPT_OPTIMIZERS + optimizer = Optimizer(optimizers, self.special_chars) + ret = optimizer.optimize(ast) + return ret + + def apply_filters(self, string, auto, filters): + filters = filters or [] + if not filters: + if auto: + filters = ['s', auto] + else: + filters = ['s'] + elif filters[-1] != 's': + if auto and auto not in filters: + filters += ['s', auto] + else: + filters += ['s'] + for f in filters: + filt_fn = self.filters.get(f) + if filt_fn: + string = filt_fn(string) + return string + + def load_chunk(self, name, chunk, context): + try: + tmpl = self.load(name) + except TemplateNotFound as tnf: + context.env.log('error', 'load_chunk', + 'TemplateNotFound error: %r' % tnf.name) + return chunk.set_error(tnf) + return tmpl.render_chunk(chunk, context) + + def __iter__(self): + return self.templates.itervalues() + + +class AshesEnv(BaseAshesEnv): + """ + A slightly more accessible Ashes environment, with more + user-friendly options exposed. + """ + def __init__(self, paths=None, keep_whitespace=True, *a, **kw): + if isinstance(paths, string_types): + paths = [paths] + self.paths = list(paths or []) + self.keep_whitespace = keep_whitespace + self.is_strict = kw.pop('is_strict', False) + exts = list(kw.pop('exts', DEFAULT_EXTENSIONS)) + + super(AshesEnv, self).__init__(*a, **kw) + + for path in self.paths: + tpl = TemplatePathLoader(path, exts) + self.loaders.append(tpl) + + def filter_ast(self, ast, optimize=None): + optimize = not self.keep_whitespace # preferences override + return super(AshesEnv, self).filter_ast(ast, optimize) + + +def iter_find_files(directory, patterns, ignored=None): + """\ + Finds files under a `directory`, matching `patterns` using "glob" + syntax (e.g., "*.txt"). It's also possible to ignore patterns with + the `ignored` argument, which uses the same format as `patterns. 
+ + (from osutils.py in the boltons package) + """ + if isinstance(patterns, string_types): + patterns = [patterns] + pats_re = re.compile('|'.join([fnmatch.translate(p) for p in patterns])) + + if not ignored: + ignored = [] + elif isinstance(ignored, string_types): + ignored = [ignored] + ign_re = re.compile('|'.join([fnmatch.translate(p) for p in ignored])) + for root, dirs, files in os.walk(directory): + for basename in files: + if pats_re.match(basename): + if ignored and ign_re.match(basename): + continue + filename = os.path.join(root, basename) + yield filename + return + + +def walk_ext_matches(path, exts=None, ignored=None): + if exts is None: + exts = DEFAULT_EXTENSIONS + if ignored is None: + ignored = DEFAULT_IGNORED_PATTERNS + patterns = list(['*.' + e.lstrip('*.') for e in exts]) + + return sorted(iter_find_files(directory=path, + patterns=patterns, + ignored=ignored)) + + +class TemplatePathLoader(object): + def __init__(self, root_path, exts=None, encoding='utf-8'): + self.root_path = os.path.normpath(root_path) + self.encoding = encoding + self.exts = exts or list(DEFAULT_EXTENSIONS) + + def load(self, path, env=None): + env = env or default_env + norm_path = os.path.normpath(path) + if path.startswith('../'): + raise ValueError('no traversal above loader root path: %r' % path) + if not path.startswith(self.root_path): + norm_path = os.path.join(self.root_path, norm_path) + abs_path = os.path.abspath(norm_path) + template_name = os.path.relpath(abs_path, self.root_path) + template_type = env.template_type + return template_type.from_path(name=template_name, + path=abs_path, + encoding=self.encoding, + env=env) + + def load_all(self, env, exts=None, **kw): + ret = [] + exts = exts or self.exts + tmpl_paths = walk_ext_matches(self.root_path, exts) + for tmpl_path in tmpl_paths: + ret.append(self.load(tmpl_path, env)) + return ret + + +class FlatteningPathLoader(TemplatePathLoader): + """ + I've seen this mode of using dust templates in a couple places, + but really it's lazy and too ambiguous. It increases the chances + of silent conflicts and makes it hard to tell which templates refer + to which just by looking at the template code. 
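+
+ Templates are registered under their basename alone, so files with
+ the same name in different subdirectories shadow one another.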
+ """ + def __init__(self, *a, **kw): + self.keep_ext = kw.pop('keep_ext', True) + super(FlatteningPathLoader, self).__init__(*a, **kw) + + def load(self, *a, **kw): + tmpl = super(FlatteningPathLoader, self).load(*a, **kw) + name = os.path.basename(tmpl.name) + if not self.keep_ext: + name, ext = os.path.splitext(name) + tmpl.name = name + return tmpl + +try: + import bottle +except ImportError: + pass +else: + class AshesBottleTemplate(bottle.BaseTemplate): + extensions = list(bottle.BaseTemplate.extensions) + extensions.extend(['ash', 'ashes', 'dust']) + + def prepare(self, **options): + if not self.source: + self.source = self._load_source(self.name) + if self.source is None: + raise TemplateNotFound(self.name) + + options['name'] = self.name + options['source'] = self.source + options['source_file'] = self.filename + for key in ('optimize', 'keep_source', 'env'): + if key in self.settings: + options.setdefault(key, self.settings[key]) + env = self.settings.get('env', default_env) + # I truly despise 2.6.4's unicode kwarg bug + options = dict([(str(k), v) for k, v in options.iteritems()]) + self.tpl = env.register_source(**options) + + def _load_source(self, name): + fname = self.search(name, self.lookup) + if not fname: + return + with codecs.open(fname, "rb", self.encoding) as f: + return f.read() + + def render(self, *a, **kw): + for dictarg in a: + kw.update(dictarg) + context = self.defaults.copy() + context.update(kw) + return self.tpl.render(context) + + from functools import partial as _fp + ashes_bottle_template = _fp(bottle.template, + template_adapter=AshesBottleTemplate) + ashes_bottle_view = _fp(bottle.view, + template_adapter=AshesBottleTemplate) + del bottle + del _fp + + +ashes = default_env = AshesEnv() + + +def _main(): + # TODO: accidentally unclosed tags may consume + # trailing buffers without warning + try: + tmpl = ('{@eq key=hello value="True" type="boolean"}' + '{hello}, world' + '{:else}' + 'oh well, world' + '{/eq}' + ', {@size key=hello/} characters') + ashes.register_source('hi', tmpl) + print(ashes.render('hi', {'hello': 'ayy'})) + except Exception as e: + import pdb;pdb.post_mortem() + raise + + ae = AshesEnv(filters={'cn': comma_num}) + ae.register_source('cn_tmpl', 'comma_numd: {thing|cn}') + # print(ae.render('cn_tmpl', {'thing': 21000})) + ae.register_source('tmpl', '{`{ok}thing`}') + print(ae.render('tmpl', {'thing': 21000})) + + ae.register_source('tmpl2', '{test|s}') + out = ae.render('tmpl2', {'test': [''] * 10}) + print(out) + + ae.register_source('tmpl3', '{@iterate sort="desc" sort_key=1 key=lol}' + '{$idx} - {$0}: {$1}{~n}{/iterate}') + out = ae.render('tmpl3', {'lol': {'uno': 1, 'dos': 2}}) + print(out) + out = ae.render('tmpl3', {'lol': [(1, 2, 3), (4, 5, 6)]}) + print(out) + + print(escape_uri_path("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) + print(escape_uri_component("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) + print('') + ae.register_source('tmpl4', '{#iterable}{$idx_1}/{$len}: {.}{@sep}, {/sep}{/iterable}') + out = ae.render('tmpl4', {'iterable': range(100, 108)}) + print(out) + + tmpl = '''\ + {#.} + row{~n} + {#.} + {.}{~n} + {/.} + {/.}''' + ashes.keep_whitespace = False + ashes.autoescape_filter = '' + ashes.register_source('nested_lists', tmpl) + print(ashes.render('nested_lists', [[1, 2], [3, 4]])) + + +class CLIError(ValueError): + pass + + +def _simple_render(template_path, template_literal, env_path_list, + model_path, model_literal, + trim_whitespace, filter, no_filter, + 
output_path, output_encoding, verbose): + # TODO: default value (placeholder for missing values) + env = AshesEnv(env_path_list) + env.keep_whitespace = not trim_whitespace + if filter in env.filters: + env.autoescape_filter = filter + else: + raise CLIError('unexpected filter %r, expected one of %r' + % (filter, env.filters)) + if no_filter: + env.autoescape_filter = '' + + if template_literal: + tmpl_obj = env.register_source('_literal_template', template_literal) + else: + if not template_path: + raise CLIError('expected template or template literal') + try: + tmpl_obj = env.load(template_path) + except (KeyError, TemplateNotFound): + tmpl_obj = env.register_path(template_path) + + if model_literal: + model = json.loads(model_literal) + elif not model_path: + raise CLIError('expected model or model literal') + elif model_path == '-': + model = json.load(sys.stdin) + else: + with open(model_path) as f: + model = json.load(f) + + output_text = tmpl_obj.render(model) + output_bytes = output_text.encode(output_encoding) + if output_path == '-': + print(output_bytes) + else: + with open(output_path, 'w') as f: + f.write(output_bytes) + return + + +def main(): + # using optparse for backwards compat with 2.6 (and earlier, maybe) + from optparse import OptionParser + + prs = OptionParser(description="render a template using a JSON input", + version='ashes %s' % (__version__,)) + ao = prs.add_option + ao('--env-path', + help="paths to search for templates, separate paths with :") + ao('--filter', default='h', + help="autoescape values with this filter, defaults to 'h' for HTML") + ao('--no-filter', action="store_true", + help="disables default HTML-escaping filter, overrides --filter") + ao('--trim-whitespace', action="store_true", + help="removes whitespace on template load") + ao('-m', '--model', dest='model_path', + help="path to the JSON model file, default - for stdin") + ao('-M', '--model-literal', + help="the literal string of the JSON model, overrides model") + ao('-o', '--output', dest='output_path', default='-', + help="path to the output file, default - for stdout") + ao('--output-encoding', default='utf-8', + help="encoding for the output, default utf-8") + ao('-t', '--template', dest='template_path', + help="path of template to render, absolute or relative to env-path") + ao('-T', '--template-literal', + help="the literal string of the template, overrides template") + ao('--verbose', help="emit extra output on stderr") + + opts, _ = prs.parse_args() + kwargs = dict(opts.__dict__) + + kwargs['env_path_list'] = (kwargs.pop('env_path') or '').split(':') + try: + _simple_render(**kwargs) + except CLIError as clie: + err_msg = '%s; use --help option for more info.' 
% (clie.args[0],) + prs.error(err_msg) + return + + +if __name__ == '__main__': + main() diff --git a/src/command_line.py b/src/command_line.py index e8e15bc..8caef65 100644 --- a/src/command_line.py +++ b/src/command_line.py @@ -54,6 +54,7 @@ options['p'] = [ 'preprocess' , 'Prints a preprocessed file to standard output options['r'] = [ 'no_directives', 'Ignore all compiler directives !DEC$ and !DIR$', 0 ] options['s'] = [ 'substitute' , 'Substitute values in do loops for generating specific optimized code.', 1 ] options['t'] = [ 'touch' , 'Display which entities are touched when touching the variable given as an argument.', 1 ] +options['T'] = [ 'Task' , 'Auto-parallelism ', 0 ] options['v'] = [ 'version' , 'Prints version of irpf90', 0 ] options['w'] = [ 'warnings' , 'Activate Warnings', 0 ] options['z'] = [ 'openmp' , 'Activate for OpenMP code', 0 ] diff --git a/src/entity.py b/src/entity.py index 88afb7c..e727ee8 100644 --- a/src/entity.py +++ b/src/entity.py @@ -501,20 +501,157 @@ class Entity(object): if not self.is_main: return [] + from ashes import AshesEnv + template = ''' +{#l_allocate} +{subroutine|s} +{/l_allocate} + +{?inline} +!DEC$ ATTRIBUTES FORCEINLINE :: provide_{name} +{/inline} +subroutine provide_{name} + + {?do_openmp} + use omp_lib + {/do_openmp} + + {#l_module} + {name} + {/l_module} + + implicit none + character*(8+{@size key=name/}),parameter :: irp_here = 'provide_{name}' + + {?do_openmp} + call irp_lock_{name}(.True.) + {/do_openmp} + + {?do_debug} + call irp_enter(irp_here) + {/do_debug} + + {#l_children} + if (.NOT.{name}_is_built) then + call provide_{name} + endif + {/l_children} + + {#do_task} + !$omp task default(shared) {depend} + {/do_task} + + {#l_allocate} + call allocate_{name} + {/l_allocate} + call bld_{name} + + {#do_task} + !$omp end task + {/do_task} + + {name}_is_built = .TRUE. + + {?do_openmp} + call irp_lock_{name}(.False.) 
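+   ! (release the per-entity lock acquired at the top of provide_{name})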
+ {/do_openmp} + + {?do_debug} + call irp_leave(irp_here) + {/do_debug} + +end subroutine provide_{name} +''' + from util import mangled + name = self.name - same_as = self.same_as + var = self.d_entity[name] + l_module = [ {'name':x} for x in build_use([self.name] + self.to_provide, self.d_entity)] + l_allocate = [ {'name':n, 'subroutine':self.build_alloc(n)} for n in self.l_name if self.d_entity[n].dim] + l_children = [ {'name':x} for x in mangled(self.to_provide, self.d_entity) ] + + in_ = ['depend(in: %s)' % n for n in self.to_provide] + out_ = ['depend(out: %s)' % n for n in self.l_name] + do_task = [ {'depend':' '.join(in_ + out_) } ] if command_line.do_Task else [] + + ashes_env = AshesEnv() + ashes_env.register_source('provide',template) + + l = ashes_env.render('provide', {'name': name, + 'l_module':l_module, + 'l_allocate':l_allocate, + 'l_children':l_children, + 'do_debug':command_line.do_debug, + 'do_openmp':command_line.do_openmp, + 'do_task':do_task}) + + return l.split('\n') + + def build_alloc(self,name): + var = self.d_entity[name] + from ashes import AshesEnv + template = (""" + +subroutine allocate_{name} + + {#l_module} + {name} + {/l_module} + + character*(9+{@size key=name/}),parameter :: irp_here = 'allocate_{name}' + integer :: irp_err + + if ( allocated({name}) .AND.( & + {#l_dim} + ( SIZE({name},{rank}) /= {value} ) {@sep}.OR.{/sep} & + {/l_dim} + )) then + + {?do_memory} + print *, irp_here//': Deallocated {name}' + {/do_memory} + + deallocate( {name}, stat=irp_err ) + + if (irp_err /= 0) then + print *, irp_here//': Deallocation failed: {name}' + print *,' size: {dim}' + endif + + endif + + if ( .NOT. allocated({name}) ) then + + {?do_memory} + print *, irp_here//': Allocate {name} ({dim})' + {/do_memory} + + {^corray} + allocate({name} ({dim}), stat=irp_err) + {:else} + allocate({name} ({dim}[*]), stat=irp_err) + {/corray} + if (irp_err /= 0) then + print *, irp_here//': Allocation failed: {name}' + print *,' size: {dim}' + endif + + endif + +end subroutine + +""") def dimsize(x): - # (str) -> str - '''Compute the number of element in the array''' - try: - b0, b1 = x.split(':') - except ValueError: - return x + # (str) -> str + '''Compute the number of element in the array''' + try: + b0, b1 = x.split(':') + except ValueError: + return x - b0_is_digit = b0.replace('-', '').isdigit() - b1_is_digit = b1.replace('-', '').isdigit() - + b0_is_digit = b0.replace('-', '').isdigit() + b1_is_digit = b1.replace('-', '').isdigit() if b0_is_digit and b1_is_digit: size = str(int(b1) - int(b0) + 1) @@ -524,105 +661,20 @@ class Entity(object): size = "(%d) - (%s)" % (int(b1) + 1, b0) else: size = "(%s) - (%s) + 1" % (b1, b0) - return size - def build_alloc(name): + l_dim = [{'name':name, 'rank':i+1, 'value':dimsize(k)} for i, k in enumerate(var.dim)] + l_module = [ {'name':x} for x in build_use([var.name] + var.needs, self.d_entity) ] - var = self.d_entity[name] - if var.dim == []: - return [] - - from util import build_dim - - def print_size(): - return " " * 5 + "print *, ' size: {0}'".format(build_dim(var.dim)) - - def check_dimensions(): - l = ["(%s>0)" % dimsize(x) for x in var.dim] - str_ = ".and.".join(l) - return " if (%s) then" % (str_) - - def dimensions_OK(): - result = [" irp_dimensions_OK = .True."] - for i, k in enumerate(var.dim): - result.append(" irp_dimensions_OK = irp_dimensions_OK.AND.(SIZE(%s,%d)==(%s))" - % (name, i + 1, dimsize(k))) - return result - - def do_allocate(): - if command_line.coarray: - result = " allocate(%s(%s)[*],stat=irp_err)" - else: - 
result = " allocate(%s(%s),stat=irp_err)" - result = result % (name, ','.join(var.dim)) - if command_line.do_memory: - tmp = "\n print *, %s, 'Allocating %s(%s)'" - d = ','.join(self.dim) - result += tmp % ('size(' + name + ')', name, d) - return result - - result = [" if (allocated (%s) ) then" % (name)] - result += dimensions_OK() - result += [ - " if (.not.irp_dimensions_OK) then", " deallocate(%s,stat=irp_err)" % (name), - " if (irp_err /= 0) then", " print *, irp_here//': Deallocation failed: %s'" % - (name), print_size(), " endif" - ] - - if command_line.do_memory: - result += [" print *, 'Deallocating %s'" % (name)] - result.append(check_dimensions()) - result.append(do_allocate()) - result += [\ - " if (irp_err /= 0) then", - " print *, irp_here//': Allocation failed: %s'"%(name), - print_size(), - " endif", - " endif", - " endif", - " else" ] - result.append(check_dimensions()) - result.append(do_allocate()) - result += [ - " if (irp_err /= 0) then", " print *, irp_here//': Allocation failed: %s'" % - (name), print_size(), " endif", " endif", " endif" - ] - return result - - result = [] - if command_line.directives and command_line.inline in ["all", "providers"]: - result += ["!DEC$ ATTRIBUTES FORCEINLINE :: provide_%s" % (name)] - result += ["subroutine provide_%s" % (name)] - result += build_use([same_as] + self.to_provide, self.d_entity) - if command_line.do_openmp: - result += [" use omp_lib"] - result.append(" implicit none") - length = len("provide_%s" % (name)) - result += [ - " character*(%d) :: irp_here = 'provide_%s'" % (length, name), - " integer :: irp_err ", - " logical :: irp_dimensions_OK", - "!$ integer :: nthreads" - ] - if command_line.do_openmp: - result.append(" call irp_lock_%s(.True.)" % (same_as)) - if command_line.do_assert or command_line.do_debug: - result.append(" call irp_enter(irp_here)") - result += build_call_provide(self.to_provide, self.d_entity) - result += flatten(map(build_alloc, self.l_name)) - result += [ - " if (.not.%s_is_built) then" % (same_as), " call bld_%s" % (same_as), - " %s_is_built = .True." 
% (same_as), "" - ] - result += [" endif"] - if command_line.do_assert or command_line.do_debug: - result.append(" call irp_leave(irp_here)") - if command_line.do_openmp: - result.append(" call irp_lock_%s(.False.)" % (same_as)) - result.append("end subroutine provide_%s" % (name)) - result.append("") - return result + ashes_env = AshesEnv() + ashes_env.register_source('hello',template) + + return ashes_env.render('hello', {'name': name, + 'dim':','.join(var.dim), + 'corray': command_line.coarray, + 'l_dim': l_dim, + 'l_module':l_module, + 'do_memory':command_line.do_memory}) ########################################################## @irpy.lazy_property diff --git a/src/preprocessed_text.py b/src/preprocessed_text.py index 75e6308..2c5cfaf 100644 --- a/src/preprocessed_text.py +++ b/src/preprocessed_text.py @@ -667,6 +667,11 @@ def irp_simple_statements(text): assert type(line) == Program program_name = line.lower.split()[1] temp = [Program(0, "program irp_program", program_name)] + + if command_line.do_Task: + for i in [" call omp_set_nested(.TRUE.)", "!$omp parallel", "!$omp single"]: + temp += [Simple_line(0, i, line.filename)] + if command_line.do_profile: temp += [Simple_line(0, "call irp_init_timer()", line.filename)] if command_line.do_openmp: @@ -676,10 +681,16 @@ def irp_simple_statements(text): temp += [Simple_line(0, "call irp_print_timer()", line.filename)] temp += [Simple_line(0, " call irp_finalize_%s()" % (irp_id), line.filename)] + + if command_line.do_Task: + for i in ["!$omp taskwait","!$omp end single", "!$omp end parallel"]: + temp += [Simple_line(0, i, line.filename)] + temp += [End(0, "end program", line.filename)] result = temp + process_subroutine( Subroutine(line.i, "subroutine %s" % (program_name, ), line.filename)) + return result d = { From 243fd9d4d492b6f79205a0c40e8bd77e1832cc37 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Wed, 8 Feb 2017 14:37:54 -0600 Subject: [PATCH 17/31] // with touch --- src/command_line.py | 11 +++--- src/entity.py | 88 +++++++++++++++++++++++++++++---------------- src/irpf90.py | 3 +- 3 files changed, 67 insertions(+), 35 deletions(-) diff --git a/src/command_line.py b/src/command_line.py index 8caef65..e82b8db 100644 --- a/src/command_line.py +++ b/src/command_line.py @@ -54,11 +54,11 @@ options['p'] = [ 'preprocess' , 'Prints a preprocessed file to standard output options['r'] = [ 'no_directives', 'Ignore all compiler directives !DEC$ and !DIR$', 0 ] options['s'] = [ 'substitute' , 'Substitute values in do loops for generating specific optimized code.', 1 ] options['t'] = [ 'touch' , 'Display which entities are touched when touching the variable given as an argument.', 1 ] -options['T'] = [ 'Task' , 'Auto-parallelism ', 0 ] options['v'] = [ 'version' , 'Prints version of irpf90', 0 ] options['w'] = [ 'warnings' , 'Activate Warnings', 0 ] options['z'] = [ 'openmp' , 'Activate for OpenMP code', 0 ] options['G'] = [ 'graph' , 'Print the dependecy-graph of the entities (dots format)', 0 ] +options['T'] = [ 'Task' , 'Auto-parallelism ', 1 ] class CommandLine(object): @@ -78,15 +78,15 @@ class CommandLine(object): @irpy.lazy_property def include_dir(self): - self._include_dir = [] + l = [] for o,a in self.opts: if o in [ "-I", '--'+options['I'][0] ]: if len(a) < 1: print "Error: -I option needs a directory" if a[-1] != '/': a = a+'/' - self._include_dir.append(a) - return self._include_dir + l.append(a) + return l @irpy.lazy_property def inline(self): @@ -211,6 +211,9 @@ do_$LONG = property(fget=do_$LONG) def do_run(self): 
return not(any( (self.do_version, self.do_help, self.do_preprocess, self.do_touch, self.do_init))) + @irpy.lazy_property + def do_Task(self): + return True command_line = CommandLine() diff --git a/src/entity.py b/src/entity.py index e727ee8..533e348 100644 --- a/src/entity.py +++ b/src/entity.py @@ -401,35 +401,56 @@ class Entity(object): # () -> List[str] '''Fabric the f90 routine who handle the cache invalidation''' + # Only one by EntityColleciton + if not self.is_main: + return [] + + template = ''' +subroutine touch_{name} + + {#l_module} + {name} + {/l_module} + + implicit none + character*(6+{@size key=name/}),parameter :: irp_here = 'touch_{name}' + + {?do_debug} + call irp_enter(irp_here) + {/do_debug} + + {#l_ancestor} + {name}_is_built = .False. + {/l_ancestor} + + {name}_is_built = .True. + + {?do_debug} + call irp_leave(irp_here) + {/do_debug} + +end subroutine touch_{name} +''' + # Only one by EntityColleciton if not self.is_main: return [] from util import mangled - parents = mangled(self.parents,self.d_entity) + l_parents = [{'name':n} for n in mangled(self.parents,self.d_entity)] name = self.name + l_module= [ {'name':n} for n in build_use(self.parents+[name],self.d_entity)] - result = ["subroutine touch_%s" % (name)] - - result += build_use(parents+[name],self.d_entity) - result.append(" implicit none") - - if command_line.do_debug: - length = str(len("touch_%s" % (name))) - result += [" character*(%s) :: irp_here = 'touch_%s'" % (length, name)] - result += [" call irp_enter(irp_here)"] - - result += map(lambda x: " %s_is_built = .False." % (x), parents) - result.append(" %s_is_built = .True." % (name)) - - if command_line.do_debug: - result.append(" call irp_leave(irp_here)") - - result.append("end subroutine touch_%s" % (name)) - result.append("") - - return result + from ashes import AshesEnv + ashes_env = AshesEnv() + ashes_env.register_source('touch',template) + l = ashes_env.render('touch', {'name': name, + 'l_module':l_module, + 'l_ancestor':l_parents, + 'do_debug':command_line.do_debug}) + return l.split('\n') + ########################################################## @irpy.lazy_property def locker(self): @@ -524,21 +545,24 @@ subroutine provide_{name} character*(8+{@size key=name/}),parameter :: irp_here = 'provide_{name}' {?do_openmp} - call irp_lock_{name}(.True.) + CALL irp_lock_{name}(.TRUE.) {/do_openmp} {?do_debug} - call irp_enter(irp_here) + CALL irp_enter(irp_here) {/do_debug} {#l_children} if (.NOT.{name}_is_built) then - call provide_{name} + CALL provide_{name} endif {/l_children} {#do_task} - !$omp task default(shared) {depend} + {?head_touch} + !$OMP TASKGROUP + {/head_touch} + !$OMP TASK DEFAULT(shared) {depend} {/do_task} {#l_allocate} @@ -546,18 +570,21 @@ subroutine provide_{name} {/l_allocate} call bld_{name} - {#do_task} - !$omp end task + {?do_task} + !$OMP END TASK + {?head_touch} + !$OMP END TASKGROUP + {/head_touch} {/do_task} {name}_is_built = .TRUE. {?do_openmp} - call irp_lock_{name}(.False.) + CALL irp_lock_{name}(.FALSE.) 
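+    ! (the build is flagged complete before the lock is released)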
    {/do_openmp}

    {?do_debug}
-    call irp_leave(irp_here)
+    CALL irp_leave(irp_here)
    {/do_debug}

end subroutine provide_{name}
'''
@@ -583,7 +610,8 @@ end subroutine provide_{name}
                            'l_children':l_children,
                            'do_debug':command_line.do_debug,
                            'do_openmp':command_line.do_openmp,
-                           'do_task':do_task})
+                           'do_task':do_task,
+                           'head_touch':self.is_self_touched})

         return l.split('\n')

diff --git a/src/irpf90.py b/src/irpf90.py
index a9c8d3c..506e0e4 100644
--- a/src/irpf90.py
+++ b/src/irpf90.py
@@ -77,7 +77,8 @@ def main():
         print '   }'

     comm_world.t_filename_parsed_text  # Initialize entity need. Dirty I know.
-
+
+    print_full_diagram(comm_world.d_entity.values())

     print 'digraph Compact { '
     print '  graph [ordering="out" splines=true overlap=false];'

From 982d771c611530d33fe23525492e7a6e6cde1860 Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 9 Feb 2017 11:47:05 -0600
Subject: [PATCH 18/31] Parallel (//) touch now working

---
 src/command_line.py |  8 ++---
 src/entity.py       | 73 ++++++++++++++++++++++++++--------------------
 2 files changed, 46 insertions(+), 35 deletions(-)

diff --git a/src/command_line.py b/src/command_line.py
index e82b8db..d070945 100644
--- a/src/command_line.py
+++ b/src/command_line.py
@@ -58,7 +58,7 @@ options['v'] = [ 'version'       , 'Prints version of irpf90', 0 ]
 options['w'] = [ 'warnings'     , 'Activate Warnings', 0 ]
 options['z'] = [ 'openmp'       , 'Activate for OpenMP code', 0 ]
 options['G'] = [ 'graph'        , 'Print the dependecy-graph of the entities (dots format)', 0 ]
-options['T'] = [ 'Task'          , 'Auto-parallelism ', 1 ]
+options['T'] = [ 'Task'          , 'Auto-parallelism ', 0 ]
 
 class CommandLine(object):
@@ -211,9 +211,9 @@ do_$LONG = property(fget=do_$LONG)
     def do_run(self):
         return not(any( (self.do_version, self.do_help, self.do_preprocess, self.do_touch, self.do_init)))
 
-    @irpy.lazy_property
-    def do_Task(self):
-        return True
+#    @irpy.lazy_property
+#    def do_Task(self):
+#        return True
 
 command_line = CommandLine()
diff --git a/src/entity.py b/src/entity.py
index 533e348..f48d095 100644
--- a/src/entity.py
+++ b/src/entity.py
@@ -253,6 +253,10 @@ class Entity(object):
         result.append("")
         return result
 
+    @irpy.lazy_property
+    def is_source_touch(self):
+        return (Touch in self.d_type_lines or SoftTouch in self.d_type_lines)
+
     @irpy.lazy_property_mutable
     def is_self_touched(self):
         '''Check if it will be modified (touch)'''
@@ -406,30 +410,32 @@ class Entity(object):
         return []
 
     template = '''
-subroutine touch_{name}
+SUBROUTINE touch_{name}
 
    {#l_module}
    {name}
    {/l_module}
 
-   implicit none
-   character*(6+{@size key=name/}),parameter :: irp_here = 'touch_{name}'
+   IMPLICIT NONE
+   {?do_debug}
+   CHARACTER*(6+{@size key=name/}),PARAMETER :: irp_here = 'touch_{name}'
+   {/do_debug}
 
    {?do_debug}
-   call irp_enter(irp_here)
+   CALL irp_enter(irp_here)
    {/do_debug}
 
    {#l_ancestor}
-   {name}_is_built = .False.
+   {name}_is_built = .FALSE.
    {/l_ancestor}
 
-   {name}_is_built = .True.
+   {name}_is_built = .TRUE.
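+   ! (everything depending on {name} is now stale; {name} itself stays valid)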
{?do_debug} - call irp_leave(irp_here) + CALL irp_leave(irp_here) {/do_debug} -end subroutine touch_{name} +END SUBROUTINE touch_{name} ''' # Only one by EntityColleciton @@ -449,7 +455,7 @@ end subroutine touch_{name} 'l_module':l_module, 'l_ancestor':l_parents, 'do_debug':command_line.do_debug}) - return l.split('\n') + return [i for i in l.split('\n') if i] ########################################################## @irpy.lazy_property @@ -531,7 +537,7 @@ end subroutine touch_{name} {?inline} !DEC$ ATTRIBUTES FORCEINLINE :: provide_{name} {/inline} -subroutine provide_{name} +SUBROUTINE provide_{name} {?do_openmp} use omp_lib @@ -542,7 +548,9 @@ subroutine provide_{name} {/l_module} implicit none + {?do_debug} character*(8+{@size key=name/}),parameter :: irp_here = 'provide_{name}' + {/do_debug} {?do_openmp} CALL irp_lock_{name}(.TRUE.) @@ -553,28 +561,23 @@ subroutine provide_{name} {/do_debug} {#l_children} - if (.NOT.{name}_is_built) then + IF (.NOT.{name}_is_built) THEN CALL provide_{name} - endif + ENDIF {/l_children} {#do_task} - {?head_touch} - !$OMP TASKGROUP - {/head_touch} !$OMP TASK DEFAULT(shared) {depend} {/do_task} {#l_allocate} - call allocate_{name} + CALL allocate_{name} {/l_allocate} - call bld_{name} + + CALL bld_{name} {?do_task} !$OMP END TASK - {?head_touch} - !$OMP END TASKGROUP - {/head_touch} {/do_task} {name}_is_built = .TRUE. @@ -587,7 +590,7 @@ subroutine provide_{name} CALL irp_leave(irp_here) {/do_debug} -end subroutine provide_{name} +END SUBROUTINE provide_{name} ''' from util import mangled @@ -610,10 +613,8 @@ end subroutine provide_{name} 'l_children':l_children, 'do_debug':command_line.do_debug, 'do_openmp':command_line.do_openmp, - 'do_task':do_task, - 'head_touch':self.is_self_touched}) - - return l.split('\n') + 'do_task':do_task}) + return [i for i in l.split('\n') if i.strip()] def build_alloc(self,name): var = self.d_entity[name] @@ -626,6 +627,7 @@ subroutine allocate_{name} {name} {/l_module} + character*(9+{@size key=name/}),parameter :: irp_here = 'allocate_{name}' integer :: irp_err @@ -715,12 +717,9 @@ end subroutine # ~#~#~#~#~# #Next return the first element of the iterator - ps_text = next(text for filename, text in self.cm_t_filename_parsed_text - if self.prototype.filename[0].startswith(filename)) - begin = next(i for i, (_, line) in enumerate(ps_text) - if isinstance(line, Begin_provider) if line.filename[1] == self.same_as) - end = next(begin + i for i, (_, line) in enumerate(ps_text[begin:]) - if isinstance(line, End_provider)) + ps_text = next(text for filename, text in self.cm_t_filename_parsed_text if self.prototype.filename[0].startswith(filename)) + begin = next(i for i, (_, line) in enumerate(ps_text) if isinstance(line, Begin_provider) if line.filename[1] == self.same_as) + end = next(begin + i for i, (_, line) in enumerate(ps_text[begin:]) if isinstance(line, End_provider)) # Now we now that the text is betern ps_text[begin:end] _, line_prototype = ps_text[begin] @@ -737,11 +736,23 @@ end subroutine text.append(([], Simple_line(line_prototype.i, " irp_rdtsc1 = irp_rdtsc()", line_prototype.filename))) + remove = 1 for vars, line in ps_text[begin + 1:end]: + + if 'call touch' in line.lower: + text += [([], Simple_line(line.i, '!$OMP TASKGROUP', line.filename))] + remove = -1 + text.append((vars, line)) text += map(lambda x: ([], Simple_line(line.i, x, line.filename)), build_call_provide(vars, self.d_entity)) + + if remove == 0: + text += [([], Simple_line(line.i, '!$OMP END TASKGROUP', line.filename))] + + remove +=1 + # 
~#~#~#~#~# # Create the subroutine. # ~#~#~#~#~# From 974775a9a59b23eb4be0a0d664eca67e0ff50d3b Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Thu, 16 Feb 2017 18:10:21 -0600 Subject: [PATCH 19/31] Add template. Repare OMP_Lock --- src/build_file.py | 10 +- src/entity.py | 497 ++++++++++-------------------------- src/irpy_files.py | 17 +- src/module.py | 34 +-- src/preprocessed_text.py | 8 +- src/templates/allocater.f90 | 47 ++++ src/templates/finalize.f90 | 13 + src/templates/irp_lock.f90 | 31 +++ src/templates/module.f90 | 40 +++ src/templates/provider.f90 | 43 ++++ src/templates/touch.f90 | 26 ++ src/touches.py | 35 +-- src/util.py | 43 +++- 13 files changed, 423 insertions(+), 421 deletions(-) create mode 100644 src/templates/allocater.f90 create mode 100644 src/templates/finalize.f90 create mode 100644 src/templates/irp_lock.f90 create mode 100644 src/templates/module.f90 create mode 100644 src/templates/provider.f90 create mode 100644 src/templates/touch.f90 diff --git a/src/build_file.py b/src/build_file.py index 8094f12..2115734 100644 --- a/src/build_file.py +++ b/src/build_file.py @@ -271,13 +271,21 @@ def create_build_remaining(f,ninja): if extension.lower() in ['f', 'f90']: result = ["build {target_o}: compile_fortran_{irp_id} {target_i}"] + result_make = [ + '{target_o}: {target_i}', + '\t@printf "F: {short_target_o} -> {short_target_i}\\n"', + "\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""] + elif extension.lower() in ['c']: result = ["build {target_o}: compile_c_{irp_id} {target_i}"] elif extension.lower() in ['cxx', 'cpp']: result = ["build {target_o}: compile_cxx_{irp_id} {target_i}"] result += [" short_in = {short_target_i}", " short_out = {short_target_o}", ""] - return '\n'.join(result).format(**locals()) + + result_final = result if ninja else result_make + + return '\n'.join(result_final).format(**locals()) def create_makefile(d_flags,d_var,irpf90_flags,ninja=True): diff --git a/src/entity.py b/src/entity.py index f48d095..33ee3ea 100644 --- a/src/entity.py +++ b/src/entity.py @@ -42,8 +42,8 @@ class Entity(object): ############################################################ def __init__(self, text, label, name=None, comm_world=None): - # (list[str], str, int, Irpy_comm_world) - '''Instantiate the object. + # (list[str], str, int, Irpy_comm_world) + '''Instantiate the object. Args: text: List of lines between BEGIN_PROVIDER and END_PROVIDER included @@ -59,18 +59,17 @@ class Entity(object): self.label = label self.text = text - self.same_as = text[0].filename[1] + self.same_as = text[0].filename[1] self.name = name if name else self.same_as self.comm_world = comm_world - # ~ # ~ # ~ # G l o b a l P r o p e r t y # ~ # ~ # ~ @irpy.lazy_property def d_entity(self): - # () -> Dict[str,Entity] + # () -> Dict[str,Entity] '''Create an alias to the global dictionary of Entity. Note: Be aware of the possiblity of Cyclic Dependency. @@ -79,7 +78,7 @@ class Entity(object): @irpy.lazy_property def cm_t_filename_parsed_text(self): - # () -> Tuple[str, Parsed_text] + # () -> Tuple[str, Parsed_text] '''Create an alias to the global tuple for parsed text Note: self.comm_world.t_filename_parsed_text need d_entity. 
@@ -89,22 +88,21 @@ class Entity(object): @irpy.lazy_property def d_type_lines(self): - # () -> Dict[Line, Tuple[int,Line] ] - '''Contruct a mapping table between the type of the line and the possition''' + # () -> Dict[Line, Tuple[int,Line] ] + '''Contruct a mapping table between the type of the line and the possition''' from collections import defaultdict d = defaultdict(list) for i, line in enumerate(self.text): d[type(line)] += [(i, line)] return d - # ~ # ~ # ~ # M u l t i p l e P r o v i d e r H a n d l e r # ~ # ~ # ~ @irpy.lazy_property def is_main(self): - # () -> bool - '''Check if this Entity is the main one + # () -> bool + '''Check if this Entity is the main one Exemple: BEGIN_PROVIDER [pi, double precision] & @@ -114,11 +112,10 @@ class Entity(object): ''' return self.name == self.same_as - @irpy.lazy_property def prototype(self): - # () -> Line - '''Find the declaration statement associated with the name of the provider + # () -> Line + '''Find the declaration statement associated with the name of the provider Exemple: BEGIN_PROVIDER [pi, double precision] & @@ -127,33 +124,33 @@ class Entity(object): if self.name == e, will return BEGIN_PROVIDER [e, double preision] ''' - d = self.d_type_lines - return next(line for _,line in d[Begin_provider]+d[Cont_provider] if line.filename[1] == self.name) + d = self.d_type_lines + return next(line for _, line in d[Begin_provider] + d[Cont_provider] + if line.filename[1] == self.name) @irpy.lazy_property def l_name(self): - # () -> List[str] - d = self.d_type_lines - return [line.filename[1] for _,line in d[Begin_provider]+d[Cont_provider] ] + # () -> List[str] + d = self.d_type_lines + return [line.filename[1] for _, line in d[Begin_provider] + d[Cont_provider]] @irpy.lazy_property def l_others_name(self): - # () -> List[str] - '''Extract the other entity-name defined''' - return [name for name in self.l_name if not name == self.name] - + # () -> List[str] + '''Extract the other entity-name defined''' + return [name for name in self.l_name if not name == self.name] @irpy.lazy_property def doc(self): - # () -> List[str] - doc = [line.text.lstrip()[1:] for _,line in self.d_type_lines[Doc]] + # () -> List[str] + doc = [line.text.lstrip()[1:] for _, line in self.d_type_lines[Doc]] if not doc: logger.warning("Entity '%s' is not documented" % (self.name)) return doc @irpy.lazy_property def documented(self): - #() -> bool + #() -> bool return bool(self.doc) # ~ # ~ # ~ @@ -162,8 +159,8 @@ class Entity(object): @irpy.lazy_property_mutable def is_written(self): - #() -> bool - '''Check if it will be written on disk''' + #() -> bool + '''Check if it will be written on disk''' return any(self.d_entity[i].is_written for i in self.parents) @irpy.lazy_property @@ -171,7 +168,7 @@ class Entity(object): if not self.is_main: result = [] else: - from util import mangled + from util import mangled name = self.name result = [ \ "subroutine writer_%s(irp_num)"%(name), @@ -210,7 +207,7 @@ class Entity(object): @irpy.lazy_property_mutable def is_read(self): - '''Check if it will be read from disk''' + '''Check if it will be read from disk''' return any(self.d_entity[i].is_read for i in self.parents) @irpy.lazy_property @@ -218,7 +215,7 @@ class Entity(object): if not self.is_main: result = [] else: - from util import mangled + from util import mangled name = self.name result = [ \ "subroutine reader_%s(irp_num)"%(name), @@ -255,7 +252,7 @@ class Entity(object): @irpy.lazy_property def is_source_touch(self): - return (Touch in self.d_type_lines or SoftTouch 
in self.d_type_lines) + return (Touch in self.d_type_lines or SoftTouch in self.d_type_lines) @irpy.lazy_property_mutable def is_self_touched(self): @@ -267,8 +264,8 @@ class Entity(object): '''If any of the children is touched, the entity is touched''' if self.is_self_touched or any(self.d_entity[i].is_touched for i in self.children): return True - - return False + + return False # ~ # ~ # ~ # INCLUDE, USE, CALL @@ -276,23 +273,23 @@ class Entity(object): @irpy.lazy_property def includes(self): - # () -> str - '''Extract the name of include who need be to be include in this Entity''' - return [line.filename for _,line in self.d_type_lines[Include]] + # () -> str + '''Extract the name of include who need be to be include in this Entity''' + return [line.filename for _, line in self.d_type_lines[Include]] @irpy.lazy_property def uses(self): - '''Extract the name of module who are used in this Entity''' - return [line.filename for _,line in self.d_type_lines[Use]] + '''Extract the name of module who are used in this Entity''' + return [line.filename for _, line in self.d_type_lines[Use]] @irpy.lazy_property def calls(self): - '''Extract the name ofthe function called by the entity''' + '''Extract the name ofthe function called by the entity''' - def extract_name(line): - return line.text.split('(', 1)[0].split()[1].lower() + def extract_name(line): + return line.text.split('(', 1)[0].split()[1].lower() - return [extract_name(line) for _,line in self.d_type_lines[Call] ] + return [extract_name(line) for _, line in self.d_type_lines[Call]] # ~ # ~ # ~ # Array Dimension @@ -300,8 +297,8 @@ class Entity(object): @irpy.lazy_property def dim(self): - # () -> List[str] - '''Extract the dimension of the needed array in a form of list of variable name + # () -> List[str] + '''Extract the dimension of the needed array in a form of list of variable name Exemple: BEGIN_PROVIDER [real, ao_num ] @@ -324,7 +321,6 @@ class Entity(object): else: return map(str.strip, x[1:-1].split(',')) - @irpy.lazy_property def allocate(self): # () -> List[Str] @@ -338,62 +334,54 @@ class Entity(object): # ~ # ~ # ~ # D e c l a r a t i o n # ~ # ~ # ~ - + @irpy.lazy_property + def is_protected(self): + return self.text[0].lower.startswith('begin_provider_immu') + @irpy.lazy_property def type(self): - # () -> str - '''Compute the fortran type code of the entity''' + # () -> str + '''Compute the fortran type code of the entity''' type_ = self.prototype.text.split(',')[0].split('[')[1].strip() - if not type_: - logger.error( "Error in definition of %s." % (self.name)) - sys.exit(1) + if not type_: + logger.error("Error in definition of %s." % (self.name)) + sys.exit(1) - if self.dim: + if self.dim: return "%s, allocatable" % (type_) else: return type_ @irpy.lazy_property - def header(self): - # () -> List[str] - '''Compute all the code needed to inistanticant the entity''' - - - name = self.name - str_ = " {type_} :: {name} {dim}".format(type_=self.type, name=name, dim=build_dim(self.dim, colons=True)) - - if command_line.coarray: - if not self.dim: - str_ += " [*]" - else: - str_ += " [:]" - - l = [str_] - if self.dim and command_line.align != '1': - l += [" !DIR$ ATTRIBUTES ALIGN: %s :: %s" % (command_line.align, name)] - - if self.is_main: - l += [" logical :: %s_is_built = .False." 
% (name)] - - return l + def d_header(self): + # () -> List[str] + '''Compute all the code needed to inistanticant the entity''' + import util + d_template = { + 'name': self.name, + 'type': self.type, + 'main': self.is_main, + 'dim': build_dim(self.dim,colons=True), + 'protected': '\n'.join(self.allocater+self.builder) if self.is_protected else False} + return d_template ############################################################ @irpy.lazy_property def fmodule(self): # () -> str - '''Contruct the name of the module who will contain the entity''' + '''Contruct the name of the module who will contain the entity''' name = self.prototype.filename[0].replace('/', '__').split('.irp.f')[0] return '%s_mod' % name ############################################################ @irpy.lazy_property def regexp(self): - # () -> Regex + # () -> Regex '''Compile a regex targeted to 'search' the name of this entity''' - import re + import re return re.compile(r"([^a-z0-9'\"_]|^)%s([^a-z0-9_]|$)" % (self.name), re.I).search # ~ # ~ # ~ @@ -401,118 +389,35 @@ class Entity(object): # ~ # ~ # ~ @irpy.lazy_property - def toucher(self): - # () -> List[str] - '''Fabric the f90 routine who handle the cache invalidation''' + def d_touche_template(self): + # () -> List[str] + '''Fabric the f90 routine who handle the cache invalidation''' # Only one by EntityColleciton if not self.is_main: - return [] + return {} - template = ''' -SUBROUTINE touch_{name} + from util import mangled - {#l_module} - {name} - {/l_module} - - IMPLICIT NONE - {?do_debug} - CHARACTER*(6+{@size key=name/}),PARAMETER :: irp_here = 'touch_{name}' - {/do_debug} - - {?do_debug} - CALL irp_enter(irp_here) - {/do_debug} - - {#l_ancestor} - {name}_is_built = .FALSE. - {/l_ancestor} - - {name}_is_built = .TRUE. 
- - {?do_debug} - CALL irp_leave(irp_here) - {/do_debug} - -END SUBROUTINE touch_{name} -''' - - # Only one by EntityColleciton - if not self.is_main: - return [] - - from util import mangled - l_parents = [{'name':n} for n in mangled(self.parents,self.d_entity)] - name = self.name - l_module= [ {'name':n} for n in build_use(self.parents+[name],self.d_entity)] - - from ashes import AshesEnv - ashes_env = AshesEnv() - ashes_env.register_source('touch',template) - - l = ashes_env.render('touch', {'name': name, - 'l_module':l_module, - 'l_ancestor':l_parents, - 'do_debug':command_line.do_debug}) - return [i for i in l.split('\n') if i] - - ########################################################## - @irpy.lazy_property - def locker(self): - if not command_line.do_openmp: - return [] - - name = self.name - result = ["subroutine irp_lock_%s(set)" % (name)] - result += [ - " use omp_lib", - " implicit none", - " logical, intent(in) :: set", - " integer(kind=omp_nest_lock_kind),save :: %s_lock" % (name), - " integer,save :: ifirst", - ] - if command_line.do_debug: - length = str(len("irp_lock_%s" % (name))) - result += [ - " character*(%s) :: irp_here = 'irp_lock_%s'" % (length, name), - " call irp_enter(irp_here)" - ] - - result += [ - " if (ifirst == 0) then", - " ifirst = 1", - " call omp_init_nest_lock(%s_lock)" % (name), - " endif", - " if (set) then", - " call omp_set_nest_lock(%s_lock)" % (name), - " else", - " call omp_unset_nest_lock(%s_lock)" % (name), - " endif", - ] - if command_line.do_debug: - result.append(" call irp_leave(irp_here)") - result.append("end subroutine irp_lock_%s" % (name)) - result.append("") - return result + return { + 'name': self.name, + 'l_module': [n for n in build_use(self.parents + [self.name], self.d_entity,use=False)], + 'l_ancestor': [n for n in mangled(self.parents, self.d_entity)]} ########################################################## + @irpy.lazy_property def free(self): - # () -> List[ str ] - '''Compute an part of a subroutine used to free a variable''' + # () -> List[ str ] + '''Compute an part of a subroutine used to free a variable''' name = self.name - result = ["!", - "! >>> FREE %s" % (name), - " %s_is_built = .False." % (self.same_as)] + result = ["!", "! >>> FREE %s" % (name), " %s_is_built = .False." % (self.same_as)] if self.dim: - result += [ - " if (allocated(%s)) then"%(name), - " deallocate (%s)"%(name)] + result += [" if (allocated(%s)) then" % (name), " deallocate (%s)" % (name)] if command_line.do_memory: - result += " print *, 'Deallocating %s'"%(name) + result += " print *, 'Deallocating %s'" % (name) result += [" endif"] @@ -522,190 +427,59 @@ END SUBROUTINE touch_{name} ########################################################## @irpy.lazy_property def provider(self): - # () -> List[str] - '''Create the fortran90 code for the EntityCollection''' + # () -> List[str] + '''Create the fortran90 code for the EntityCollection''' if not self.is_main: return [] - from ashes import AshesEnv - template = ''' -{#l_allocate} -{subroutine|s} -{/l_allocate} - -{?inline} -!DEC$ ATTRIBUTES FORCEINLINE :: provide_{name} -{/inline} -SUBROUTINE provide_{name} - - {?do_openmp} - use omp_lib - {/do_openmp} - - {#l_module} - {name} - {/l_module} - - implicit none - {?do_debug} - character*(8+{@size key=name/}),parameter :: irp_here = 'provide_{name}' - {/do_debug} - - {?do_openmp} - CALL irp_lock_{name}(.TRUE.) 
- {/do_openmp} - - {?do_debug} - CALL irp_enter(irp_here) - {/do_debug} - - {#l_children} - IF (.NOT.{name}_is_built) THEN - CALL provide_{name} - ENDIF - {/l_children} - - {#do_task} - !$OMP TASK DEFAULT(shared) {depend} - {/do_task} - - {#l_allocate} - CALL allocate_{name} - {/l_allocate} - - CALL bld_{name} - - {?do_task} - !$OMP END TASK - {/do_task} - - {name}_is_built = .TRUE. - - {?do_openmp} - CALL irp_lock_{name}(.FALSE.) - {/do_openmp} - - {?do_debug} - CALL irp_leave(irp_here) - {/do_debug} - -END SUBROUTINE provide_{name} -''' - from util import mangled + from util import mangled + import util name = self.name - var = self.d_entity[name] - l_module = [ {'name':x} for x in build_use([self.name] + self.to_provide, self.d_entity)] - l_allocate = [ {'name':n, 'subroutine':self.build_alloc(n)} for n in self.l_name if self.d_entity[n].dim] - l_children = [ {'name':x} for x in mangled(self.to_provide, self.d_entity) ] + l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity,use=False)] + l_children = [x for x in mangled(self.to_provide, self.d_entity)] - in_ = ['depend(in: %s)' % n for n in self.to_provide] - out_ = ['depend(out: %s)' % n for n in self.l_name] - do_task = [ {'depend':' '.join(in_ + out_) } ] if command_line.do_Task else [] + l = ashes_env.render('provider.f90', { + 'name': name, + 'l_module': l_module, + 'l_children_static': l_children, + 'do_debug': command_line.do_debug, + 'do_openmp': command_line.do_openmp, + 'do_task': command_line.do_Task, + 'do_corray': command_line.do_coarray, + 'dim': ','.join(self.dim), + }) + return [i for i in l.split('\n') if i.strip()] - ashes_env = AshesEnv() - ashes_env.register_source('provide',template) + @irpy.lazy_property + def allocater(self): + if not self.is_main: + return [] - l = ashes_env.render('provide', {'name': name, - 'l_module':l_module, - 'l_allocate':l_allocate, - 'l_children':l_children, - 'do_debug':command_line.do_debug, - 'do_openmp':command_line.do_openmp, - 'do_task':do_task}) - return [i for i in l.split('\n') if i.strip()] - - def build_alloc(self,name): - var = self.d_entity[name] - from ashes import AshesEnv - template = (""" + from util import mangled -subroutine allocate_{name} + import util + name = self.name + l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity,use=False)] + if self.is_protected: + l_module.remove(self.fmodule) - {#l_module} - {name} - {/l_module} - - character*(9+{@size key=name/}),parameter :: irp_here = 'allocate_{name}' - integer :: irp_err - - if ( allocated({name}) .AND.( & - {#l_dim} - ( SIZE({name},{rank}) /= {value} ) {@sep}.OR.{/sep} & - {/l_dim} - )) then + l_dim = [{'name': name, 'rank': i + 1, 'value': dimsize(k)} for i, k in enumerate(self.dim)] - {?do_memory} - print *, irp_here//': Deallocated {name}' - {/do_memory} - deallocate( {name}, stat=irp_err ) + l = ashes_env.render('allocater.f90', { + 'name': name, + 'l_module': l_module, + 'do_debug': command_line.do_debug, + 'do_corray': command_line.do_coarray, + 'dim': ','.join(self.dim), + 'l_dim': l_dim + }) + return [i for i in l.split('\n') if i.strip()] - if (irp_err /= 0) then - print *, irp_here//': Deallocation failed: {name}' - print *,' size: {dim}' - endif - - endif - - if ( .NOT. 
allocated({name}) ) then - - {?do_memory} - print *, irp_here//': Allocate {name} ({dim})' - {/do_memory} - - {^corray} - allocate({name} ({dim}), stat=irp_err) - {:else} - allocate({name} ({dim}[*]), stat=irp_err) - {/corray} - if (irp_err /= 0) then - print *, irp_here//': Allocation failed: {name}' - print *,' size: {dim}' - endif - - endif - -end subroutine - -""") - - def dimsize(x): - # (str) -> str - '''Compute the number of element in the array''' - try: - b0, b1 = x.split(':') - except ValueError: - return x - - b0_is_digit = b0.replace('-', '').isdigit() - b1_is_digit = b1.replace('-', '').isdigit() - - if b0_is_digit and b1_is_digit: - size = str(int(b1) - int(b0) + 1) - elif b0_is_digit: - size = "(%s) - (%d)" % (b1, int(b0) - 1) - elif b1_is_digit: - size = "(%d) - (%s)" % (int(b1) + 1, b0) - else: - size = "(%s) - (%s) + 1" % (b1, b0) - return size - - l_dim = [{'name':name, 'rank':i+1, 'value':dimsize(k)} for i, k in enumerate(var.dim)] - l_module = [ {'name':x} for x in build_use([var.name] + var.needs, self.d_entity) ] - - ashes_env = AshesEnv() - ashes_env.register_source('hello',template) - return ashes_env.render('hello', {'name': name, - 'dim':','.join(var.dim), - 'corray': command_line.coarray, - 'l_dim': l_dim, - 'l_module':l_module, - 'do_memory':command_line.do_memory}) - ########################################################## @irpy.lazy_property def builder(self): @@ -717,9 +491,12 @@ end subroutine # ~#~#~#~#~# #Next return the first element of the iterator - ps_text = next(text for filename, text in self.cm_t_filename_parsed_text if self.prototype.filename[0].startswith(filename)) - begin = next(i for i, (_, line) in enumerate(ps_text) if isinstance(line, Begin_provider) if line.filename[1] == self.same_as) - end = next(begin + i for i, (_, line) in enumerate(ps_text[begin:]) if isinstance(line, End_provider)) + ps_text = next(text for filename, text in self.cm_t_filename_parsed_text + if self.prototype.filename[0].startswith(filename)) + begin = next(i for i, (_, line) in enumerate(ps_text) + if isinstance(line, Begin_provider) if line.filename[1] == self.same_as) + end = next(begin + i for i, (_, line) in enumerate(ps_text[begin:]) + if isinstance(line, End_provider)) # Now we now that the text is betern ps_text[begin:end] _, line_prototype = ps_text[begin] @@ -736,23 +513,13 @@ end subroutine text.append(([], Simple_line(line_prototype.i, " irp_rdtsc1 = irp_rdtsc()", line_prototype.filename))) - remove = 1 for vars, line in ps_text[begin + 1:end]: - if 'call touch' in line.lower: - text += [([], Simple_line(line.i, '!$OMP TASKGROUP', line.filename))] - remove = -1 - text.append((vars, line)) text += map(lambda x: ([], Simple_line(line.i, x, line.filename)), build_call_provide(vars, self.d_entity)) - if remove == 0: - text += [([], Simple_line(line.i, '!$OMP END TASKGROUP', line.filename))] - - remove +=1 - # ~#~#~#~#~# # Create the subroutine. 
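         # (the generated bld_ routine: USE statements first, then the
         #  user's provider body with the doc lines stripped out)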
# ~#~#~#~#~# @@ -763,13 +530,19 @@ end subroutine # Add the use statement result += ["subroutine bld_%s" % (self.name)] - result += build_use([self.name] + self.needs, self.d_entity) + + l_use = build_use([self.name] + self.needs, self.d_entity,use=False) + if self.is_protected: + l_use.remove(self.fmodule) + + result += ['USE %s'%n for n in l_use] import parsed_text # Move the variable to top, and add the text - parsed_text.move_to_top_list(text, [Declaration, Implicit, Use]) + parsed_text.move_to_top_list(text, [Declaration, Implicit, Use]) - result.extend( line.text for _,line in text if not isinstance(line, (Begin_doc, End_doc, Doc, Cont_provider))) + result.extend(line.text for _, line in text + if not isinstance(line, (Begin_doc, End_doc, Doc, Cont_provider))) if command_line.do_profile: result += [ @@ -790,7 +563,7 @@ end subroutine @irpy.lazy_property_mutable def needed_by(self): #Set by parsed_text.build_needs(...) - return [] + return [] @irpy.lazy_property def children(self): @@ -821,5 +594,3 @@ end subroutine error.fail(self.prototype, "Cyclic dependencies:\n%s" % (str(self._parents))) return result - - diff --git a/src/irpy_files.py b/src/irpy_files.py index 90518a9..ef80965 100644 --- a/src/irpy_files.py +++ b/src/irpy_files.py @@ -286,7 +286,7 @@ class Irpy_comm_world(object): # Module data if m.has_irp_module: filename = os.path.join(irpdir, '%s.irp.module.F90' % m.filename) - text = '\n'.join(m.header + m.head) + text = '\n'.join(m.head) lazy_write_file(filename, '%s\n' % text) # Subroutines @@ -312,17 +312,8 @@ class Irpy_comm_world(object): def create_lock(self): from util import lazy_write_file - l = sorted(self.d_entity.keys()) - - out = [] - for v in l: - out += self.d_entity[v].locker - - out += ["subroutine irp_init_locks_%s()" % (irpf90_t.irp_id), " implicit none"] - for v in l: - out += [" call irp_lock_%s(.True.)" % v] - out += [" call irp_lock_%s(.False.)" % v] - out += ["end subroutine", ""] + from util import ashes_env + str_ = ashes_env.render('irp_lock.F90', {'entity':sorted(self.d_entity)}) filename = os.path.join(irpf90_t.irpdir, 'irp_locks.irp.F90') - lazy_write_file(filename, '\n'.join(out)) + lazy_write_file(filename, str_) diff --git a/src/module.py b/src/module.py index e222388..bd6395e 100644 --- a/src/module.py +++ b/src/module.py @@ -74,19 +74,17 @@ class Fmodule(object): @irpy.lazy_property def head(self): '''The module who containt the declaration of the entity''' - body = list(self.use) - body += list(self.dec) - body += [header for var in self.l_entity for header in var.header] + if self.use or self.dec or self.l_entity: - if body: - result = ["module %s" % (self.name)] - result += body - result += ["end module %s" % (self.name)] + d_template = {'name' : self.name, + 'use':list(self.use),'usr_declaration':list(self.dec), + 'irp_declaration':[e.d_header for e in self.l_entity], + 'coarray': command_line.coarray, + 'align': False if command_line.align == 1 else command_line.align} + return [i for i in ashes_env.render('module.f90', d_template).split('\n') if i] else: - result = [] - - return result + return [] @irpy.lazy_property def has_irp_module(self): @@ -106,7 +104,9 @@ class Fmodule(object): result = [] for var in self.l_entity: result += var.provider - result += var.builder + if not var.is_protected: + result += var.builder + result += var.allocater if var.is_read: result += var.reader if var.is_written: @@ -213,14 +213,14 @@ class Fmodule(object): Because user can define F90 Type, we need to keep the correct order. 
Warning: - If we uniquify that can cause a problem with the type in guess. - ```type toto - integer :: n - end type toto - integer :: n + If we uniquify that can cause a problem. + ```TYPE toto + INTEGER :: n + END TYPE toto + INTEGER :: n ``` Fix: - We need to support Type keyword. + We need to support TYPE keyword. ''' diff --git a/src/preprocessed_text.py b/src/preprocessed_text.py index 2c5cfaf..79a0092 100644 --- a/src/preprocessed_text.py +++ b/src/preprocessed_text.py @@ -57,8 +57,10 @@ simple_dict = { "subst": Subst, "end_doc": End_doc, "begin_provider": Begin_provider, + "begin_provider_immu": Begin_provider, "&begin_provider": Cont_provider, "end_provider": End_provider, + "end_provider_immu": End_provider, "assert": Assert, "touch": Touch, "soft_touch": SoftTouch, @@ -674,8 +676,10 @@ def irp_simple_statements(text): if command_line.do_profile: temp += [Simple_line(0, "call irp_init_timer()", line.filename)] - if command_line.do_openmp: - temp += [Simple_line(0, " call irp_init_locks_%s()" % (irp_id), line.filename)] +# Need to choose between lazy lock or are big full initialization +# if command_line.do_openmp: +# temp += [Simple_line(0, " call irp_init_locks_%s()" % (irp_id), line.filename)] + temp += [Call(0, " call %s" % (program_name), line.filename)] if command_line.do_profile: temp += [Simple_line(0, "call irp_print_timer()", line.filename)] diff --git a/src/templates/allocater.f90 b/src/templates/allocater.f90 new file mode 100644 index 0000000..5f213ab --- /dev/null +++ b/src/templates/allocater.f90 @@ -0,0 +1,47 @@ +{?dim} +SUBROUTINE allocate_{name} + + {#l_module} + USE {.} + {/l_module} + + IMPLICIT NONE + + CHARACTER*(9+{@size key=name/}),PARAMETER :: irp_here = 'allocate_{name}' + INTEGER :: irp_err + + IF ( ALLOCATED({name}) .AND.( & + {#l_dim} + ( SIZE({name},{rank}) /= {value} ) {@sep}.OR.{/sep} & + {/l_dim})) THEN + + {?do_memory} PRINT*, irp_here//': Deallocated {name}' {/do_memory} + DEALLOCATE({name},STAT=irp_err) + + IF (irp_err /= 0) THEN + PRINT*, irp_here//': Deallocation failed: {name}' + PRINT*,' size: {dim}' + ENDIF + + GO TO 666 + ELSE IF (.NOT.ALLOCATED({name})) THEN + GO TO 666 + ELSE + RETURN + ENDIF + + 666 CONTINUE + {?do_memory} PRINT*, irp_here//': Allocate {name} ({dim})'{/do_memory} + + {^do_corray} + ALLOCATE({name} ({dim}), STAT=irp_err) + {:else} + ALLOCATE({name} ({dim}[*]), STAT=irp_err) + {/do_corray} + + IF (irp_err /= 0) then + PRINT*, irp_here//': Allocation failed: {name}' + PRINT*,' size: {dim}' + ENDIF +END SUBROUTINE allocate_{name} +{/dim} diff --git a/src/templates/finalize.f90 b/src/templates/finalize.f90 new file mode 100644 index 0000000..8ead9c4 --- /dev/null +++ b/src/templates/finalize.f90 @@ -0,0 +1,13 @@ +SUBROUTINE irp_finalize_{id} + {#use} + USE {.} + {/use} + + IMPLICIT NONE + {#entity_array} + IF (ALLOCATED({name})) THEN + {name_root}_is_built = .FALSE. +! 
DEALLOCATE({name}) + ENDIF + {/entity_array} +END SUBROUTINE irp_finalize_{id} diff --git a/src/templates/irp_lock.f90 b/src/templates/irp_lock.f90 new file mode 100644 index 0000000..13c5cf6 --- /dev/null +++ b/src/templates/irp_lock.f90 @@ -0,0 +1,31 @@ +{#entity} +SUBROUTINE irp_lock_{.}(set) + + USE omp_lib + IMPLICIT NONE + LOGICAL, INTENT(in) :: set + INTEGER(KIND=omp_lock_kind),SAVE :: {.}_lock + INTEGER, SAVE :: ifirst = 0 + + {?do_debug} + CHARACTER*(9+{@size key={.}/}),PARAMETER :: irp_here = 'irp_lock_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + IF (ifirst == 0) then + ifirst = 1 + CALL omp_init_lock({.}_lock) + ENDIF + + IF (set) THEN + CALL omp_set_lock({.}_lock) + ELSE + CALL omp_unset_lock({.}_lock) + ENDIF + + {?do_debug} CALL irp_leach(irp_here) {/do_debug} + +END SUBROUTINE irp_lock_{.} +{/entity} + diff --git a/src/templates/module.f90 b/src/templates/module.f90 new file mode 100644 index 0000000..77ac928 --- /dev/null +++ b/src/templates/module.f90 @@ -0,0 +1,40 @@ +! -*- F90 -*- +! +!-----------------------------------------------! +! This file was generated with the irpf90 tool. ! +! ! +! DO NOT MODIFY IT BY HAND ! +!-----------------------------------------------! + + +MODULE {name} + +{#use} + USE {.} +{/use} + +{#usr_declaration} + {.} +{/usr_declaration} + +{#irp_declaration} + + {^dim} + {type} {?protected}, PROTECTED {/protected} :: {name} {?coarray} [*] {/coarray} + {:else} + + {?align} !DIR$ ATTRIBUTES ALIGN: {align} :: {name} {/align} + {type} {?protected}, PROTECTED {/protected} :: {name} {dim} {?coarray} [:] {/coarray} + {/dim} + + {?main} + LOGICAL :: {name}_is_built = .FALSE. + {/main} +{/irp_declaration} + + CONTAINS + {#irp_declaration} +{protected|s} + {/irp_declaration} + +END MODULE {name} diff --git a/src/templates/provider.f90 b/src/templates/provider.f90 new file mode 100644 index 0000000..54db996 --- /dev/null +++ b/src/templates/provider.f90 @@ -0,0 +1,43 @@ +{?inline}!DEC$ ATTRIBUTES FORCEINLINE :: provide_{name}{/inline} +SUBROUTINE provide_{name} + + {#l_module} + USE {.} + {/l_module} + + IMPLICIT NONE + + {?do_debug} + CHARACTER*(8+{@size key=name/}),PARAMETER :: irp_here = 'provide_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + {?do_openmp} + CALL irp_lock_{name}(.TRUE.) + IF (.NOT.{name}_is_built) THEN + {/do_openmp} + + {#l_children_static} + {@first} {?do_task}!$OMP TASKGROUP{/do_task} {/first} + {?do_openmp}!$OMP flush({.}_is_built){/do_openmp} + IF (.NOT.{.}_is_built) THEN + {?do_task}!$OMP TASK{/do_task} + CALL provide_{.} + {?do_task}!$OMP END TASK{/do_task} + ENDIF + {@last} {?do_task}!$OMP END TASKGROUP{/do_task} {/last} + {/l_children_static} + + {?dim} CALL allocate_{name} {/dim} + + CALL bld_{name} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + {?do_openmp} + ENDIF + CALL irp_lock_{name}(.FALSE.) + {/do_openmp} + +END SUBROUTINE provide_{name} diff --git a/src/templates/touch.f90 b/src/templates/touch.f90 new file mode 100644 index 0000000..ef49250 --- /dev/null +++ b/src/templates/touch.f90 @@ -0,0 +1,26 @@ +{#entity} + +SUBROUTINE touch_{name} + + {#l_module} + USE {.} + {/l_module} + + IMPLICIT NONE + {?do_debug} + CHARACTER*(6+{@size key=name/}),PARAMETER :: irp_here = 'touch_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + {#l_ancestor} + {.}_is_built = .FALSE. + {/l_ancestor} + + {name}_is_built = .TRUE. 
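+  ! (dependents invalidated above will be re-provided on their next access)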
+ + {?do_debug} CALL irp_leave(irp_here) {/do_debug} + +END SUBROUTINE touch_{name} + +{/entity} diff --git a/src/touches.py b/src/touches.py index c3a6382..6494a04 100644 --- a/src/touches.py +++ b/src/touches.py @@ -26,39 +26,28 @@ from irpf90_t import irp_id,irpdir import os -from util import lazy_write_file +from command_line import command_line def create(modules,variables): # (Dict[str,Module]. Dict[str, Variable]) -> None '''Create the fortran90 finalize subroutine and the touched one''' - - finalize = "subroutine irp_finalize_%s\n"%(irp_id) - for m in filter(lambda x: not modules[x].is_main and modules[x].has_irp_module, modules): - finalize += " use %s\n"%(modules[m].name) - main_modules_name =[ m.name for m in modules.values() if m.is_main] - - out = [] - for v,var in variables.iteritems(): - if var.fmodule not in main_modules_name: - out += var.toucher - if var.dim: - finalize += " if (allocated(%s)) then\n"%v - finalize += " %s_is_built = .False.\n"%var.same_as - finalize += " deallocate(%s)\n"%v - finalize += " endif\n" + d_template_finalize = {'id':irp_id, + 'use':[m.name for m in modules.values() if not m.is_main and m.has_irp_module], + 'entity_array': + [{'name':e.name,'name_root':e.same_as} for e in variables.values() if e.fmodule not in main_modules_name and e.dim]} + - finalize += "end\n" - - if out: - out = map(lambda x: "%s\n"%(x),out) - - out += finalize + d_template_touch = {'do_debug': command_line.do_debug, + 'entity':[e.d_touche_template for e in variables.values() if e.fmodule not in main_modules_name]} + + import util + str_out = util.ashes_env.render('touch.f90', d_template_touch) + util.ashes_env.render('finalize.f90', d_template_finalize) filename=os.path.join(irpdir,'irp_touches.irp.F90') - lazy_write_file(filename,''.join(out)) + util.lazy_write_file(filename,'%s\n'% util.remove_empy_lines(str_out)) if __name__ == '__main__': create() diff --git a/src/util.py b/src/util.py index f562d0c..44c3fab 100644 --- a/src/util.py +++ b/src/util.py @@ -42,6 +42,17 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger('Irpf90') logger.setLevel(30) + +# ~#~#~#~#~# +# A S H E S _ T E M P L A T E S +# ~#~#~#~#~# +from ashes import AshesEnv +import os +ashes_env = AshesEnv([os.path.join(os.path.dirname(__file__),'templates')]) + +def remove_empy_lines(text): + return os.linesep.join([s for s in text.splitlines() if s.strip()]) + # ~#~#~#~#~# # / / _ R E L A T E D # ~#~#~#~#~# @@ -277,6 +288,28 @@ def flatten(l_2d): # ~#~#~#~#~# # I R P _ R E L A T E D # ~#~#~#~#~# +def dimsize(x): + # (str) -> str + '''Compute the number of element in the array''' + try: + b0, b1 = x.split(':') + except ValueError: + return x + + b0_is_digit = b0.replace('-', '').isdigit() + b1_is_digit = b1.replace('-', '').isdigit() + + if b0_is_digit and b1_is_digit: + size = str(int(b1) - int(b0) + 1) + elif b0_is_digit: + size = "(%s) - (%d)" % (b1, int(b0) - 1) + elif b1_is_digit: + size = "(%d) - (%s)" % (int(b1) + 1, b0) + else: + size = "(%s) - (%s) + 1" % (b1, b0) + return size + + def build_dim(l_dim, colons=False): # (List[str],bool) -> str '''Contruct a valid fortran90 array dimension code from a list dimension @@ -298,10 +331,16 @@ def mangled(l_ent, d_ent): '''Create a uniq list of providier (merge the multione) ''' return OrderedUniqueList(d_ent[name].same_as for name in l_ent) -def build_use(l_ent, d_ent): +def build_use(l_ent, d_ent,use=True): # (List, Dict[str,Entity]) -> list '''Contruct the fortran90 'use' statement for the list of entity''' - return 
From 6bc15db94adef8795aa6f246a381cbe7c313c10b Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 16 Feb 2017 18:12:57 -0600
Subject: [PATCH 20/31] IMMU -> IMMUTABLE

---
 src/preprocessed_text.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/preprocessed_text.py b/src/preprocessed_text.py
index 79a0092..1abee2f 100644
--- a/src/preprocessed_text.py
+++ b/src/preprocessed_text.py
@@ -57,10 +57,10 @@ simple_dict = {
     "subst": Subst,
     "end_doc": End_doc,
     "begin_provider": Begin_provider,
-    "begin_provider_immu": Begin_provider,
+    "begin_provider_immutable": Begin_provider,
     "&begin_provider": Cont_provider,
     "end_provider": End_provider,
-    "end_provider_immu": End_provider,
+    "end_provider_immutable": End_provider,
     "assert": Assert,
     "touch": Touch,
     "soft_touch": SoftTouch,

From 8a31b6302c79d5d1670465e43d775c2fdc4a9b5c Mon Sep 17 00:00:00 2001
From: Thomas Applencourt
Date: Thu, 16 Feb 2017 19:14:17 -0600
Subject: [PATCH 21/31] Everything is templated

---
 src/ashes.py             | 2602 --------------------------------------
 src/command_line.py      |    6 +-
 src/entity.py            |  105 +-
 src/irpf90_t.py          |    7 +-
 src/irpy_files.py        |    9 +-
 src/lib_irpy.py          |  160 ---
 src/module.py            |    2 +-
 src/preprocessed_text.py |    1 +
 src/routine.py           |    6 +-
 src/templates/ioer.f90   |   71 ++
 src/templates/writer.f90 |   69 +
 src/util.py              |    4 +-
 12 files changed, 172 insertions(+), 2870 deletions(-)
 delete mode 100644 src/ashes.py
 delete mode 100755 src/lib_irpy.py
 create mode 100644 src/templates/ioer.f90
 create mode 100644 src/templates/writer.f90

diff --git a/src/ashes.py b/src/ashes.py
deleted file mode 100644
index 010471d..0000000
--- a/src/ashes.py
+++ /dev/null
@@ -1,2602 +0,0 @@
-# -*- coding: utf-8 -*-
-
-'''
-Copyright (c) 2013, Mahmoud Hashemi
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    * The names of the contributors may not be used to endorse or
-      promote products derived from this software without specific
-      prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-''' - - -from __future__ import unicode_literals - -import os -import re -import cgi -import sys -import json -import codecs -import pprint -import string -import fnmatch -import time - - -PY3 = (sys.version_info[0] == 3) -if PY3: - unicode, string_types = str, (str, bytes) -else: - string_types = (str, unicode) - -__version__ = '15.1.1dev' -__author__ = 'Mahmoud Hashemi' -__contact__ = 'mahmoudrhashemi@gmail.com' -__url__ = 'https://github.com/mahmoud/ashes' -__license__ = 'BSD' - - -DEFAULT_EXTENSIONS = ('.dust', '.html', '.xml') -DEFAULT_IGNORED_PATTERNS = ('.#*',) - - -# need to add group for literals -# switch to using word boundary for params section -node_re = re.compile(r'({' - r'(?P\/)?' - r'(?:(?P[\~\#\?\@\:\<\>\+\^\%])\s*)?' - r'(?P[a-zA-Z0-9_\$\.]+|"[^"]+")' - r'(?:\:(?P[a-zA-Z0-9\$\.]+))?' - r'(?P[\|a-z]+)*?' - r'(?P(?:\s+\w+\=(("[^"]*?")|([$\w\.]+)))*)?' - r'\s*' - r'(?P\/)?' - r'\})', - flags=re.MULTILINE) - -key_re_str = '[a-zA-Z_$][0-9a-zA-Z_$]*' -key_re = re.compile(key_re_str) -path_re = re.compile('(' + key_re_str + ')?(\.' + key_re_str + ')+') -comment_re = re.compile(r'(\{!.+?!\})|(\{`.+?`\})', flags=re.DOTALL) - - -def get_path_or_key(pork): - if pork == '.': - pk = ['path', True, []] - elif path_re.match(pork): - f_local = pork.startswith('.') - if f_local: - pork = pork[1:] - pk = ['path', f_local, pork.split('.')] - elif key_re.match(pork): - pk = ['key', pork] - else: - raise ValueError('expected a path or key, not %r' % pork) - return pk - - -def split_leading(text): - leading_stripped = text.lstrip() - leading_ws = text[:len(text) - len(leading_stripped)] - return leading_ws, leading_stripped - - -class Token(object): - def __init__(self, text): - self.text = text - - def get_line_count(self): - # returns 0 if there's only one line, because the - # token hasn't increased the number of lines. - count = len(self.text.splitlines()) - 1 - if self.text[-1] in ('\n', '\r'): - count += 1 - return count - - def __repr__(self): - cn = self.__class__.__name__ - disp = self.text - if len(disp) > 20: - disp = disp[:17] + '...' - return '%s(%r)' % (cn, disp) - - -class CommentToken(Token): - def to_dust_ast(self): - return [['comment', self.text]] - - -class RawToken(Token): - def to_dust_ast(self): - return [['raw', self.text]] - - -class BufferToken(Token): - def to_dust_ast(self): - # It is hard to simulate the PEG parsing in this case, - # especially while supporting universal newlines. - if not self.text: - return [] - rev = [] - remaining_lines = self.text.splitlines() - if self.text[-1] in ('\n', '\r'): - # kind of a bug in splitlines if you ask me. 
- remaining_lines.append('') - while remaining_lines: - line = remaining_lines.pop() - leading_ws, lstripped = split_leading(line) - if remaining_lines: - if lstripped: - rev.append(['buffer', lstripped]) - rev.append(['format', '\n', leading_ws]) - else: - if line: - rev.append(['buffer', line]) - ret = list(reversed(rev)) - return ret - - -ALL_ATTRS = ('closing', 'symbol', 'refpath', 'contpath', - 'filters', 'params', 'selfclosing') - - -class Tag(Token): - req_attrs = () - ill_attrs = () - - def __init__(self, text, **kw): - super(Tag, self).__init__(text) - self._attr_dict = kw - self.set_attrs(kw) - - @property - def param_list(self): - try: - return params_to_kv(self.params) - except AttributeError: - return [] - - @property - def name(self): - try: - return self.refpath.strip().lstrip('.') - except (AttributeError, TypeError): - return None - - def set_attrs(self, attr_dict, raise_exc=True): - cn = self.__class__.__name__ - all_attrs = getattr(self, 'all_attrs', ()) - if all_attrs: - req_attrs = [a for a in ALL_ATTRS if a in all_attrs] - ill_attrs = [a for a in ALL_ATTRS if a not in all_attrs] - else: - req_attrs = getattr(self, 'req_attrs', ()) - ill_attrs = getattr(self, 'ill_attrs', ()) - - opt_attrs = getattr(self, 'opt_attrs', ()) - if opt_attrs: - ill_attrs = [a for a in ill_attrs if a not in opt_attrs] - for attr in req_attrs: - if attr_dict.get(attr, None) is None: - raise ValueError('%s expected %s' % (cn, attr)) - for attr in ill_attrs: - if attr_dict.get(attr, None) is not None: - raise ValueError('%s does not take %s' % (cn, attr)) - - avail_attrs = [a for a in ALL_ATTRS if a not in ill_attrs] - for attr in avail_attrs: - setattr(self, attr, attr_dict.get(attr, '')) - return True - - @classmethod - def from_match(cls, match): - kw = dict([(str(k), v.strip()) - for k, v in match.groupdict().items() - if v is not None and v.strip()]) - obj = cls(text=match.group(0), **kw) - obj.orig_match = match - return obj - - -class ReferenceTag(Tag): - all_attrs = ('refpath',) - opt_attrs = ('filters',) - - def to_dust_ast(self): - pork = get_path_or_key(self.refpath) - filters = ['filters'] - if self.filters: - f_list = self.filters.split('|')[1:] - for f in f_list: - filters.append(f) - return [['reference', pork, filters]] - - -class SectionTag(Tag): - ill_attrs = ('closing') - - -class ClosingTag(Tag): - all_attrs = ('closing', 'refpath') - - -class SpecialTag(Tag): - all_attrs = ('symbol', 'refpath') - - def to_dust_ast(self): - return [['special', self.refpath]] - - -class BlockTag(Tag): - all_attrs = ('symbol', 'refpath') - - -class PartialTag(Tag): - req_attrs = ('symbol', 'refpath', 'selfclosing') - - def __init__(self, **kw): - super(PartialTag, self).__init__(**kw) - self.subtokens = parse_inline(self.refpath) - - def to_dust_ast(self): - """ - 2014.05.09 - This brings compatibility to the more popular fork of Dust.js - from LinkedIn (v1.0) - - Adding in `params` so `partials` function like sections. 
- """ - context = ['context'] - contpath = self.contpath - if contpath: - context.append(get_path_or_key(contpath)) - - params = ['params'] - param_list = self.param_list - if param_list: - try: - params.extend(params_to_dust_ast(param_list)) - except ParseError as pe: - pe.token = self - raise - - # tying to make this more standardized - inline_body = inline_to_dust_ast(self.subtokens) - return [['partial', - inline_body, - context, - params, - ]] - - -def parse_inline(source): - if not source: - raise ParseError('empty inline token') - if source.startswith('"') and source.endswith('"'): - source = source[1:-1] - if not source: - return [BufferToken("")] - tokens = tokenize(source, inline=True) - return tokens - - -def inline_to_dust_ast(tokens): - if tokens and all(isinstance(t, BufferToken) for t in tokens): - body = ['literal', ''.join(t.text for t in tokens)] - else: - body = ['body'] - for b in tokens: - body.extend(b.to_dust_ast()) - return body - - -def params_to_kv(params_str): - ret = [] - new_k, v = None, None - p_str = params_str.strip() - k, _, tail = p_str.partition('=') - while tail: - tmp, _, tail = tail.partition('=') - tail = tail.strip() - if not tail: - v = tmp - else: - v, new_k = tmp.split() - ret.append((k.strip(), v.strip())) - k = new_k - return ret - - -def params_to_dust_ast(param_kv): - ret = [] - for k, v in param_kv: - try: - v_body = get_path_or_key(v) - except ValueError: - v_body = inline_to_dust_ast(parse_inline(v)) - ret.append(['param', ['literal', k], v_body]) - return ret - - -def get_tag(match, inline=False): - groups = match.groupdict() - symbol = groups['symbol'] - closing = groups['closing'] - refpath = groups['refpath'] - if closing: - tag_type = ClosingTag - elif symbol is None and refpath is not None: - tag_type = ReferenceTag - elif symbol in '#?^<+@%': - tag_type = SectionTag - elif symbol == '~': - tag_type = SpecialTag - elif symbol == ':': - tag_type = BlockTag - elif symbol == '>': - tag_type = PartialTag - else: - raise ParseError('invalid tag symbol: %r' % symbol) - if inline and tag_type not in (ReferenceTag, SpecialTag): - raise ParseError('invalid inline tag') - return tag_type.from_match(match) - - -def tokenize(source, inline=False): - tokens = [] - com_nocom = comment_re.split(source) - line_counts = [1] - - def _add_token(t): - # i wish i had nonlocal so bad - t.start_line = sum(line_counts) - line_counts.append(t.get_line_count()) - t.end_line = sum(line_counts) - tokens.append(t) - for cnc in com_nocom: - if not cnc: - continue - elif cnc.startswith('{!') and cnc.endswith('!}'): - _add_token(CommentToken(cnc[2:-2])) - continue - elif cnc.startswith('{`') and cnc.endswith('`}'): - _add_token(RawToken(cnc[2:-2])) - continue - prev_end = 0 - start = None - end = None - for match in node_re.finditer(cnc): - start, end = match.start(1), match.end(1) - if prev_end < start: - _add_token(BufferToken(cnc[prev_end:start])) - prev_end = end - try: - _add_token(get_tag(match, inline)) - except ParseError as pe: - pe.line_no = sum(line_counts) - raise - tail = cnc[prev_end:] - if tail: - _add_token(BufferToken(tail)) - return tokens - -######### -# PARSING -######### - - -class Section(object): - def __init__(self, start_tag=None, blocks=None): - if start_tag is None: - refpath = None - name = '' - else: - refpath = start_tag.refpath - name = start_tag.name - - self.refpath = refpath - self.name = name - self.start_tag = start_tag - self.blocks = blocks or [] - - def add(self, obj): - if type(obj) == Block: - self.blocks.append(obj) - else: 
- if not self.blocks: - self.blocks = [Block()] - self.blocks[-1].add(obj) - - def to_dict(self): - ret = {self.name: dict([(b.name, b.to_list()) for b in self.blocks])} - return ret - - def to_dust_ast(self): - symbol = self.start_tag.symbol - - pork = get_path_or_key(self.refpath) - - context = ['context'] - contpath = self.start_tag.contpath - if contpath: - context.append(get_path_or_key(contpath)) - - params = ['params'] - param_list = self.start_tag.param_list - if param_list: - try: - params.extend(params_to_dust_ast(param_list)) - except ParseError as pe: - pe.token = self - raise - - bodies = ['bodies'] - if self.blocks: - for b in reversed(self.blocks): - bodies.extend(b.to_dust_ast()) - - return [[symbol, - pork, - context, - params, - bodies]] - - -class Block(object): - def __init__(self, name='block'): - if not name: - raise ValueError('blocks need a name, not: %r' % name) - self.name = name - self.items = [] - - def add(self, item): - self.items.append(item) - - def to_list(self): - ret = [] - for i in self.items: - try: - ret.append(i.to_dict()) - except AttributeError: - ret.append(i) - return ret - - def _get_dust_body(self): - # for usage by root block in ParseTree - ret = [] - for i in self.items: - ret.extend(i.to_dust_ast()) - return ret - - def to_dust_ast(self): - name = self.name - body = ['body'] - dust_body = self._get_dust_body() - if dust_body: - body.extend(dust_body) - return [['param', - ['literal', name], - body]] - - -class ParseTree(object): - def __init__(self, root_block): - self.root_block = root_block - - def to_dust_ast(self): - ret = ['body'] - ret.extend(self.root_block._get_dust_body()) - return ret - - @classmethod - def from_tokens(cls, tokens): - root_sect = Section() - ss = [root_sect] # section stack - for token in tokens: - if type(token) == SectionTag: - new_s = Section(token) - ss[-1].add(new_s) - if not token.selfclosing: - ss.append(new_s) - elif type(token) == ClosingTag: - if len(ss) <= 1: - msg = 'closing tag before opening tag: %r' % token.text - raise ParseError(msg, token=token) - if token.name != ss[-1].name: - msg = ('improperly nested tags: %r does not close %r' % - (token.text, ss[-1].start_tag.text)) - raise ParseError(msg, token=token) - ss.pop() - elif type(token) == BlockTag: - if len(ss) <= 1: - msg = 'start block outside of a section: %r' % token.text - raise ParseError(msg, token=token) - new_b = Block(name=token.refpath) - ss[-1].add(new_b) - else: - ss[-1].add(token) - if len(ss) > 1: - raise ParseError('unclosed tag: %r' % ss[-1].start_tag.text, - token=ss[-1].start_tag) - return cls(root_sect.blocks[0]) - - @classmethod - def from_source(cls, src): - tokens = tokenize(src) - return cls.from_tokens(tokens) - - -############## -# Optimize AST -############## -DEFAULT_SPECIAL_CHARS = {'s': ' ', - 'n': '\n', - 'r': '\r', - 'lb': '{', - 'rb': '}'} - -DEFAULT_OPTIMIZERS = { - 'body': 'compact_buffers', - 'special': 'convert_special', - 'format': 'nullify', - 'comment': 'nullify'} - -for nsym in ('buffer', 'filters', 'key', 'path', 'literal', 'raw'): - DEFAULT_OPTIMIZERS[nsym] = 'noop' - -for nsym in ('#', '?', '^', '<', '+', '@', '%', 'reference', - 'partial', 'context', 'params', 'bodies', 'param'): - DEFAULT_OPTIMIZERS[nsym] = 'visit' - -UNOPT_OPTIMIZERS = dict(DEFAULT_OPTIMIZERS) -UNOPT_OPTIMIZERS.update({'format': 'noop', 'body': 'visit'}) - - -def escape(text, esc_func=json.dumps): - return esc_func(text) - - -class Optimizer(object): - def __init__(self, optimizers=None, special_chars=None): - if special_chars is 
None: - special_chars = DEFAULT_SPECIAL_CHARS - self.special_chars = special_chars - - if optimizers is None: - optimizers = DEFAULT_OPTIMIZERS - self.optimizers = dict(optimizers) - - def optimize(self, node): - # aka filter_node() - nsym = node[0] - optimizer_name = self.optimizers[nsym] - return getattr(self, optimizer_name)(node) - - def noop(self, node): - return node - - def nullify(self, node): - return None - - def convert_special(self, node): - return ['buffer', self.special_chars[node[1]]] - - def visit(self, node): - ret = [node[0]] - for n in node[1:]: - filtered = self.optimize(n) - if filtered: - ret.append(filtered) - return ret - - def compact_buffers(self, node): - ret = [node[0]] - memo = None - for n in node[1:]: - filtered = self.optimize(n) - if not filtered: - continue - if filtered[0] == 'buffer': - if memo is not None: - memo[1] += filtered[1] - else: - memo = filtered - ret.append(filtered) - else: - memo = None - ret.append(filtered) - return ret - - def __call__(self, node): - return self.optimize(node) - - -######### -# Compile -######### - - -ROOT_RENDER_TMPL = \ -'''def render(chk, ctx): - {body} - return {root_func_name}(chk, ctx) -''' - - -def _python_compile(source): - """ - Generates a Python `code` object (via `compile`). - - args: - source: (required) string of python code to be compiled - - this actually compiles the template to code - """ - try: - code = compile(source, '', 'single') - return code - except: - raise - - -def _python_exec(code, name, global_env=None): - """ - this loads a code object (generated via `_python_compile` - - args: - code: (required) code object (generate via `_python_compile`) - name: (required) the name of the function - - kwargs: - global_env: (default None): the environment - """ - if global_env is None: - global_env = {} - else: - global_env = dict(global_env) - if PY3: - exec(code, global_env) - else: - exec("exec code in global_env") - return global_env[name] - - -def python_string_to_code(python_string): - """ - utility function - used to compile python string functions to code object - - args: - ``python_string`` - """ - code = _python_compile(python_string) - return code - - -def python_string_to_function(python_string): - """ - utility function - used to compile python string functions for template loading/caching - - args: - ``python_string`` - """ - code = _python_compile(python_string) - function = _python_exec(code, name='render', global_env=None) - return function - - -class Compiler(object): - """ - Note: Compiler objects aren't really meant to be reused, - the class is just for namespacing and convenience. - """ - sections = {'#': 'section', - '?': 'exists', - '^': 'notexists'} - nodes = {'<': 'inline_partial', - '+': 'region', - '@': 'helper', - '%': 'pragma'} - - def __init__(self, env=None): - if env is None: - env = default_env - self.env = env - - self.bodies = {} - self.blocks = {} - self.block_str = '' - self.index = 0 - self.auto = self.env.autoescape_filter - - def compile(self, ast, name='render'): - python_source = self._gen_python(ast) - python_code = _python_compile(python_source) - python_func = _python_exec(python_code, name=name) - return (python_code, python_func) - - def _gen_python(self, ast): # ast to init? 
- lines = [] - c_node = self._node(ast) - - block_str = self._root_blocks() - - bodies = self._root_bodies() - lines.extend(bodies.splitlines()) - if block_str: - lines.extend(['', block_str, '']) - body = '\n '.join(lines) - - ret = ROOT_RENDER_TMPL.format(body=body, - root_func_name=c_node) - self.python_source = ret - return ret - - def _root_blocks(self): - if not self.blocks: - self.block_str = '' - return '' - self.block_str = 'ctx = ctx.shift_blocks(blocks)\n ' - pairs = ['"' + name + '": ' + fn for name, fn in self.blocks.items()] - return 'blocks = {' + ', '.join(pairs) + '}' - - def _root_bodies(self): - max_body = max(self.bodies.keys()) - ret = [''] * (max_body + 1) - for i, body in self.bodies.items(): - ret[i] = ('\ndef body_%s(chk, ctx):\n %sreturn chk%s\n' - % (i, self.block_str, body)) - return ''.join(ret) - - def _convert_special(self, node): - return ['buffer', self.special_chars[node[1]]] - - def _node(self, node): - ntype = node[0] - if ntype in self.sections: - stype = self.sections[ntype] - return self._section(node, stype) - elif ntype in self.nodes: - ntype = self.nodes[ntype] - cfunc = getattr(self, '_' + ntype, None) - if not callable(cfunc): - raise TypeError('unsupported node type: "%r"', node[0]) - return cfunc(node) - - def _body(self, node): - index = self.index - self.index += 1 # make into property, equal to len of bodies? - name = 'body_%s' % index - self.bodies[index] = self._parts(node) - return name - - def _parts(self, body): - parts = [] - for part in body[1:]: - parts.append(self._node(part)) - return ''.join(parts) - - def _raw(self, node): - return '.write(%r)' % node[1] - - def _buffer(self, node): - return '.write(%s)' % escape(node[1]) - - def _format(self, node): - return '.write(%s)' % escape(node[1] + node[2]) - - def _reference(self, node): - return '.reference(%s,ctx,%s)' % (self._node(node[1]), - self._node(node[2])) - - def _section(self, node, cmd): - return '.%s(%s,%s,%s,%s)' % (cmd, - self._node(node[1]), - self._node(node[2]), - self._node(node[4]), - self._node(node[3])) - - def _inline_partial(self, node): - bodies = node[4] - for param in bodies[1:]: - btype = param[1][1] - if btype == 'block': - self.blocks[node[1][1]] = self._node(param[2]) - return '' - return '' - - def _region(self, node): - """aka the plus sign ('+') block""" - tmpl = '.block(ctx.get_block(%s),%s,%s,%s)' - return tmpl % (escape(node[1][1]), - self._node(node[2]), - self._node(node[4]), - self._node(node[3])) - - def _helper(self, node): - return '.helper(%s,%s,%s,%s)' % (escape(node[1][1]), - self._node(node[2]), - self._node(node[4]), - self._node(node[3])) - - def _pragma(self, node): - pr_name = node[1][1] - pragma = self.env.pragmas.get(pr_name) - if not pragma or not callable(pragma): - return '' # TODO: raise? - raw_bodies = node[4] - bodies = {} - for rb in raw_bodies[1:]: - bodies[rb[1][1]] = rb[2] - - raw_params = node[3] - params = {} - for rp in raw_params[1:]: - params[rp[1][1]] = rp[2][1] - - try: - ctx = node[2][1][1] - except (IndexError, AttributeError): - ctx = None - - return pragma(self, ctx, bodies, params) - - def _partial(self, node): - """ - 2014.05.09 - This brings compatibility to the more popular fork of Dust.js - from LinkedIn (v1.0) - - Adding in `params` so `partials` function like sections. 
- updating call to .partial() to include the kwargs - - dust.js reference : - compile.nodes = { - partial: function(context, node) { - return '.partial(' + - compiler.compileNode(context, node[1]) + - ',' + compiler.compileNode(context, node[2]) + - ',' + compiler.compileNode(context, node[3]) + ')'; - }, - """ - if node[0] == 'body': - body_name = self._node(node[1]) - return '.partial(' + body_name + ', %s)' % self._node(node[2]) - return '.partial(%s, %s, %s)' % (self._node(node[1]), - self._node(node[2]), - self._node(node[3])) - - def _context(self, node): - contpath = node[1:] - if contpath: - return 'ctx.rebase(%s)' % (self._node(contpath[0])) - return 'ctx' - - def _params(self, node): - parts = [self._node(p) for p in node[1:]] - if parts: - return '{' + ','.join(parts) + '}' - return 'None' - - def _bodies(self, node): - parts = [self._node(p) for p in node[1:]] - return '{' + ','.join(parts) + '}' - - def _param(self, node): - return ':'.join([self._node(node[1]), self._node(node[2])]) - - def _filters(self, node): - ret = '"%s"' % self.auto - f_list = ['"%s"' % f for f in node[1:]] # repr? - if f_list: - ret += ',[%s]' % ','.join(f_list) - return ret - - def _key(self, node): - return 'ctx.get(%r)' % node[1] - - def _path(self, node): - cur = node[1] - keys = node[2] or [] - return 'ctx.get_path(%s, %s)' % (cur, keys) - - def _literal(self, node): - return escape(node[1]) - - -######### -# Runtime -######### - - -class UndefinedValueType(object): - def __repr__(self): - return self.__class__.__name__ + '()' - - def __str__(self): - return '' - - -UndefinedValue = UndefinedValueType() - -# Prerequisites for escape_url_path - - -def _make_quote_map(allowed_chars): - ret = {} - for i in range(256): - c = chr(i) - esc_c = c if c in allowed_chars else '%{0:02X}'.format(i) - ret[i] = ret[c] = esc_c - return ret - -# The unreserved URI characters (per RFC 3986) -_UNRESERVED_CHARS = (frozenset(string.ascii_letters) - | frozenset(string.digits) - | frozenset('-._~')) -_RESERVED_CHARS = frozenset(":/?#[]@!$&'()*+,;=") # not used -_PATH_RESERVED_CHARS = frozenset("?#") # not used - -_PATH_QUOTE_MAP = _make_quote_map(_UNRESERVED_CHARS | set('/?=&:#')) - -# Escapes/filters - - -def escape_uri_path(text, to_bytes=True): - # actually meant to run on path + query args + fragment - text = to_unicode(text) - if not to_bytes: - return unicode().join([_PATH_QUOTE_MAP.get(c, c) for c in text]) - try: - bytestr = text.encode('utf-8') - except UnicodeDecodeError: - bytestr = text - except: - raise ValueError('expected text or UTF-8 encoded bytes, not %r' % text) - return ''.join([_PATH_QUOTE_MAP[b] for b in bytestr]) - - -def escape_uri_component(text): - return (escape_uri_path(text) # calls to_unicode for us - .replace('/', '%2F') - .replace('?', '%3F') - .replace('=', '%3D') - .replace('&', '%26')) - - -def escape_html(text): - text = to_unicode(text) - # TODO: dust.js doesn't use this, but maybe we should: - # .replace("'", '&squot;') - return cgi.escape(text, True) - - -def escape_js(text): - text = to_unicode(text) - return (text - .replace('\\', '\\\\') - .replace('"', '\\"') - .replace("'", "\\'") - .replace('\r', '\\r') - .replace('\u2028', '\\u2028') - .replace('\u2029', '\\u2029') - .replace('\n', '\\n') - .replace('\f', '\\f') - .replace('\t', '\\t')) - - -def comma_num(val): - try: - return '{0:,}'.format(val) - except ValueError: - return to_unicode(val) - - -def pp_filter(val): - try: - return pprint.pformat(val) - except: - try: - return repr(val) - except: - return 'unreprable object 
%s' % object.__repr__(val) - - -JSON_PP_INDENT = 2 - - -def ppjson_filter(val): - "A best-effort pretty-printing filter, based on the JSON module" - try: - return json.dumps(val, indent=JSON_PP_INDENT, sort_keys=True) - except TypeError: - return to_unicode(val) - - -# Helpers - -def first_helper(chunk, context, bodies, params=None): - if context.stack.index > 0: - return chunk - if 'block' in bodies: - return bodies['block'](chunk, context) - return chunk - - -def last_helper(chunk, context, bodies, params=None): - if context.stack.index < context.stack.of - 1: - return chunk - if 'block' in bodies: - return bodies['block'](chunk, context) - return chunk - - -def sep_helper(chunk, context, bodies, params=None): - if context.stack.index == context.stack.of - 1: - return chunk - if 'block' in bodies: - return bodies['block'](chunk, context) - return chunk - - -def idx_helper(chunk, context, bodies, params=None): - if 'block' in bodies: - return bodies['block'](chunk, context.push(context.stack.index)) - return chunk - - -def idx_1_helper(chunk, context, bodies, params=None): - if 'block' in bodies: - return bodies['block'](chunk, context.push(context.stack.index + 1)) - return chunk - - -def size_helper(chunk, context, bodies, params): - try: - key = params['key'] - return chunk.write(unicode(len(key))) - except (KeyError, TypeError): - return chunk - - -def _sort_iterate_items(items, sort_key, direction): - if not items: - return items - reverse = False - if direction == 'desc': - reverse = True - if not sort_key: - sort_key = 0 - elif sort_key[0] == '$': - sort_key = sort_key[1:] - if sort_key == 'key': - sort_key = 0 - elif sort_key == 'value': - sort_key = 1 - else: - try: - sort_key = int(sort_key) - except: - sort_key = 0 - return sorted(items, key=lambda x: x[sort_key], reverse=reverse) - - -def iterate_helper(chunk, context, bodies, params): - params = params or {} - body = bodies.get('block') - sort = params.get('sort') - sort_key = params.get('sort_key') - target = params.get('key') - if not body or not target: - context.env.log('warn', 'helper.iterate', 'empty block or target') - return chunk - try: - iter(target) - except: - context.env.log('warn', 'helper.iterate', 'non-iterable target') - return chunk - try: - items = target.items() - is_dict = True - except: - items = target - is_dict = False - if sort: - try: - items = _sort_iterate_items(items, sort_key, direction=sort) - except: - context.env.log('warn', 'helper.iterate', 'failed to sort target') - return chunk - if is_dict: - for key, value in items: - body(chunk, context.push({'$key': key, - '$value': value, - '$type': type(value).__name__, - '$0': key, - '$1': value})) - else: - # all this is for iterating over tuples and the like - for values in items: - try: - key = values[0] - except: - key, value = None, None - else: - try: - value = values[1] - except: - value = None - new_scope = {'$key': key, - '$value': value, - '$type': type(value).__name__} - try: - for i, value in enumerate(values): - new_scope['$%s' % i] = value - except TypeError: - context.env.log('warn', 'helper.iterate', - 'unable to enumerate values') - return chunk - else: - body(chunk, context.push(new_scope)) - return chunk - - -def _do_compare(chunk, context, bodies, params, cmp_op): - "utility function used by @eq, @gt, etc." 
- params = params or {} - try: - body = bodies['block'] - key = params['key'] - value = params['value'] - typestr = params.get('type') - except KeyError: - context.env.log('warn', 'helper.compare', - 'comparison missing key/value') - return chunk - rkey = _resolve_value(key, chunk, context) - if not typestr: - typestr = _COERCE_REV_MAP.get(type(rkey), 'string') - rvalue = _resolve_value(value, chunk, context) - crkey, crvalue = _coerce(rkey, typestr), _coerce(rvalue, typestr) - if isinstance(crvalue, type(crkey)) and cmp_op(crkey, crvalue): - return chunk.render(body, context) - elif 'else' in bodies: - return chunk.render(bodies['else'], context) - return chunk - - -def _resolve_value(item, chunk, context): - if not callable(item): - return item - try: - return chunk.tap_render(item, context) - except TypeError: - if getattr(context, 'is_strict', None): - raise - return item - - -_COERCE_MAP = { - 'number': float, - 'string': unicode, - 'boolean': bool, -} # Not implemented: date, context -_COERCE_REV_MAP = dict([(v, k) for k, v in _COERCE_MAP.items()]) -_COERCE_REV_MAP[int] = 'number' -try: - _COERCE_REV_MAP[long] = 'number' -except NameError: - pass - - -def _coerce(value, typestr): - coerce_type = _COERCE_MAP.get(typestr.lower()) - if not coerce_type or isinstance(value, coerce_type): - return value - if isinstance(value, string_types): - try: - value = json.loads(value) - except (TypeError, ValueError): - pass - try: - return coerce_type(value) - except (TypeError, ValueError): - return value - - -def _make_compare_helpers(): - from functools import partial - from operator import eq, ne, lt, le, gt, ge - CMP_MAP = {'eq': eq, 'ne': ne, 'gt': gt, 'lt': lt, 'gte': ge, 'lte': le} - ret = {} - for name, op in CMP_MAP.items(): - ret[name] = partial(_do_compare, cmp_op=op) - return ret - - -DEFAULT_HELPERS = {'first': first_helper, - 'last': last_helper, - 'sep': sep_helper, - 'idx': idx_helper, - 'idx_1': idx_1_helper, - 'size': size_helper, - 'iterate': iterate_helper} -DEFAULT_HELPERS.update(_make_compare_helpers()) - - -def make_base(env, stack, global_vars=None): - """`make_base( env, stack, global_vars=None )` - `env` and `stack` are required by the Python implementation. - `global_vars` is optional. set to global_vars. - - 2014.05.09 - This brings compatibility to the more popular fork of Dust.js - from LinkedIn (v1.0) - - adding this to try and create compatibility with Dust - - this is used for the non-activated alternative approach of rendering a - partial with a custom context object - - dust.makeBase = function(global) { - return new Context(new Stack(), global); - }; - """ - return Context(env, stack, global_vars) - - -# Actual runtime objects - -class Context(object): - """\ - The context is a special object that handles variable lookups and - controls template behavior. It is the interface between your - application logic and your templates. The context can be - visualized as a stack of objects that grows as we descend into - nested sections. - - When looking up a key, Dust searches the context stack from the - bottom up. There is no need to merge helper functions into the - template data; instead, create a base context onto which you can - push your local template data. 
- """ - def __init__(self, env, stack, global_vars=None, blocks=None): - self.env = env - self.stack = stack - if global_vars is None: - global_vars = {} - self.globals = global_vars - self.blocks = blocks - - @classmethod - def wrap(cls, env, context): - if isinstance(context, cls): - return context - return cls(env, Stack(context)) - - def get(self, path, cur=False): - "Retrieves the value `path` as a key from the context stack." - if isinstance(path, (str, unicode)): - if path[0] == '.': - cur = True - path = path[1:] - path = path.split('.') - return self._get(cur, path) - - def get_path(self, cur, down): - return self._get(cur, down) - - def _get(self, cur, down): - # many thanks to jvanasco for his contribution -mh 2014 - """ - * Get a value from the context - * @method `_get` - * @param {boolean} `cur` Get only from the current context - * @param {array} `down` An array of each step in the path - * @private - * @return {string | object} - """ - ctx = self.stack - length = 0 if not down else len(down) # TODO: try/except? - - if not length: - # wants nothing? ok, send back the entire payload - return ctx.head - - first_path_element = down[0] - - value = UndefinedValue - - if cur and not length: - ctx = ctx.head - else: - if not cur: - # Search up the stack for the first_path_element value - while ctx: - if isinstance(ctx.head, dict): - if first_path_element in ctx.head: - value = ctx.head[first_path_element] - break - ctx = ctx.tail - if value is UndefinedValue: - if first_path_element in self.globals: - ctx = self.globals[first_path_element] - else: - ctx = UndefinedValue - else: - ctx = value - else: - # if scope is limited by a leading dot, don't search up tree - if first_path_element in ctx.head: - ctx = ctx.head[first_path_element] - else: - ctx = UndefinedValue - - i = 1 - while ctx and ctx is not UndefinedValue and i < length: - if down[i] in ctx: - ctx = ctx[down[i]] - else: - ctx = UndefinedValue - i += 1 - - if ctx is UndefinedValue: - return None - else: - return ctx - - def push(self, head, index=None, length=None): - """\ - Pushes an arbitrary value `head` onto the context stack and returns - a new `Context` instance. 
Specify `index` and/or `length` to enable - enumeration helpers.""" - return Context(self.env, - Stack(head, self.stack, index, length), - self.globals, - self.blocks) - - def rebase(self, head): - """\ - Returns a new context instance consisting only of the value at - `head`, plus any previously defined global object.""" - return Context(self.env, - Stack(head), - self.globals, - self.blocks) - - def current(self): - """Returns the head of the context stack.""" - return self.stack.head - - def get_block(self, key): - blocks = self.blocks - if not blocks: - return None - fn = None - for block in blocks[::-1]: - try: - fn = block[key] - if fn: - break - except KeyError: - continue - return fn - - def shift_blocks(self, local_vars): - blocks = self.blocks - if local_vars: - if blocks: - new_blocks = blocks + [local_vars] - else: - new_blocks = [local_vars] - return Context(self.env, self.stack, self.globals, new_blocks) - return self - - -class Stack(object): - def __init__(self, head, tail=None, index=None, length=None): - self.head = head - self.tail = tail - self.index = index or 0 - self.of = length or 1 - # self.is_object = is_scalar(head) - - def __repr__(self): - return 'Stack(%r, %r, %r, %r)' % (self.head, - self.tail, - self.index, - self.of) - - -class Stub(object): - def __init__(self, callback): - self.head = Chunk(self) - self.callback = callback - self._out = [] - - @property - def out(self): - return ''.join(self._out) - - def flush(self): - chunk = self.head - while chunk: - if chunk.flushable: - self._out.append(chunk.data) - elif chunk.error: - self.callback(chunk.error, '') - self.flush = lambda self: None - return - else: - return - self.head = chunk = chunk.next - self.callback(None, self.out) - - -class Stream(object): - def __init__(self): - self.head = Chunk(self) - self.events = {} - - def flush(self): - chunk = self.head - while chunk: - if chunk.flushable: - self.emit('data', chunk.data) - elif chunk.error: - self.emit('error', chunk.error) - self.flush = lambda self: None - return - else: - return - self.head = chunk = chunk.next - self.emit('end') - - def emit(self, etype, data=None): - try: - self.events[etype](data) - except KeyError: - pass - - def on(self, etype, callback): - self.events[etype] = callback - return self - - -def is_scalar(obj): - return not hasattr(obj, '__iter__') or isinstance(obj, string_types) - - -def is_empty(obj): - try: - return obj is None or obj is False or len(obj) == 0 - except TypeError: - return False - - -class Chunk(object): - """\ - A Chunk is a Dust primitive for controlling the flow of the - template. Depending upon the behaviors defined in the context, - templates may output one or more chunks during rendering. A - handler that writes to a chunk directly must return the modified - chunk. - """ - def __init__(self, root, next_chunk=None, taps=None): - self.root = root - self.next = next_chunk - self.taps = taps - self._data, self.data = [], '' - self.flushable = False - self.error = None - - def write(self, data): - "Writes data to this chunk's buffer" - if self.taps: - data = self.taps.go(data) - self._data.append(data) - return self - - def end(self, data=None): - """\ - Writes data to this chunk's buffer and marks it as flushable. This - method must be called on any chunks created via chunk.map. Do - not call this method on a handler's main chunk -- dust.render - and dust.stream take care of this for you. 
- """ - if data: - self.write(data) - self.data = ''.join(self._data) - self.flushable = True - self.root.flush() - return self - - def map(self, callback): - """\ - Creates a new chunk and passes it to `callback`. Use map to wrap - asynchronous functions and to partition the template for - streaming. chunk.map tells Dust to manufacture a new chunk, - reserving a slot in the output stream before continuing on to - render the rest of the template. You must (eventually) call - chunk.end() on a mapped chunk to weave its content back into - the stream. - """ - cursor = Chunk(self.root, self.next, self.taps) - branch = Chunk(self.root, cursor, self.taps) - self.next = branch - self.data = ''.join(self._data) - self.flushable = True - callback(branch) - return cursor - - def tap(self, tap): - "Convenience methods for applying filters to a stream." - if self.taps: - self.taps = self.taps.push(tap) - else: - self.taps = Tap(tap) - return self - - def untap(self): - "Convenience methods for applying filters to a stream." - self.taps = self.taps.tail - return self - - def render(self, body, context): - """\ - Renders a template block, such as a default block or an else - block. Basically equivalent to body(chunk, context). - """ - return body(self, context) - - def tap_render(self, body, context): - output = [] - - def tmp_tap(data): - if data: - output.append(data) - return '' - self.tap(tmp_tap) - try: - self.render(body, context) - finally: - self.untap() - return ''.join(output) - - def reference(self, elem, context, auto, filters=None): - """\ - These methods implement Dust's default behavior for keys, - sections, blocks, partials and context helpers. While it is - unlikely you'll need to modify these methods or invoke them - from within handlers, the source code may be a useful point of - reference for developers. - """ - if callable(elem): - # this whole callable thing is a quirky thing about dust - try: - elem = elem(self, context) - except TypeError: - if getattr(context, 'is_strict', None): - raise - elem = repr(elem) - else: - if isinstance(elem, Chunk): - return elem - if is_empty(elem): - return self - else: - filtered = context.env.apply_filters(elem, auto, filters) - return self.write(filtered) - - def section(self, elem, context, bodies, params=None): - """\ - These methods implement Dust's default behavior for keys, sections, - blocks, partials and context helpers. While it is unlikely you'll need - to modify these methods or invoke them from within handlers, the - source code may be a useful point of reference for developers.""" - if callable(elem): - try: - elem = elem(self, context, bodies, params) - except TypeError: - if getattr(context, 'is_strict', None): - raise - elem = repr(elem) - else: - if isinstance(elem, Chunk): - return elem - body = bodies.get('block') - else_body = bodies.get('else') - if params: - context = context.push(params) - if not elem and else_body and elem is not 0: - # breaks with dust.js; dust.js doesn't render else blocks - # on sections referencing empty lists. 
- return else_body(self, context) - - if not body or elem is None: - return self - if elem is True: - return body(self, context) - elif isinstance(elem, dict) or is_scalar(elem): - return body(self, context.push(elem)) - else: - chunk = self - length = len(elem) - head = context.stack.head - for i, el in enumerate(elem): - new_ctx = context.push(el, i, length) - new_ctx.globals.update({'$len': length, - '$idx': i, - '$idx_1': i + 1}) - chunk = body(chunk, new_ctx) - return chunk - - def exists(self, elem, context, bodies, params=None): - """\ - These methods implement Dust's default behavior for keys, sections, - blocks, partials and context helpers. While it is unlikely you'll need - to modify these methods or invoke them from within handlers, the - source code may be a useful point of reference for developers.""" - if not is_empty(elem): - if bodies.get('block'): - return bodies['block'](self, context) - elif bodies.get('else'): - return bodies['else'](self, context) - return self - - def notexists(self, elem, context, bodies, params=None): - """\ - These methods implement Dust's default behavior for keys, - sections, blocks, partials and context helpers. While it is - unlikely you'll need to modify these methods or invoke them - from within handlers, the source code may be a useful point of - reference for developers. - """ - if is_empty(elem): - if bodies.get('block'): - return bodies['block'](self, context) - elif bodies.get('else'): - return bodies['else'](self, context) - return self - - def block(self, elem, context, bodies, params=None): - """\ - These methods implement Dust's default behavior for keys, - sections, blocks, partials and context helpers. While it is - unlikely you'll need to modify these methods or invoke them - from within handlers, the source code may be a useful point of - reference for developers. - """ - body = bodies.get('block') - if elem: - body = elem - if body: - body(self, context) - return self - - def partial(self, elem, context, params=None): - """These methods implement Dust's default behavior for keys, sections, - blocks, partials and context helpers. While it is unlikely you'll need - to modify these methods or invoke them from within handlers, the - source code may be a useful point of reference for developers. - """ - if params: - context = context.push(params) - if callable(elem): - _env = context.env - cback = lambda name, chk: _env.load_chunk(name, chk, context).end() - return self.capture(elem, context, cback) - return context.env.load_chunk(elem, self, context) - - def helper(self, name, context, bodies, params=None): - """\ - These methods implement Dust's default behavior for keys, - sections, blocks, partials and context helpers. While it is - unlikely you'll need to modify these methods or invoke them - from within handlers, the source code may be a useful point of - reference for developers. - """ - return context.env.helpers[name](self, context, bodies, params) - - def capture(self, body, context, callback): - def map_func(chunk): - def stub_cb(err, out): - if err: - chunk.set_error(err) - else: - callback(out, chunk) - stub = Stub(stub_cb) - body(stub.head, context).end() - return self.map(map_func) - - def set_error(self, error): - "Sets an error on this chunk and immediately flushes the output." 
- self.error = error - self.root.flush() - return self - - -class Tap(object): - def __init__(self, head=None, tail=None): - self.head = head - self.tail = tail - - def push(self, tap): - return Tap(tap, self) - - def go(self, value): - tap = self - while tap: - value = tap.head(value) # TODO: type errors? - tap = tap.tail - return value - - def __repr__(self): - cn = self.__class__.__name__ - return '%s(%r, %r)' % (cn, self.head, self.tail) - - -def to_unicode(obj): - try: - return unicode(obj) - except UnicodeDecodeError: - return unicode(obj, encoding='utf8') - - -DEFAULT_FILTERS = { - 'h': escape_html, - 's': to_unicode, - 'j': escape_js, - 'u': escape_uri_path, - 'uc': escape_uri_component, - 'cn': comma_num, - 'pp': pp_filter, - 'ppjson': ppjson_filter} - - -######### -# Pragmas -######### - - -def esc_pragma(compiler, context, bodies, params): - old_auto = compiler.auto - if not context: - context = 'h' - if context == 's': - compiler.auto = '' - else: - compiler.auto = context - out = compiler._parts(bodies['block']) - compiler.auto = old_auto - return out - - -DEFAULT_PRAGMAS = { - 'esc': esc_pragma -} - - -########### -# Interface -########### - -def load_template_path(path, encoding='utf-8'): - """ - split off `from_path` so __init__ can use - returns a tuple of the source and adjusted absolute path - """ - abs_path = os.path.abspath(path) - if not os.path.isfile(abs_path): - raise TemplateNotFound(abs_path) - with codecs.open(abs_path, 'r', encoding) as f: - source = f.read() - return (source, abs_path) - - -class Template(object): - # no need to set defaults on __init__ - last_mtime = None - is_convertable = True - - def __init__(self, - name, - source, - source_file=None, - optimize=True, - keep_source=True, - env=None, - lazy=False, - ): - if not source and source_file: - (source, source_abs_path) = load_template_path(source_file) - self.name = name - self.source = source - self.source_file = source_file - self.time_generated = time.time() - if source_file: - self.last_mtime = os.path.getmtime(source_file) - self.optimized = optimize - if env is None: - env = default_env - self.env = env - - if lazy: # lazy is only for testing - self.render_func = None - return - (render_code, - self.render_func - ) = self._get_render_func(optimize) - if not keep_source: - self.source = None - - @classmethod - def from_path(cls, path, name=None, encoding='utf-8', **kw): - """classmethod. - Builds a template from a filepath. - args: - ``path`` - kwargs: - ``name`` default ``None``. - ``encoding`` default ``utf-8``. - """ - (source, abs_path) = load_template_path(path) - if not name: - name = path - return cls(name=name, source=source, source_file=abs_path, **kw) - - @classmethod - def from_ast(cls, ast, name=None, **kw): - """classmethod - Builds a template from an AST representation. - This is only provided as an invert to `to_ast` - args: - ``ast`` - kwargs: - ``name`` default ``None``. - """ - template = cls(name=name, source='', lazy=True, **kw) - (render_code, - render_func - ) = template._ast_to_render_func(ast) - template.render_func = render_func - template.is_convertable = False - return template - - @classmethod - def from_python_string(cls, python_string, name=None, **kw): - """classmethod - Builds a template from an python string representation. - This is only provided as an invert to `to_python_string` - args: - ``python_string`` - kwargs: - ``name`` default ``None``. 
- """ - template = cls(name=name, source='', lazy=True, **kw) - render_code = _python_compile(python_string) - template.render_func = _python_exec(render_code, name='render') - template.is_convertable = False - return template - - @classmethod - def from_python_code(cls, python_code, name=None, **kw): - """classmethod - Builds a template from python code object. - This is only provided as an invert to `to_python_code` - args: - ``python_code`` - kwargs: - ``name`` default ``None``. - """ - template = cls(name=name, source='', lazy=True, **kw) - template.render_func = _python_exec(python_code, name='render') - template.is_convertable = False - return template - - @classmethod - def from_python_func(cls, python_func, name=None, **kw): - """classmethod - Builds a template from an compiled python function. - This is only provided as an invert to `to_python_func` - args: - ``python_func`` - kwargs: - ``name`` default ``None``. - """ - template = cls(name=name, source='', lazy=True, **kw) - template.render_func = python_func - template.is_convertable = False - return template - - def to_ast(self, optimize=True, raw=False): - """Generates the AST for a given template. - This can be inverted with the classmethod `from_ast`. - - kwargs: - ``optimize`` default ``True``. - ``raw`` default ``False``. - - Note: this is just a public function for `_get_ast` - """ - if not self.is_convertable: - raise TemplateConversionException() - return self._get_ast(optimize=optimize, raw=raw) - - def to_python_string(self, optimize=True): - """Generates the Python string representation for a template. - This can be inverted with the classmethod `from_python_string`. - - kwargs: - ``optimize`` default ``True``. - - Note: this is just a public method for `_get_render_string` - """ - if not self.is_convertable: - raise TemplateConversionException() - python_string = self._get_render_string(optimize=optimize) - return python_string - - def to_python_code(self, optimize=True): - """Generates the Python code representation for a template. - This can be inverted with the classmethod `from_python_code`. - - kwargs: - ``optimize`` default ``True``. - - Note: this is just a public method for `_get_render_func` - """ - if not self.is_convertable: - raise TemplateConversionException() - (python_code, - python_string - ) = self._get_render_func(optimize=optimize) - return python_code - - def to_python_func(self, optimize=True): - """Makes the python render func available. - This can be inverted with the classmethod `from_python_func`. 
- - Note: this is just a public method for `_get_render_func` - """ - if self.render_func: - return self.render_func - if not self.is_convertable: - raise TemplateConversionException() - (render_code, render_func) = self._get_render_func(optimize=optimize) - return render_func - - def render(self, model, env=None): - env = env or self.env - rendered = [] - - def tmp_cb(err, result): - # TODO: get rid of - if err: - print('Error on template %r: %r' % (self.name, err)) - raise RenderException(err) - else: - rendered.append(result) - return result - - chunk = Stub(tmp_cb).head - self.render_chunk(chunk, Context.wrap(env, model)).end() - return rendered[0] - - def render_chunk(self, chunk, context): - if not self.render_func: - # to support laziness for testing - (render_code, - self.render_func - ) = self._get_render_func() - return self.render_func(chunk, context) - - def _get_tokens(self): - if not self.source: - return None - return tokenize(self.source) - - def _get_ast(self, optimize=False, raw=False): - if not self.source: - return None - try: - dast = ParseTree.from_source(self.source).to_dust_ast() - except ParseError as pe: - pe.source_file = self.source_file - raise - if raw: - return dast - return self.env.filter_ast(dast, optimize) - - def _get_render_string(self, optimize=True): - """ - Uses `optimize=True` by default because it makes the output easier to - read and more like dust's docs - - This was previously `_get_render_func(..., ret_str=True)` - """ - ast = self._get_ast(optimize) - if not ast: - return None - # for testing/dev purposes - return Compiler(self.env)._gen_python(ast) - - def _get_render_func(self, optimize=True, ret_str=False): - """ - Uses `optimize=True` by default because it makes the output easier to - read and more like dust's docs - - split `ret_str=True` into `_get_render_string()` - - Note that this doesn't save the render_code/render_func. - It is compiled as needed. - """ - ast = self._get_ast(optimize) - if not ast: - return (None, None) - # consolidated the original code into _ast_to_render_func as-is below - (render_code, - render_func - ) = self._ast_to_render_func(ast) - return (render_code, render_func) - - def _ast_to_render_func(self, ast): - """this was part of ``_get_render_func`` but is better implemented - as an separate function so that AST can be directly loaded. 
- """ - compiler = Compiler(self.env) - (python_code, - python_func - ) = compiler.compile(ast) - return (python_code, python_func) - - def __repr__(self): - cn = self.__class__.__name__ - name, source_file = self.name, self.source_file - if not source_file: - return '<%s name=%r>' % (cn, name) - return '<%s name=%r source_file=%r>' % (cn, name, source_file) - - -class AshesException(Exception): - pass - - -class TemplateNotFound(AshesException): - def __init__(self, name): - self.name = name - super(TemplateNotFound, self).__init__('could not find template: %r' - % name) - - -class RenderException(AshesException): - pass - - -class ParseError(AshesException): - token = None - source_file = None - - def __init__(self, message, line_no=None, token=None): - self.message = message - self.token = token - self._line_no = line_no - - super(ParseError, self).__init__(self.__str__()) - - @property - def line_no(self): - if self._line_no: - return self._line_no - if getattr(self.token, 'start_line', None) is not None: - return self.token.start_line - return None - - @line_no.setter - def set_line_no(self, val): - self._line_no = val - - def __str__(self): - msg = self.message - infos = [] - if self.source_file: - infos.append('in %s' % self.source_file) - if self.line_no is not None: - infos.append('line %s' % self.line_no) - if infos: - msg += ' (%s)' % ' - '.join(infos) - return msg - - -class TemplateConversionException(AshesException): - def __init__(self): - super(TemplateConversionException, self).__init__('only templates from source ' - 'are convertable') - - -class BaseAshesEnv(object): - template_type = Template - autoescape_filter = 'h' - - def __init__(self, - loaders=None, - helpers=None, - filters=None, - special_chars=None, - optimizers=None, - pragmas=None, - auto_reload=True): - self.templates = {} - self.loaders = list(loaders or []) - self.filters = dict(DEFAULT_FILTERS) - if filters: - self.filters.update(filters) - self.helpers = dict(DEFAULT_HELPERS) - if helpers: - self.helpers.update(helpers) - self.special_chars = dict(DEFAULT_SPECIAL_CHARS) - if special_chars: - self.special_chars.update(special_chars) - self.optimizers = dict(DEFAULT_OPTIMIZERS) - if optimizers: - self.optimizers.update(optimizers) - self.pragmas = dict(DEFAULT_PRAGMAS) - if pragmas: - self.pragmas.update(pragmas) - self.auto_reload = auto_reload - - def log(self, level, name, message): - return # print(level, '-', name, '-', message) - - def render(self, name, model): - tmpl = self.load(name) - return tmpl.render(model, self) - - def load(self, name): - """Loads a template. - - args: - ``name`` template name - """ - try: - template = self.templates[name] - except KeyError: - template = self._load_template(name) - self.register(template) - if self.auto_reload: - if not getattr(template, 'source_file', None): - return template - mtime = os.path.getmtime(template.source_file) - if mtime > template.last_mtime: - template = self._load_template(name) - self.register(template) - return self.templates[name] - - def _load_template(self, name): - for loader in self.loaders: - try: - source = loader.load(name, env=self) - except TemplateNotFound: - continue - else: - return source - raise TemplateNotFound(name) - - def load_all(self, do_register=True, **kw): - """Loads all templates. 
- - args: - ``do_register`` default ``True` - """ - all_tmpls = [] - for loader in reversed(self.loaders): - # reversed so the first loader to have a template - # will take precendence on registration - if callable(getattr(loader, 'load_all', None)): - tmpls = loader.load_all(self, **kw) - all_tmpls.extend(tmpls) - if do_register: - for t in tmpls: - self.register(t) - return all_tmpls - - def register(self, template, name=None): - if name is None: - name = template.name - self.templates[name] = template - return - - def register_path(self, path, name=None, **kw): - """\ - Reads in, compiles, and registers a single template from a specific - path to a file containing the dust source code. - """ - kw['env'] = self - ret = self.template_type.from_path(path=path, name=name, **kw) - self.register(ret) - return ret - - def register_source(self, name, source, **kw): - """\ - Compiles and registers a single template from source code - string. Assumes caller already decoded the source string. - """ - kw['env'] = self - ret = self.template_type(name=name, source=source, **kw) - self.register(ret) - return ret - - def filter_ast(self, ast, optimize=True): - if optimize: - optimizers = self.optimizers - else: - optimizers = UNOPT_OPTIMIZERS - optimizer = Optimizer(optimizers, self.special_chars) - ret = optimizer.optimize(ast) - return ret - - def apply_filters(self, string, auto, filters): - filters = filters or [] - if not filters: - if auto: - filters = ['s', auto] - else: - filters = ['s'] - elif filters[-1] != 's': - if auto and auto not in filters: - filters += ['s', auto] - else: - filters += ['s'] - for f in filters: - filt_fn = self.filters.get(f) - if filt_fn: - string = filt_fn(string) - return string - - def load_chunk(self, name, chunk, context): - try: - tmpl = self.load(name) - except TemplateNotFound as tnf: - context.env.log('error', 'load_chunk', - 'TemplateNotFound error: %r' % tnf.name) - return chunk.set_error(tnf) - return tmpl.render_chunk(chunk, context) - - def __iter__(self): - return self.templates.itervalues() - - -class AshesEnv(BaseAshesEnv): - """ - A slightly more accessible Ashes environment, with more - user-friendly options exposed. - """ - def __init__(self, paths=None, keep_whitespace=True, *a, **kw): - if isinstance(paths, string_types): - paths = [paths] - self.paths = list(paths or []) - self.keep_whitespace = keep_whitespace - self.is_strict = kw.pop('is_strict', False) - exts = list(kw.pop('exts', DEFAULT_EXTENSIONS)) - - super(AshesEnv, self).__init__(*a, **kw) - - for path in self.paths: - tpl = TemplatePathLoader(path, exts) - self.loaders.append(tpl) - - def filter_ast(self, ast, optimize=None): - optimize = not self.keep_whitespace # preferences override - return super(AshesEnv, self).filter_ast(ast, optimize) - - -def iter_find_files(directory, patterns, ignored=None): - """\ - Finds files under a `directory`, matching `patterns` using "glob" - syntax (e.g., "*.txt"). It's also possible to ignore patterns with - the `ignored` argument, which uses the same format as `patterns. 
- - (from osutils.py in the boltons package) - """ - if isinstance(patterns, string_types): - patterns = [patterns] - pats_re = re.compile('|'.join([fnmatch.translate(p) for p in patterns])) - - if not ignored: - ignored = [] - elif isinstance(ignored, string_types): - ignored = [ignored] - ign_re = re.compile('|'.join([fnmatch.translate(p) for p in ignored])) - for root, dirs, files in os.walk(directory): - for basename in files: - if pats_re.match(basename): - if ignored and ign_re.match(basename): - continue - filename = os.path.join(root, basename) - yield filename - return - - -def walk_ext_matches(path, exts=None, ignored=None): - if exts is None: - exts = DEFAULT_EXTENSIONS - if ignored is None: - ignored = DEFAULT_IGNORED_PATTERNS - patterns = list(['*.' + e.lstrip('*.') for e in exts]) - - return sorted(iter_find_files(directory=path, - patterns=patterns, - ignored=ignored)) - - -class TemplatePathLoader(object): - def __init__(self, root_path, exts=None, encoding='utf-8'): - self.root_path = os.path.normpath(root_path) - self.encoding = encoding - self.exts = exts or list(DEFAULT_EXTENSIONS) - - def load(self, path, env=None): - env = env or default_env - norm_path = os.path.normpath(path) - if path.startswith('../'): - raise ValueError('no traversal above loader root path: %r' % path) - if not path.startswith(self.root_path): - norm_path = os.path.join(self.root_path, norm_path) - abs_path = os.path.abspath(norm_path) - template_name = os.path.relpath(abs_path, self.root_path) - template_type = env.template_type - return template_type.from_path(name=template_name, - path=abs_path, - encoding=self.encoding, - env=env) - - def load_all(self, env, exts=None, **kw): - ret = [] - exts = exts or self.exts - tmpl_paths = walk_ext_matches(self.root_path, exts) - for tmpl_path in tmpl_paths: - ret.append(self.load(tmpl_path, env)) - return ret - - -class FlatteningPathLoader(TemplatePathLoader): - """ - I've seen this mode of using dust templates in a couple places, - but really it's lazy and too ambiguous. It increases the chances - of silent conflicts and makes it hard to tell which templates refer - to which just by looking at the template code. 
- """ - def __init__(self, *a, **kw): - self.keep_ext = kw.pop('keep_ext', True) - super(FlatteningPathLoader, self).__init__(*a, **kw) - - def load(self, *a, **kw): - tmpl = super(FlatteningPathLoader, self).load(*a, **kw) - name = os.path.basename(tmpl.name) - if not self.keep_ext: - name, ext = os.path.splitext(name) - tmpl.name = name - return tmpl - -try: - import bottle -except ImportError: - pass -else: - class AshesBottleTemplate(bottle.BaseTemplate): - extensions = list(bottle.BaseTemplate.extensions) - extensions.extend(['ash', 'ashes', 'dust']) - - def prepare(self, **options): - if not self.source: - self.source = self._load_source(self.name) - if self.source is None: - raise TemplateNotFound(self.name) - - options['name'] = self.name - options['source'] = self.source - options['source_file'] = self.filename - for key in ('optimize', 'keep_source', 'env'): - if key in self.settings: - options.setdefault(key, self.settings[key]) - env = self.settings.get('env', default_env) - # I truly despise 2.6.4's unicode kwarg bug - options = dict([(str(k), v) for k, v in options.iteritems()]) - self.tpl = env.register_source(**options) - - def _load_source(self, name): - fname = self.search(name, self.lookup) - if not fname: - return - with codecs.open(fname, "rb", self.encoding) as f: - return f.read() - - def render(self, *a, **kw): - for dictarg in a: - kw.update(dictarg) - context = self.defaults.copy() - context.update(kw) - return self.tpl.render(context) - - from functools import partial as _fp - ashes_bottle_template = _fp(bottle.template, - template_adapter=AshesBottleTemplate) - ashes_bottle_view = _fp(bottle.view, - template_adapter=AshesBottleTemplate) - del bottle - del _fp - - -ashes = default_env = AshesEnv() - - -def _main(): - # TODO: accidentally unclosed tags may consume - # trailing buffers without warning - try: - tmpl = ('{@eq key=hello value="True" type="boolean"}' - '{hello}, world' - '{:else}' - 'oh well, world' - '{/eq}' - ', {@size key=hello/} characters') - ashes.register_source('hi', tmpl) - print(ashes.render('hi', {'hello': 'ayy'})) - except Exception as e: - import pdb;pdb.post_mortem() - raise - - ae = AshesEnv(filters={'cn': comma_num}) - ae.register_source('cn_tmpl', 'comma_numd: {thing|cn}') - # print(ae.render('cn_tmpl', {'thing': 21000})) - ae.register_source('tmpl', '{`{ok}thing`}') - print(ae.render('tmpl', {'thing': 21000})) - - ae.register_source('tmpl2', '{test|s}') - out = ae.render('tmpl2', {'test': [''] * 10}) - print(out) - - ae.register_source('tmpl3', '{@iterate sort="desc" sort_key=1 key=lol}' - '{$idx} - {$0}: {$1}{~n}{/iterate}') - out = ae.render('tmpl3', {'lol': {'uno': 1, 'dos': 2}}) - print(out) - out = ae.render('tmpl3', {'lol': [(1, 2, 3), (4, 5, 6)]}) - print(out) - - print(escape_uri_path("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) - print(escape_uri_component("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) - print('') - ae.register_source('tmpl4', '{#iterable}{$idx_1}/{$len}: {.}{@sep}, {/sep}{/iterable}') - out = ae.render('tmpl4', {'iterable': range(100, 108)}) - print(out) - - tmpl = '''\ - {#.} - row{~n} - {#.} - {.}{~n} - {/.} - {/.}''' - ashes.keep_whitespace = False - ashes.autoescape_filter = '' - ashes.register_source('nested_lists', tmpl) - print(ashes.render('nested_lists', [[1, 2], [3, 4]])) - - -class CLIError(ValueError): - pass - - -def _simple_render(template_path, template_literal, env_path_list, - model_path, model_literal, - trim_whitespace, filter, no_filter, - 
output_path, output_encoding, verbose): - # TODO: default value (placeholder for missing values) - env = AshesEnv(env_path_list) - env.keep_whitespace = not trim_whitespace - if filter in env.filters: - env.autoescape_filter = filter - else: - raise CLIError('unexpected filter %r, expected one of %r' - % (filter, env.filters)) - if no_filter: - env.autoescape_filter = '' - - if template_literal: - tmpl_obj = env.register_source('_literal_template', template_literal) - else: - if not template_path: - raise CLIError('expected template or template literal') - try: - tmpl_obj = env.load(template_path) - except (KeyError, TemplateNotFound): - tmpl_obj = env.register_path(template_path) - - if model_literal: - model = json.loads(model_literal) - elif not model_path: - raise CLIError('expected model or model literal') - elif model_path == '-': - model = json.load(sys.stdin) - else: - with open(model_path) as f: - model = json.load(f) - - output_text = tmpl_obj.render(model) - output_bytes = output_text.encode(output_encoding) - if output_path == '-': - print(output_bytes) - else: - with open(output_path, 'w') as f: - f.write(output_bytes) - return - - -def main(): - # using optparse for backwards compat with 2.6 (and earlier, maybe) - from optparse import OptionParser - - prs = OptionParser(description="render a template using a JSON input", - version='ashes %s' % (__version__,)) - ao = prs.add_option - ao('--env-path', - help="paths to search for templates, separate paths with :") - ao('--filter', default='h', - help="autoescape values with this filter, defaults to 'h' for HTML") - ao('--no-filter', action="store_true", - help="disables default HTML-escaping filter, overrides --filter") - ao('--trim-whitespace', action="store_true", - help="removes whitespace on template load") - ao('-m', '--model', dest='model_path', - help="path to the JSON model file, default - for stdin") - ao('-M', '--model-literal', - help="the literal string of the JSON model, overrides model") - ao('-o', '--output', dest='output_path', default='-', - help="path to the output file, default - for stdout") - ao('--output-encoding', default='utf-8', - help="encoding for the output, default utf-8") - ao('-t', '--template', dest='template_path', - help="path of template to render, absolute or relative to env-path") - ao('-T', '--template-literal', - help="the literal string of the template, overrides template") - ao('--verbose', help="emit extra output on stderr") - - opts, _ = prs.parse_args() - kwargs = dict(opts.__dict__) - - kwargs['env_path_list'] = (kwargs.pop('env_path') or '').split(':') - try: - _simple_render(**kwargs) - except CLIError as clie: - err_msg = '%s; use --help option for more info.' 
% (clie.args[0],) - prs.error(err_msg) - return - - -if __name__ == '__main__': - main() diff --git a/src/command_line.py b/src/command_line.py index d070945..ebd2d36 100644 --- a/src/command_line.py +++ b/src/command_line.py @@ -24,11 +24,7 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr -try: - import irpy -except: - import lib_irpy as irpy - +from lib.manager import irpy import getopt, sys from version import version diff --git a/src/entity.py b/src/entity.py index 33ee3ea..2c2e80b 100644 --- a/src/entity.py +++ b/src/entity.py @@ -28,11 +28,7 @@ from irpf90_t import * from util import * from command_line import command_line import sys -try: - import irpy -except: - import lib_irpy as irpy - +from lib.manager import irpy class Entity(object): '''All lines between BEGIN_PROVIDER and END_PROVIDER included @@ -164,92 +160,35 @@ class Entity(object): return any(self.d_entity[i].is_written for i in self.parents) @irpy.lazy_property - def writer(self): + def io_er(self): if not self.is_main: result = [] - else: - from util import mangled - name = self.name - result = [ \ - "subroutine writer_%s(irp_num)"%(name), - " use %s"%(self.fmodule), - " implicit none", - " character*(*), intent(in) :: irp_num", - " logical :: irp_is_open", - " integer :: irp_iunit" ] - if command_line.do_debug: - length = len("writer_%s" % (self.name)) - result += [\ - " character*(%d) :: irp_here = 'writer_%s'"%(length,name), - " call irp_enter(irp_here)" ] - result += [ \ - " if (.not.%s_is_built) then"%(self.same_as), - " call provide_%s"%(self.same_as), - " endif" ] - result += map(lambda x: " call writer_%s(irp_num)" % (x), mangles(self.needs)) - result += [ \ - " irp_is_open = .True.", - " irp_iunit = 9", - " do while (irp_is_open)", - " irp_iunit = irp_iunit+1", - " inquire(unit=irp_iunit,opened=irp_is_open)", - " enddo" ] - for n in self.l_name: - result += [\ - " open(unit=irp_iunit,file='irpf90_%s_'//trim(irp_num),form='FORMATTED',status='UNKNOWN',action='WRITE')"%(n), - " write(irp_iunit,*) %s%s"%(n,build_dim(self.d_entity[n].dim,colons=True)), - " close(irp_iunit)" ] - if command_line.do_debug: - result.append(" call irp_leave(irp_here)") - result.append("end subroutine writer_%s" % (name)) - result.append("") - return result + + from util import mangled + from util import ashes_env + name = self.name + + d_template= {'name':name, + 'fmodule':self.fmodule, + 'same_as' : self.same_as, + 'do_debug':command_line.do_debug, + 'children':mangled(self.needs,self.d_entity), + 'group_entity': [{'name':n,'dim':build_dim(self.cm_d_variable[n].dim,colons=True)} for n in self.l_name]} + + + return ashes_env('io.f90',d_template).split('\n') + + def reader(self): + return io.er.split('TOKEN_SPLIT')[0] + + def writer(self): + return io.er.split('TOKEN_SPLIT')[1] @irpy.lazy_property_mutable def is_read(self): '''Check if it will be read from disk''' return any(self.d_entity[i].is_read for i in self.parents) - @irpy.lazy_property - def reader(self): - if not self.is_main: - result = [] - else: - from util import mangled - name = self.name - result = [ \ - "subroutine reader_%s(irp_num)"%(name), - " use %s"%(self.fmodule), - " implicit none", - " character*(*), intent(in) :: irp_num", - " logical :: irp_is_open", - " integer :: irp_iunit" ] - if command_line.do_debug: - length = len("reader_%s" % (name)) - result += [\ - " character*(%d) :: irp_here = 'reader_%s'"%(length,name), - " call irp_enter(irp_here)" ] - result += map(lambda x: " call reader_%s(irp_num)" % (x), mangled(self.needs)) - result += [ \ - " 
irp_is_open = .True.", - " irp_iunit = 9", - " do while (irp_is_open)", - " inquire(unit=irp_iunit,opened=irp_is_open)", - " enddo"] - for n in self.l_name: - result += [\ - " open(unit=irp_iunit,file='irpf90_%s_'//trim(irp_num),form='FORMATTED',status='OLD',action='READ')"%(n), - " read(irp_iunit,*) %s%s"%(n,build_dim(self.cm_d_variable[n].dim,colons=True)), - " close(irp_iunit)" ] - result += [ \ - " call touch_%s"%(name), - " %s_is_built = .True."%(name) ] - if command_line.do_debug: - result.append(" call irp_leave(irp_here)") - result.append("end subroutine reader_%s" % (name)) - result.append("") - return result - @irpy.lazy_property def is_source_touch(self): return (Touch in self.d_type_lines or SoftTouch in self.d_type_lines) diff --git a/src/irpf90_t.py b/src/irpf90_t.py index 6011c25..7d845d6 100644 --- a/src/irpf90_t.py +++ b/src/irpf90_t.py @@ -32,11 +32,8 @@ from zlib import crc32 import os irp_id = abs(crc32(os.getcwd())) -try: - import irpy -except: - import lib_irpy as irpy - +from lib.manager import irpy +from util import logger class Line(object): def __init__(self, i, text, filename): diff --git a/src/irpy_files.py b/src/irpy_files.py index ef80965..9f6dfae 100644 --- a/src/irpy_files.py +++ b/src/irpy_files.py @@ -1,10 +1,8 @@ from util import parmap, lazy_write_file from util import flatten, listdir +from util import logger -try: - import irpy -except: - import lib_irpy as irpy +from lib.manager import irpy import os import irpf90_t @@ -12,9 +10,6 @@ import sys from command_line import command_line -from util import logger - - class Irpy_comm_world(object): '''Maestro.''' diff --git a/src/lib_irpy.py b/src/lib_irpy.py deleted file mode 100755 index 574ca3c..0000000 --- a/src/lib_irpy.py +++ /dev/null @@ -1,160 +0,0 @@ -#Handle the execution stack -from collections import defaultdict -d_path = defaultdict(list) -d_last_caller = defaultdict(lambda: None) - -def genealogy(obj, _node, direction,degree_max=100): - """Return the genealogy of a _node. - Direction is $parents or $children, recurse accordingly""" - - def sap(_node, direction, visited=set(), degree=0): - - visited.add(_node) - try: - s = getattr(obj, "{0}_{1}".format(_node, direction)) - except AttributeError: - s = set() - - for next_ in s - visited: - - if degree < degree_max: - sap(next_, direction, visited, degree+1) - - return visited - - s = sap(_node, direction) - set([_node]) - - return s - - -def addattr(obj, name, value): - try: - s = getattr(obj, name) - except AttributeError: - setattr(obj, name, set([value])) - else: - setattr(obj, name, s | set([value])) - -def removeattr(obj, name, value): - try: - s = getattr(obj, name) - except AttributeError: - pass - else: - setattr(obj, name, s - set([value])) - - -# _ -# | \ _ _ _ ._ _. _|_ _ ._ -# |_/ (/_ (_ (_) | (_| |_ (_) | -# -class lazy_property(object): - """ - My little Property - My little Property - My little Property... 
friend - """ - - def __init__(self, provider, init_node=False, immutable=True): - """Provider: If a function who will be used to compute the node - init_node: If the name of the node - immutable: If immutable is set you cannot set the node""" - - self.provider = provider - self.init_node = init_node - self.immutable = immutable - - if not self.init_node: - name = provider.__name__ - else: - name = self.init_node - - #Kind of human readable identifier - self._node = "_%s_%s" % (name, id(provider)) - - def __get__(self, obj, objtype): - "Get the value of the node and handle the genealogy" - - _node = self._node - - if d_path[obj]: - _caller = d_path[obj][-1] - if _caller != d_last_caller[obj]: - addattr(obj, "%s_parents" % _node, _caller) - addattr(obj, "%s_children" % _caller, _node) - d_last_caller[obj] = _caller - - #Wanted: value. Cached or Computed - try: - value = getattr(obj, _node) - except AttributeError: - - d_path[obj].append(_node) - - value = self.provider(obj) - setattr(obj, _node, value) - - d_path[obj].pop() - - return value - - def __set__(self, obj, value): - """Set the value of the node - But wait, init_node are "gradual typed" variable! Youpi! - Idea borrowed from the-worst-programming-language-ever (http://bit.ly/13tc6XW) - """ - - _node = self._node - - if not self.init_node: - - if self.immutable: - raise AttributeError("Immutable Node {0}".format(self._node)) - - #Set the new value - setattr(obj, _node, value) - - #Node ancestor need to be recompute is asked - for _parent in genealogy(obj, _node, "parents"): - if hasattr(obj, _parent): delattr(obj, _parent) - - #Node abandoned her children, - for _child in genealogy(obj, _node, "children",degree_max=1): - removeattr(obj, "%s_parents" % _child, _node) - - #Indeed node is now a leaf - setattr(obj, "%s_children" % _node, set()) - - else: - setattr(obj, _node, value) - self.init_node = False - - -def lazy_property_mutable(provider): - "Return a lazy_property mutable" - return lazy_property(provider=provider, immutable=False) - - -def lazy_property_leaves(mutables=(), immutables=()): - "Set to properties for the __init__ method" - - def leaf_decorator(func): - def func_wrapper(self, *args, **kwargs): - - for node in set(immutables) | set(mutables): - - def provider(self): - return getattr(self, "_%s" % node) - - p = lazy_property(provider=provider, - init_node=node, - immutable=node in immutables) - - #If this ugly? Yeah... Is this an issue? I don't really know - setattr(self.__class__, node, p) - - return func(self, *args, **kwargs) - - return func_wrapper - - return leaf_decorator diff --git a/src/module.py b/src/module.py index bd6395e..532e739 100644 --- a/src/module.py +++ b/src/module.py @@ -1,4 +1,4 @@ -#!/unr/bin/env python +#!/usr/bin/env python # IRPF90 is a Fortran90 preprocessor written in Python for programming using # the Implicit Reference to Parameters (IRP) method. 
# Copyright (C) 2009 Anthony SCEMAMA diff --git a/src/preprocessed_text.py b/src/preprocessed_text.py index 1abee2f..dcdf631 100644 --- a/src/preprocessed_text.py +++ b/src/preprocessed_text.py @@ -59,6 +59,7 @@ simple_dict = { "begin_provider": Begin_provider, "begin_provider_immutable": Begin_provider, "&begin_provider": Cont_provider, + "&begin_provider_immutable": Cont_provider, "end_provider": End_provider, "end_provider_immutable": End_provider, "assert": Assert, diff --git a/src/routine.py b/src/routine.py index 31a4532..e38c697 100644 --- a/src/routine.py +++ b/src/routine.py @@ -28,11 +28,7 @@ from irpf90_t import * from util import logger - -try: - import irpy -except: - import lib_irpy as irpy +from lib.manager import irpy class Routine(object): ''' diff --git a/src/templates/ioer.f90 b/src/templates/ioer.f90 new file mode 100644 index 0000000..ab621e2 --- /dev/null +++ b/src/templates/ioer.f90 @@ -0,0 +1,71 @@ +SUBROUTINE write_{name}(irp_num) + + USE {fmodule} + IMPLICIT NONE + + CHARACTER*(*), INTENT(IN) :: irp_num + LOGICAL :: irp_is_open = .TRUE. + INTEGER :: irp_iunit = 9 + + {?do_debug} + CHARACTER*(7+{@size key=name/}),PARAMETER :: irp_here = 'writer_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + IF (.NOT.{same_as}_is_built) THEN + CALL provide_{same_as} + ENDIF + + {children} + CALL write_{.}(irp_num) + {/children} + + DO WHILE (irp_is_open) + irp_iunit = irp_inuit + 1 + INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + END DO + + {#group_entity} + OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + WRITE(irp_inuit,*) {.}{dim} + CLOSE(irp_inuit) + {/group_entity} + + {?do_debug} CALL irp_leave(irp_here) {/do_debug} + +END SUBROUTINE write_{name} + +!TOKEN_SPLIT + +SUBROUTINE read_{name}(irp_num) + + USE {fmodule} + IMPLICIT NONE + + CHARACTER*(*), INTENT(IN) :: irp_num + LOGICAL :: irp_is_open = .TRUE. + INTEGER :: irp_iunit = 9 + + {?do_debug} + CHARACTER*(5+{@size key=name/}),PARAMETER :: irp_here = 'read_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + DO WHILE (irp_is_open) + irp_iunit = irp_inuit + 1 + INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + END DO + + {#group_entity} + OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + READ(irp_inuit,*) {name}{dim} + CLOSE(irp_inuit) + {/group_entity} + + CALL touch_{name} + {?do_debug} CALL irp_leave(irp_here) {/do_debug} + +END SUBROUTINE read_{name} + diff --git a/src/templates/writer.f90 b/src/templates/writer.f90 new file mode 100644 index 0000000..1652eec --- /dev/null +++ b/src/templates/writer.f90 @@ -0,0 +1,69 @@ +SUBROUTINE write_{name}(irp_num) + + USE {fmodule} + IMPLICIT NONE + + CHARACTER*(*), INTENT(IN) :: irp_num + LOGICAL :: irp_is_open = .TRUE. 
+ INTEGER :: irp_iunit = 9 + + {?do_debug} + CHARACTER*(7+{@size key=name/}),PARAMETER :: irp_here = 'writer_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + IF (.NOT.{same_as}_is_built) THEN + CALL provide_{same_as} + ENDIF + + {children} + CALL write_{.}(irp_num) + {/children} + + DO WHILE (irp_is_open) + irp_iunit = irp_inuit + 1 + INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + END DO + + {#group_entity} + OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + WRITE(irp_inuit,*) {.}{dim} + CLOSE(irp_inuit) + {/group_entity} + + {?do_debug} CALL irp_leave(irp_here) {/do_debug} + +END SUBROUTINE write_{name} + +SUBROUTINE read_{name}(irp_num) + + USE {fmodule} + IMPLICIT NONE + + CHARACTER*(*), INTENT(IN) :: irp_num + LOGICAL :: irp_is_open = .TRUE. + INTEGER :: irp_iunit = 9 + + {?do_debug} + CHARACTER*(5+{@size key=name/}),PARAMETER :: irp_here = 'read_{name}' + {/do_debug} + + {?do_debug} CALL irp_enter(irp_here) {/do_debug} + + DO WHILE (irp_is_open) + irp_iunit = irp_inuit + 1 + INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + END DO + + {#group_entity} + OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + READ(irp_inuit,*) {name}{dim} + CLOSE(irp_inuit) + {/group_entity} + + CALL touch_{name} + {?do_debug} CALL irp_leave(irp_here) {/do_debug} + +END SUBROUTINE read_{name} + diff --git a/src/util.py b/src/util.py index 44c3fab..dd88f40 100644 --- a/src/util.py +++ b/src/util.py @@ -46,9 +46,9 @@ logger.setLevel(30) # ~#~#~#~#~# # A S H E S _ T E M P L A T E S # ~#~#~#~#~# -from ashes import AshesEnv +from lib.manager import ashes import os -ashes_env = AshesEnv([os.path.join(os.path.dirname(__file__),'templates')]) +ashes_env = ashes.AshesEnv([os.path.join(os.path.dirname(__file__),'templates')]) def remove_empy_lines(text): return os.linesep.join([s for s in text.splitlines() if s.strip()]) From a4a6d7ceaaeca5f6b6fbc26ba5eab87e723c467c Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Thu, 16 Feb 2017 19:14:37 -0600 Subject: [PATCH 22/31] Move lib --- src/lib/__init__.py | 0 src/lib/__init__.pyc | Bin 0 -> 165 bytes src/lib/manager.py | 10 + src/lib/manager.pyc | Bin 0 -> 311 bytes src/lib/static_ashes.py | 2602 ++++++++++++++++++++++++++++++++++++++ src/lib/static_ashes.pyc | Bin 0 -> 99433 bytes src/lib/static_irpy.py | 161 +++ 7 files changed, 2773 insertions(+) create mode 100644 src/lib/__init__.py create mode 100644 src/lib/__init__.pyc create mode 100644 src/lib/manager.py create mode 100644 src/lib/manager.pyc create mode 100644 src/lib/static_ashes.py create mode 100644 src/lib/static_ashes.pyc create mode 100644 src/lib/static_irpy.py diff --git a/src/lib/__init__.py b/src/lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/lib/__init__.pyc b/src/lib/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf8d3fde2d852329102bd7b9fe121b15009c39e6 GIT binary patch literal 165 zcmZSn%*%DtWm!Zr0~9a^Gl0L^uzOuvh@Rs^0QKtOY{p2&GZb6^)rhK(ku=1i;I%=_4L40PG*vR he0*kJW=VX!UO{CE2hd2H-29Z%oK!oI#l=9(007PvCV2n= literal 0 HcmV?d00001 diff --git a/src/lib/manager.py b/src/lib/manager.py new file mode 100644 index 0000000..ed54d27 --- /dev/null +++ b/src/lib/manager.py @@ -0,0 +1,10 @@ +try: + import irpy +except: + import static_irpy as irpy + +try: + import ashes +except: + import static_ashes as ashes + diff --git a/src/lib/manager.pyc b/src/lib/manager.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fd188400523ad59038fbc62563152f2338fa08e3
GIT binary patch
literal 311
zcmZ8b(MrQW5Zp}^idHH36M4#31VIq-#Yd$erJ*l!UQU~uKF5po#2c;RH56)a~v8?tnAyMPoa%&oW|++G8h>&jiAqvvQ!sT;>I^dP1n_~
zwSE_(tbF*AMeti^qjcA|`TR!E|8X}L4Oa4ept^dO#

diff --git a/src/lib/static_ashes.py b/src/lib/static_ashes.py
new file mode 100644
--- /dev/null
+++ b/src/lib/static_ashes.py
+node_re = re.compile(r'(\{(?P<closing>\/)?'
+                     r'(?:(?P<symbol>[\~\#\?\@\:\<\>\+\^\%])\s*)?'
+                     r'(?P<refpath>[a-zA-Z0-9_\$\.]+|"[^"]+")'
+                     r'(?:\:(?P<contpath>[a-zA-Z0-9\$\.]+))?'
+                     r'(?P<filters>[\|a-z]+)*?'
+                     r'(?P<params>(?:\s+\w+\=(("[^"]*?")|([$\w\.]+)))*)?'
+                     r'\s*'
+                     r'(?P<selfclosing>\/)?'
+                     r'\})',
+                     flags=re.MULTILINE)
+
+key_re_str = '[a-zA-Z_$][0-9a-zA-Z_$]*'
+key_re = re.compile(key_re_str)
+path_re = re.compile('(' + key_re_str + ')?(\.' + key_re_str + ')+')
+comment_re = re.compile(r'(\{!.+?!\})|(\{`.+?`\})', flags=re.DOTALL)
+
+
+def get_path_or_key(pork):
+    if pork == '.':
+        pk = ['path', True, []]
+    elif path_re.match(pork):
+        f_local = pork.startswith('.')
+        if f_local:
+            pork = pork[1:]
+        pk = ['path', f_local, pork.split('.')]
+    elif key_re.match(pork):
+        pk = ['key', pork]
+    else:
+        raise ValueError('expected a path or key, not %r' % pork)
+    return pk
+
+
+def split_leading(text):
+    leading_stripped = text.lstrip()
+    leading_ws = text[:len(text) - len(leading_stripped)]
+    return leading_ws, leading_stripped
+
+
+class Token(object):
+    def __init__(self, text):
+        self.text = text
+
+    def get_line_count(self):
+        # returns 0 if there's only one line, because the
+        # token hasn't increased the number of lines.
+        count = len(self.text.splitlines()) - 1
+        if self.text[-1] in ('\n', '\r'):
+            count += 1
+        return count
+
+    def __repr__(self):
+        cn = self.__class__.__name__
+        disp = self.text
+        if len(disp) > 20:
+            disp = disp[:17] + '...'
+        return '%s(%r)' % (cn, disp)
+
+
+class CommentToken(Token):
+    def to_dust_ast(self):
+        return [['comment', self.text]]
+
+
+class RawToken(Token):
+    def to_dust_ast(self):
+        return [['raw', self.text]]
+
+
+class BufferToken(Token):
+    def to_dust_ast(self):
+        # It is hard to simulate the PEG parsing in this case,
+        # especially while supporting universal newlines.
+        if not self.text:
+            return []
+        rev = []
+        remaining_lines = self.text.splitlines()
+        if self.text[-1] in ('\n', '\r'):
+            # kind of a bug in splitlines if you ask me.
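Given node_re and the Token classes, tokenize() (defined a little further down) reduces a template to a flat list of buffer, comment, raw and tag tokens. A sketch of its output, assuming the lib.manager fallback import:

from lib.manager import ashes

print(ashes.tokenize('Hello {name}!{~n}{! a comment !}'))
# expected: [BufferToken('Hello '), ReferenceTag('{name}'),
#            BufferToken('!'), SpecialTag('{~n}'), CommentToken(' a comment ')]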
+ remaining_lines.append('') + while remaining_lines: + line = remaining_lines.pop() + leading_ws, lstripped = split_leading(line) + if remaining_lines: + if lstripped: + rev.append(['buffer', lstripped]) + rev.append(['format', '\n', leading_ws]) + else: + if line: + rev.append(['buffer', line]) + ret = list(reversed(rev)) + return ret + + +ALL_ATTRS = ('closing', 'symbol', 'refpath', 'contpath', + 'filters', 'params', 'selfclosing') + + +class Tag(Token): + req_attrs = () + ill_attrs = () + + def __init__(self, text, **kw): + super(Tag, self).__init__(text) + self._attr_dict = kw + self.set_attrs(kw) + + @property + def param_list(self): + try: + return params_to_kv(self.params) + except AttributeError: + return [] + + @property + def name(self): + try: + return self.refpath.strip().lstrip('.') + except (AttributeError, TypeError): + return None + + def set_attrs(self, attr_dict, raise_exc=True): + cn = self.__class__.__name__ + all_attrs = getattr(self, 'all_attrs', ()) + if all_attrs: + req_attrs = [a for a in ALL_ATTRS if a in all_attrs] + ill_attrs = [a for a in ALL_ATTRS if a not in all_attrs] + else: + req_attrs = getattr(self, 'req_attrs', ()) + ill_attrs = getattr(self, 'ill_attrs', ()) + + opt_attrs = getattr(self, 'opt_attrs', ()) + if opt_attrs: + ill_attrs = [a for a in ill_attrs if a not in opt_attrs] + for attr in req_attrs: + if attr_dict.get(attr, None) is None: + raise ValueError('%s expected %s' % (cn, attr)) + for attr in ill_attrs: + if attr_dict.get(attr, None) is not None: + raise ValueError('%s does not take %s' % (cn, attr)) + + avail_attrs = [a for a in ALL_ATTRS if a not in ill_attrs] + for attr in avail_attrs: + setattr(self, attr, attr_dict.get(attr, '')) + return True + + @classmethod + def from_match(cls, match): + kw = dict([(str(k), v.strip()) + for k, v in match.groupdict().items() + if v is not None and v.strip()]) + obj = cls(text=match.group(0), **kw) + obj.orig_match = match + return obj + + +class ReferenceTag(Tag): + all_attrs = ('refpath',) + opt_attrs = ('filters',) + + def to_dust_ast(self): + pork = get_path_or_key(self.refpath) + filters = ['filters'] + if self.filters: + f_list = self.filters.split('|')[1:] + for f in f_list: + filters.append(f) + return [['reference', pork, filters]] + + +class SectionTag(Tag): + ill_attrs = ('closing') + + +class ClosingTag(Tag): + all_attrs = ('closing', 'refpath') + + +class SpecialTag(Tag): + all_attrs = ('symbol', 'refpath') + + def to_dust_ast(self): + return [['special', self.refpath]] + + +class BlockTag(Tag): + all_attrs = ('symbol', 'refpath') + + +class PartialTag(Tag): + req_attrs = ('symbol', 'refpath', 'selfclosing') + + def __init__(self, **kw): + super(PartialTag, self).__init__(**kw) + self.subtokens = parse_inline(self.refpath) + + def to_dust_ast(self): + """ + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + Adding in `params` so `partials` function like sections. 
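PartialTag.param_list above leans on params_to_kv(), defined just after this: it re-splits the raw params blob on '=' to recover key/value pairs, keeping the quotes on quoted values (quote stripping happens later, in parse_inline). Its behaviour in isolation:

from lib.manager import ashes

print(ashes.params_to_kv('key=hello value="True" type="boolean"'))
# expected: [('key', 'hello'), ('value', '"True"'), ('type', '"boolean"')]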
+ """ + context = ['context'] + contpath = self.contpath + if contpath: + context.append(get_path_or_key(contpath)) + + params = ['params'] + param_list = self.param_list + if param_list: + try: + params.extend(params_to_dust_ast(param_list)) + except ParseError as pe: + pe.token = self + raise + + # tying to make this more standardized + inline_body = inline_to_dust_ast(self.subtokens) + return [['partial', + inline_body, + context, + params, + ]] + + +def parse_inline(source): + if not source: + raise ParseError('empty inline token') + if source.startswith('"') and source.endswith('"'): + source = source[1:-1] + if not source: + return [BufferToken("")] + tokens = tokenize(source, inline=True) + return tokens + + +def inline_to_dust_ast(tokens): + if tokens and all(isinstance(t, BufferToken) for t in tokens): + body = ['literal', ''.join(t.text for t in tokens)] + else: + body = ['body'] + for b in tokens: + body.extend(b.to_dust_ast()) + return body + + +def params_to_kv(params_str): + ret = [] + new_k, v = None, None + p_str = params_str.strip() + k, _, tail = p_str.partition('=') + while tail: + tmp, _, tail = tail.partition('=') + tail = tail.strip() + if not tail: + v = tmp + else: + v, new_k = tmp.split() + ret.append((k.strip(), v.strip())) + k = new_k + return ret + + +def params_to_dust_ast(param_kv): + ret = [] + for k, v in param_kv: + try: + v_body = get_path_or_key(v) + except ValueError: + v_body = inline_to_dust_ast(parse_inline(v)) + ret.append(['param', ['literal', k], v_body]) + return ret + + +def get_tag(match, inline=False): + groups = match.groupdict() + symbol = groups['symbol'] + closing = groups['closing'] + refpath = groups['refpath'] + if closing: + tag_type = ClosingTag + elif symbol is None and refpath is not None: + tag_type = ReferenceTag + elif symbol in '#?^<+@%': + tag_type = SectionTag + elif symbol == '~': + tag_type = SpecialTag + elif symbol == ':': + tag_type = BlockTag + elif symbol == '>': + tag_type = PartialTag + else: + raise ParseError('invalid tag symbol: %r' % symbol) + if inline and tag_type not in (ReferenceTag, SpecialTag): + raise ParseError('invalid inline tag') + return tag_type.from_match(match) + + +def tokenize(source, inline=False): + tokens = [] + com_nocom = comment_re.split(source) + line_counts = [1] + + def _add_token(t): + # i wish i had nonlocal so bad + t.start_line = sum(line_counts) + line_counts.append(t.get_line_count()) + t.end_line = sum(line_counts) + tokens.append(t) + for cnc in com_nocom: + if not cnc: + continue + elif cnc.startswith('{!') and cnc.endswith('!}'): + _add_token(CommentToken(cnc[2:-2])) + continue + elif cnc.startswith('{`') and cnc.endswith('`}'): + _add_token(RawToken(cnc[2:-2])) + continue + prev_end = 0 + start = None + end = None + for match in node_re.finditer(cnc): + start, end = match.start(1), match.end(1) + if prev_end < start: + _add_token(BufferToken(cnc[prev_end:start])) + prev_end = end + try: + _add_token(get_tag(match, inline)) + except ParseError as pe: + pe.line_no = sum(line_counts) + raise + tail = cnc[prev_end:] + if tail: + _add_token(BufferToken(tail)) + return tokens + +######### +# PARSING +######### + + +class Section(object): + def __init__(self, start_tag=None, blocks=None): + if start_tag is None: + refpath = None + name = '' + else: + refpath = start_tag.refpath + name = start_tag.name + + self.refpath = refpath + self.name = name + self.start_tag = start_tag + self.blocks = blocks or [] + + def add(self, obj): + if type(obj) == Block: + self.blocks.append(obj) + else: 
+ if not self.blocks: + self.blocks = [Block()] + self.blocks[-1].add(obj) + + def to_dict(self): + ret = {self.name: dict([(b.name, b.to_list()) for b in self.blocks])} + return ret + + def to_dust_ast(self): + symbol = self.start_tag.symbol + + pork = get_path_or_key(self.refpath) + + context = ['context'] + contpath = self.start_tag.contpath + if contpath: + context.append(get_path_or_key(contpath)) + + params = ['params'] + param_list = self.start_tag.param_list + if param_list: + try: + params.extend(params_to_dust_ast(param_list)) + except ParseError as pe: + pe.token = self + raise + + bodies = ['bodies'] + if self.blocks: + for b in reversed(self.blocks): + bodies.extend(b.to_dust_ast()) + + return [[symbol, + pork, + context, + params, + bodies]] + + +class Block(object): + def __init__(self, name='block'): + if not name: + raise ValueError('blocks need a name, not: %r' % name) + self.name = name + self.items = [] + + def add(self, item): + self.items.append(item) + + def to_list(self): + ret = [] + for i in self.items: + try: + ret.append(i.to_dict()) + except AttributeError: + ret.append(i) + return ret + + def _get_dust_body(self): + # for usage by root block in ParseTree + ret = [] + for i in self.items: + ret.extend(i.to_dust_ast()) + return ret + + def to_dust_ast(self): + name = self.name + body = ['body'] + dust_body = self._get_dust_body() + if dust_body: + body.extend(dust_body) + return [['param', + ['literal', name], + body]] + + +class ParseTree(object): + def __init__(self, root_block): + self.root_block = root_block + + def to_dust_ast(self): + ret = ['body'] + ret.extend(self.root_block._get_dust_body()) + return ret + + @classmethod + def from_tokens(cls, tokens): + root_sect = Section() + ss = [root_sect] # section stack + for token in tokens: + if type(token) == SectionTag: + new_s = Section(token) + ss[-1].add(new_s) + if not token.selfclosing: + ss.append(new_s) + elif type(token) == ClosingTag: + if len(ss) <= 1: + msg = 'closing tag before opening tag: %r' % token.text + raise ParseError(msg, token=token) + if token.name != ss[-1].name: + msg = ('improperly nested tags: %r does not close %r' % + (token.text, ss[-1].start_tag.text)) + raise ParseError(msg, token=token) + ss.pop() + elif type(token) == BlockTag: + if len(ss) <= 1: + msg = 'start block outside of a section: %r' % token.text + raise ParseError(msg, token=token) + new_b = Block(name=token.refpath) + ss[-1].add(new_b) + else: + ss[-1].add(token) + if len(ss) > 1: + raise ParseError('unclosed tag: %r' % ss[-1].start_tag.text, + token=ss[-1].start_tag) + return cls(root_sect.blocks[0]) + + @classmethod + def from_source(cls, src): + tokens = tokenize(src) + return cls.from_tokens(tokens) + + +############## +# Optimize AST +############## +DEFAULT_SPECIAL_CHARS = {'s': ' ', + 'n': '\n', + 'r': '\r', + 'lb': '{', + 'rb': '}'} + +DEFAULT_OPTIMIZERS = { + 'body': 'compact_buffers', + 'special': 'convert_special', + 'format': 'nullify', + 'comment': 'nullify'} + +for nsym in ('buffer', 'filters', 'key', 'path', 'literal', 'raw'): + DEFAULT_OPTIMIZERS[nsym] = 'noop' + +for nsym in ('#', '?', '^', '<', '+', '@', '%', 'reference', + 'partial', 'context', 'params', 'bodies', 'param'): + DEFAULT_OPTIMIZERS[nsym] = 'visit' + +UNOPT_OPTIMIZERS = dict(DEFAULT_OPTIMIZERS) +UNOPT_OPTIMIZERS.update({'format': 'noop', 'body': 'visit'}) + + +def escape(text, esc_func=json.dumps): + return esc_func(text) + + +class Optimizer(object): + def __init__(self, optimizers=None, special_chars=None): + if special_chars is 
None: + special_chars = DEFAULT_SPECIAL_CHARS + self.special_chars = special_chars + + if optimizers is None: + optimizers = DEFAULT_OPTIMIZERS + self.optimizers = dict(optimizers) + + def optimize(self, node): + # aka filter_node() + nsym = node[0] + optimizer_name = self.optimizers[nsym] + return getattr(self, optimizer_name)(node) + + def noop(self, node): + return node + + def nullify(self, node): + return None + + def convert_special(self, node): + return ['buffer', self.special_chars[node[1]]] + + def visit(self, node): + ret = [node[0]] + for n in node[1:]: + filtered = self.optimize(n) + if filtered: + ret.append(filtered) + return ret + + def compact_buffers(self, node): + ret = [node[0]] + memo = None + for n in node[1:]: + filtered = self.optimize(n) + if not filtered: + continue + if filtered[0] == 'buffer': + if memo is not None: + memo[1] += filtered[1] + else: + memo = filtered + ret.append(filtered) + else: + memo = None + ret.append(filtered) + return ret + + def __call__(self, node): + return self.optimize(node) + + +######### +# Compile +######### + + +ROOT_RENDER_TMPL = \ +'''def render(chk, ctx): + {body} + return {root_func_name}(chk, ctx) +''' + + +def _python_compile(source): + """ + Generates a Python `code` object (via `compile`). + + args: + source: (required) string of python code to be compiled + + this actually compiles the template to code + """ + try: + code = compile(source, '', 'single') + return code + except: + raise + + +def _python_exec(code, name, global_env=None): + """ + this loads a code object (generated via `_python_compile` + + args: + code: (required) code object (generate via `_python_compile`) + name: (required) the name of the function + + kwargs: + global_env: (default None): the environment + """ + if global_env is None: + global_env = {} + else: + global_env = dict(global_env) + if PY3: + exec(code, global_env) + else: + exec("exec code in global_env") + return global_env[name] + + +def python_string_to_code(python_string): + """ + utility function + used to compile python string functions to code object + + args: + ``python_string`` + """ + code = _python_compile(python_string) + return code + + +def python_string_to_function(python_string): + """ + utility function + used to compile python string functions for template loading/caching + + args: + ``python_string`` + """ + code = _python_compile(python_string) + function = _python_exec(code, name='render', global_env=None) + return function + + +class Compiler(object): + """ + Note: Compiler objects aren't really meant to be reused, + the class is just for namespacing and convenience. + """ + sections = {'#': 'section', + '?': 'exists', + '^': 'notexists'} + nodes = {'<': 'inline_partial', + '+': 'region', + '@': 'helper', + '%': 'pragma'} + + def __init__(self, env=None): + if env is None: + env = default_env + self.env = env + + self.bodies = {} + self.blocks = {} + self.block_str = '' + self.index = 0 + self.auto = self.env.autoescape_filter + + def compile(self, ast, name='render'): + python_source = self._gen_python(ast) + python_code = _python_compile(python_source) + python_func = _python_exec(python_code, name=name) + return (python_code, python_func) + + def _gen_python(self, ast): # ast to init? 
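Compiler.compile() above goes AST, to generated Python source, to code object, to render function; the intermediate source stays on python_source, which makes the pipeline inspectable. A sketch using the module-level ParseTree/Optimizer/Compiler names shown in this file:

from lib.manager import ashes

ast = ashes.ParseTree.from_source('Hi {name}!').to_dust_ast()
ast = ashes.Optimizer()(ast)              # drop comments, merge buffers
compiler = ashes.Compiler()               # falls back to default_env
code, render_func = compiler.compile(ast)
print(compiler.python_source)             # the generated def render(chk, ctx): ...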
+ lines = [] + c_node = self._node(ast) + + block_str = self._root_blocks() + + bodies = self._root_bodies() + lines.extend(bodies.splitlines()) + if block_str: + lines.extend(['', block_str, '']) + body = '\n '.join(lines) + + ret = ROOT_RENDER_TMPL.format(body=body, + root_func_name=c_node) + self.python_source = ret + return ret + + def _root_blocks(self): + if not self.blocks: + self.block_str = '' + return '' + self.block_str = 'ctx = ctx.shift_blocks(blocks)\n ' + pairs = ['"' + name + '": ' + fn for name, fn in self.blocks.items()] + return 'blocks = {' + ', '.join(pairs) + '}' + + def _root_bodies(self): + max_body = max(self.bodies.keys()) + ret = [''] * (max_body + 1) + for i, body in self.bodies.items(): + ret[i] = ('\ndef body_%s(chk, ctx):\n %sreturn chk%s\n' + % (i, self.block_str, body)) + return ''.join(ret) + + def _convert_special(self, node): + return ['buffer', self.special_chars[node[1]]] + + def _node(self, node): + ntype = node[0] + if ntype in self.sections: + stype = self.sections[ntype] + return self._section(node, stype) + elif ntype in self.nodes: + ntype = self.nodes[ntype] + cfunc = getattr(self, '_' + ntype, None) + if not callable(cfunc): + raise TypeError('unsupported node type: "%r"', node[0]) + return cfunc(node) + + def _body(self, node): + index = self.index + self.index += 1 # make into property, equal to len of bodies? + name = 'body_%s' % index + self.bodies[index] = self._parts(node) + return name + + def _parts(self, body): + parts = [] + for part in body[1:]: + parts.append(self._node(part)) + return ''.join(parts) + + def _raw(self, node): + return '.write(%r)' % node[1] + + def _buffer(self, node): + return '.write(%s)' % escape(node[1]) + + def _format(self, node): + return '.write(%s)' % escape(node[1] + node[2]) + + def _reference(self, node): + return '.reference(%s,ctx,%s)' % (self._node(node[1]), + self._node(node[2])) + + def _section(self, node, cmd): + return '.%s(%s,%s,%s,%s)' % (cmd, + self._node(node[1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _inline_partial(self, node): + bodies = node[4] + for param in bodies[1:]: + btype = param[1][1] + if btype == 'block': + self.blocks[node[1][1]] = self._node(param[2]) + return '' + return '' + + def _region(self, node): + """aka the plus sign ('+') block""" + tmpl = '.block(ctx.get_block(%s),%s,%s,%s)' + return tmpl % (escape(node[1][1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _helper(self, node): + return '.helper(%s,%s,%s,%s)' % (escape(node[1][1]), + self._node(node[2]), + self._node(node[4]), + self._node(node[3])) + + def _pragma(self, node): + pr_name = node[1][1] + pragma = self.env.pragmas.get(pr_name) + if not pragma or not callable(pragma): + return '' # TODO: raise? + raw_bodies = node[4] + bodies = {} + for rb in raw_bodies[1:]: + bodies[rb[1][1]] = rb[2] + + raw_params = node[3] + params = {} + for rp in raw_params[1:]: + params[rp[1][1]] = rp[2][1] + + try: + ctx = node[2][1][1] + except (IndexError, AttributeError): + ctx = None + + return pragma(self, ctx, bodies, params) + + def _partial(self, node): + """ + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + Adding in `params` so `partials` function like sections. 
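_section() above compiles a '#' node into a .section(...) call on the chunk; at render time (Chunk.section, further down) each element of a list is pushed onto the context with $idx, $idx_1 and $len bound, which is what makes enumeration work without helpers:

from lib.manager import ashes

env = ashes.AshesEnv()
env.register_source('each', '{#items}{$idx_1}/{$len}: {.}{~n}{/items}')
print(env.render('each', {'items': ['a', 'b']}))
# expected:
# 1/2: a
# 2/2: b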
+ updating call to .partial() to include the kwargs + + dust.js reference : + compile.nodes = { + partial: function(context, node) { + return '.partial(' + + compiler.compileNode(context, node[1]) + + ',' + compiler.compileNode(context, node[2]) + + ',' + compiler.compileNode(context, node[3]) + ')'; + }, + """ + if node[0] == 'body': + body_name = self._node(node[1]) + return '.partial(' + body_name + ', %s)' % self._node(node[2]) + return '.partial(%s, %s, %s)' % (self._node(node[1]), + self._node(node[2]), + self._node(node[3])) + + def _context(self, node): + contpath = node[1:] + if contpath: + return 'ctx.rebase(%s)' % (self._node(contpath[0])) + return 'ctx' + + def _params(self, node): + parts = [self._node(p) for p in node[1:]] + if parts: + return '{' + ','.join(parts) + '}' + return 'None' + + def _bodies(self, node): + parts = [self._node(p) for p in node[1:]] + return '{' + ','.join(parts) + '}' + + def _param(self, node): + return ':'.join([self._node(node[1]), self._node(node[2])]) + + def _filters(self, node): + ret = '"%s"' % self.auto + f_list = ['"%s"' % f for f in node[1:]] # repr? + if f_list: + ret += ',[%s]' % ','.join(f_list) + return ret + + def _key(self, node): + return 'ctx.get(%r)' % node[1] + + def _path(self, node): + cur = node[1] + keys = node[2] or [] + return 'ctx.get_path(%s, %s)' % (cur, keys) + + def _literal(self, node): + return escape(node[1]) + + +######### +# Runtime +######### + + +class UndefinedValueType(object): + def __repr__(self): + return self.__class__.__name__ + '()' + + def __str__(self): + return '' + + +UndefinedValue = UndefinedValueType() + +# Prerequisites for escape_url_path + + +def _make_quote_map(allowed_chars): + ret = {} + for i in range(256): + c = chr(i) + esc_c = c if c in allowed_chars else '%{0:02X}'.format(i) + ret[i] = ret[c] = esc_c + return ret + +# The unreserved URI characters (per RFC 3986) +_UNRESERVED_CHARS = (frozenset(string.ascii_letters) + | frozenset(string.digits) + | frozenset('-._~')) +_RESERVED_CHARS = frozenset(":/?#[]@!$&'()*+,;=") # not used +_PATH_RESERVED_CHARS = frozenset("?#") # not used + +_PATH_QUOTE_MAP = _make_quote_map(_UNRESERVED_CHARS | set('/?=&:#')) + +# Escapes/filters + + +def escape_uri_path(text, to_bytes=True): + # actually meant to run on path + query args + fragment + text = to_unicode(text) + if not to_bytes: + return unicode().join([_PATH_QUOTE_MAP.get(c, c) for c in text]) + try: + bytestr = text.encode('utf-8') + except UnicodeDecodeError: + bytestr = text + except: + raise ValueError('expected text or UTF-8 encoded bytes, not %r' % text) + return ''.join([_PATH_QUOTE_MAP[b] for b in bytestr]) + + +def escape_uri_component(text): + return (escape_uri_path(text) # calls to_unicode for us + .replace('/', '%2F') + .replace('?', '%3F') + .replace('=', '%3D') + .replace('&', '%26')) + + +def escape_html(text): + text = to_unicode(text) + # TODO: dust.js doesn't use this, but maybe we should: + # .replace("'", '&squot;') + return cgi.escape(text, True) + + +def escape_js(text): + text = to_unicode(text) + return (text + .replace('\\', '\\\\') + .replace('"', '\\"') + .replace("'", "\\'") + .replace('\r', '\\r') + .replace('\u2028', '\\u2028') + .replace('\u2029', '\\u2029') + .replace('\n', '\\n') + .replace('\f', '\\f') + .replace('\t', '\\t')) + + +def comma_num(val): + try: + return '{0:,}'.format(val) + except ValueError: + return to_unicode(val) + + +def pp_filter(val): + try: + return pprint.pformat(val) + except: + try: + return repr(val) + except: + return 'unreprable object 
%s' % object.__repr__(val) + + +JSON_PP_INDENT = 2 + + +def ppjson_filter(val): + "A best-effort pretty-printing filter, based on the JSON module" + try: + return json.dumps(val, indent=JSON_PP_INDENT, sort_keys=True) + except TypeError: + return to_unicode(val) + + +# Helpers + +def first_helper(chunk, context, bodies, params=None): + if context.stack.index > 0: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def last_helper(chunk, context, bodies, params=None): + if context.stack.index < context.stack.of - 1: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def sep_helper(chunk, context, bodies, params=None): + if context.stack.index == context.stack.of - 1: + return chunk + if 'block' in bodies: + return bodies['block'](chunk, context) + return chunk + + +def idx_helper(chunk, context, bodies, params=None): + if 'block' in bodies: + return bodies['block'](chunk, context.push(context.stack.index)) + return chunk + + +def idx_1_helper(chunk, context, bodies, params=None): + if 'block' in bodies: + return bodies['block'](chunk, context.push(context.stack.index + 1)) + return chunk + + +def size_helper(chunk, context, bodies, params): + try: + key = params['key'] + return chunk.write(unicode(len(key))) + except (KeyError, TypeError): + return chunk + + +def _sort_iterate_items(items, sort_key, direction): + if not items: + return items + reverse = False + if direction == 'desc': + reverse = True + if not sort_key: + sort_key = 0 + elif sort_key[0] == '$': + sort_key = sort_key[1:] + if sort_key == 'key': + sort_key = 0 + elif sort_key == 'value': + sort_key = 1 + else: + try: + sort_key = int(sort_key) + except: + sort_key = 0 + return sorted(items, key=lambda x: x[sort_key], reverse=reverse) + + +def iterate_helper(chunk, context, bodies, params): + params = params or {} + body = bodies.get('block') + sort = params.get('sort') + sort_key = params.get('sort_key') + target = params.get('key') + if not body or not target: + context.env.log('warn', 'helper.iterate', 'empty block or target') + return chunk + try: + iter(target) + except: + context.env.log('warn', 'helper.iterate', 'non-iterable target') + return chunk + try: + items = target.items() + is_dict = True + except: + items = target + is_dict = False + if sort: + try: + items = _sort_iterate_items(items, sort_key, direction=sort) + except: + context.env.log('warn', 'helper.iterate', 'failed to sort target') + return chunk + if is_dict: + for key, value in items: + body(chunk, context.push({'$key': key, + '$value': value, + '$type': type(value).__name__, + '$0': key, + '$1': value})) + else: + # all this is for iterating over tuples and the like + for values in items: + try: + key = values[0] + except: + key, value = None, None + else: + try: + value = values[1] + except: + value = None + new_scope = {'$key': key, + '$value': value, + '$type': type(value).__name__} + try: + for i, value in enumerate(values): + new_scope['$%s' % i] = value + except TypeError: + context.env.log('warn', 'helper.iterate', + 'unable to enumerate values') + return chunk + else: + body(chunk, context.push(new_scope)) + return chunk + + +def _do_compare(chunk, context, bodies, params, cmp_op): + "utility function used by @eq, @gt, etc." 
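iterate_helper above is the dict-friendly loop: it resolves key= to the target, optionally sorts it via _sort_iterate_items, and pushes $key/$value (plus $0, $1, ...) for each item. For example:

from lib.manager import ashes

env = ashes.AshesEnv()
env.register_source('it',
    '{@iterate key=d sort="asc"}{$key}={$value}{~n}{/iterate}')
print(env.render('it', {'d': {'b': 2, 'a': 1}}))
# expected:
# a=1
# b=2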
+ params = params or {} + try: + body = bodies['block'] + key = params['key'] + value = params['value'] + typestr = params.get('type') + except KeyError: + context.env.log('warn', 'helper.compare', + 'comparison missing key/value') + return chunk + rkey = _resolve_value(key, chunk, context) + if not typestr: + typestr = _COERCE_REV_MAP.get(type(rkey), 'string') + rvalue = _resolve_value(value, chunk, context) + crkey, crvalue = _coerce(rkey, typestr), _coerce(rvalue, typestr) + if isinstance(crvalue, type(crkey)) and cmp_op(crkey, crvalue): + return chunk.render(body, context) + elif 'else' in bodies: + return chunk.render(bodies['else'], context) + return chunk + + +def _resolve_value(item, chunk, context): + if not callable(item): + return item + try: + return chunk.tap_render(item, context) + except TypeError: + if getattr(context, 'is_strict', None): + raise + return item + + +_COERCE_MAP = { + 'number': float, + 'string': unicode, + 'boolean': bool, +} # Not implemented: date, context +_COERCE_REV_MAP = dict([(v, k) for k, v in _COERCE_MAP.items()]) +_COERCE_REV_MAP[int] = 'number' +try: + _COERCE_REV_MAP[long] = 'number' +except NameError: + pass + + +def _coerce(value, typestr): + coerce_type = _COERCE_MAP.get(typestr.lower()) + if not coerce_type or isinstance(value, coerce_type): + return value + if isinstance(value, string_types): + try: + value = json.loads(value) + except (TypeError, ValueError): + pass + try: + return coerce_type(value) + except (TypeError, ValueError): + return value + + +def _make_compare_helpers(): + from functools import partial + from operator import eq, ne, lt, le, gt, ge + CMP_MAP = {'eq': eq, 'ne': ne, 'gt': gt, 'lt': lt, 'gte': ge, 'lte': le} + ret = {} + for name, op in CMP_MAP.items(): + ret[name] = partial(_do_compare, cmp_op=op) + return ret + + +DEFAULT_HELPERS = {'first': first_helper, + 'last': last_helper, + 'sep': sep_helper, + 'idx': idx_helper, + 'idx_1': idx_1_helper, + 'size': size_helper, + 'iterate': iterate_helper} +DEFAULT_HELPERS.update(_make_compare_helpers()) + + +def make_base(env, stack, global_vars=None): + """`make_base( env, stack, global_vars=None )` + `env` and `stack` are required by the Python implementation. + `global_vars` is optional. set to global_vars. + + 2014.05.09 + This brings compatibility to the more popular fork of Dust.js + from LinkedIn (v1.0) + + adding this to try and create compatibility with Dust + + this is used for the non-activated alternative approach of rendering a + partial with a custom context object + + dust.makeBase = function(global) { + return new Context(new Stack(), global); + }; + """ + return Context(env, stack, global_vars) + + +# Actual runtime objects + +class Context(object): + """\ + The context is a special object that handles variable lookups and + controls template behavior. It is the interface between your + application logic and your templates. The context can be + visualized as a stack of objects that grows as we descend into + nested sections. + + When looking up a key, Dust searches the context stack from the + bottom up. There is no need to merge helper functions into the + template data; instead, create a base context onto which you can + push your local template data. 
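_do_compare and _coerce above implement the @eq/@ne/@gt/... family: key= is resolved against the context, value= is coerced using type= (or the key's inferred type), and either the block or its {:else} body renders. The pattern from _main() earlier, in miniature:

from lib.manager import ashes

env = ashes.AshesEnv()
env.register_source('cmp',
    '{@eq key=n value="3" type="number"}three{:else}not three{/eq}')
print(env.render('cmp', {'n': 3}))   # expected: three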
+ """ + def __init__(self, env, stack, global_vars=None, blocks=None): + self.env = env + self.stack = stack + if global_vars is None: + global_vars = {} + self.globals = global_vars + self.blocks = blocks + + @classmethod + def wrap(cls, env, context): + if isinstance(context, cls): + return context + return cls(env, Stack(context)) + + def get(self, path, cur=False): + "Retrieves the value `path` as a key from the context stack." + if isinstance(path, (str, unicode)): + if path[0] == '.': + cur = True + path = path[1:] + path = path.split('.') + return self._get(cur, path) + + def get_path(self, cur, down): + return self._get(cur, down) + + def _get(self, cur, down): + # many thanks to jvanasco for his contribution -mh 2014 + """ + * Get a value from the context + * @method `_get` + * @param {boolean} `cur` Get only from the current context + * @param {array} `down` An array of each step in the path + * @private + * @return {string | object} + """ + ctx = self.stack + length = 0 if not down else len(down) # TODO: try/except? + + if not length: + # wants nothing? ok, send back the entire payload + return ctx.head + + first_path_element = down[0] + + value = UndefinedValue + + if cur and not length: + ctx = ctx.head + else: + if not cur: + # Search up the stack for the first_path_element value + while ctx: + if isinstance(ctx.head, dict): + if first_path_element in ctx.head: + value = ctx.head[first_path_element] + break + ctx = ctx.tail + if value is UndefinedValue: + if first_path_element in self.globals: + ctx = self.globals[first_path_element] + else: + ctx = UndefinedValue + else: + ctx = value + else: + # if scope is limited by a leading dot, don't search up tree + if first_path_element in ctx.head: + ctx = ctx.head[first_path_element] + else: + ctx = UndefinedValue + + i = 1 + while ctx and ctx is not UndefinedValue and i < length: + if down[i] in ctx: + ctx = ctx[down[i]] + else: + ctx = UndefinedValue + i += 1 + + if ctx is UndefinedValue: + return None + else: + return ctx + + def push(self, head, index=None, length=None): + """\ + Pushes an arbitrary value `head` onto the context stack and returns + a new `Context` instance. 
Specify `index` and/or `length` to enable + enumeration helpers.""" + return Context(self.env, + Stack(head, self.stack, index, length), + self.globals, + self.blocks) + + def rebase(self, head): + """\ + Returns a new context instance consisting only of the value at + `head`, plus any previously defined global object.""" + return Context(self.env, + Stack(head), + self.globals, + self.blocks) + + def current(self): + """Returns the head of the context stack.""" + return self.stack.head + + def get_block(self, key): + blocks = self.blocks + if not blocks: + return None + fn = None + for block in blocks[::-1]: + try: + fn = block[key] + if fn: + break + except KeyError: + continue + return fn + + def shift_blocks(self, local_vars): + blocks = self.blocks + if local_vars: + if blocks: + new_blocks = blocks + [local_vars] + else: + new_blocks = [local_vars] + return Context(self.env, self.stack, self.globals, new_blocks) + return self + + +class Stack(object): + def __init__(self, head, tail=None, index=None, length=None): + self.head = head + self.tail = tail + self.index = index or 0 + self.of = length or 1 + # self.is_object = is_scalar(head) + + def __repr__(self): + return 'Stack(%r, %r, %r, %r)' % (self.head, + self.tail, + self.index, + self.of) + + +class Stub(object): + def __init__(self, callback): + self.head = Chunk(self) + self.callback = callback + self._out = [] + + @property + def out(self): + return ''.join(self._out) + + def flush(self): + chunk = self.head + while chunk: + if chunk.flushable: + self._out.append(chunk.data) + elif chunk.error: + self.callback(chunk.error, '') + self.flush = lambda self: None + return + else: + return + self.head = chunk = chunk.next + self.callback(None, self.out) + + +class Stream(object): + def __init__(self): + self.head = Chunk(self) + self.events = {} + + def flush(self): + chunk = self.head + while chunk: + if chunk.flushable: + self.emit('data', chunk.data) + elif chunk.error: + self.emit('error', chunk.error) + self.flush = lambda self: None + return + else: + return + self.head = chunk = chunk.next + self.emit('end') + + def emit(self, etype, data=None): + try: + self.events[etype](data) + except KeyError: + pass + + def on(self, etype, callback): + self.events[etype] = callback + return self + + +def is_scalar(obj): + return not hasattr(obj, '__iter__') or isinstance(obj, string_types) + + +def is_empty(obj): + try: + return obj is None or obj is False or len(obj) == 0 + except TypeError: + return False + + +class Chunk(object): + """\ + A Chunk is a Dust primitive for controlling the flow of the + template. Depending upon the behaviors defined in the context, + templates may output one or more chunks during rendering. A + handler that writes to a chunk directly must return the modified + chunk. + """ + def __init__(self, root, next_chunk=None, taps=None): + self.root = root + self.next = next_chunk + self.taps = taps + self._data, self.data = [], '' + self.flushable = False + self.error = None + + def write(self, data): + "Writes data to this chunk's buffer" + if self.taps: + data = self.taps.go(data) + self._data.append(data) + return self + + def end(self, data=None): + """\ + Writes data to this chunk's buffer and marks it as flushable. This + method must be called on any chunks created via chunk.map. Do + not call this method on a handler's main chunk -- dust.render + and dust.stream take care of this for you. 
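Context._get above is the bottom-up stack walk the class docstring describes: push() grows the stack, and lookups fall through to older frames, then to globals, then to None. Driven directly, without a template:

from lib.manager import ashes

base = ashes.Context(ashes.default_env, ashes.Stack({'who': 'world'}))
inner = base.push({'greeting': 'hello'})
print(inner.get('greeting'))   # hello (top frame)
print(inner.get('who'))        # world (found lower in the stack)
print(inner.get('missing'))    # None  (UndefinedValue maps to None)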
+ """ + if data: + self.write(data) + self.data = ''.join(self._data) + self.flushable = True + self.root.flush() + return self + + def map(self, callback): + """\ + Creates a new chunk and passes it to `callback`. Use map to wrap + asynchronous functions and to partition the template for + streaming. chunk.map tells Dust to manufacture a new chunk, + reserving a slot in the output stream before continuing on to + render the rest of the template. You must (eventually) call + chunk.end() on a mapped chunk to weave its content back into + the stream. + """ + cursor = Chunk(self.root, self.next, self.taps) + branch = Chunk(self.root, cursor, self.taps) + self.next = branch + self.data = ''.join(self._data) + self.flushable = True + callback(branch) + return cursor + + def tap(self, tap): + "Convenience methods for applying filters to a stream." + if self.taps: + self.taps = self.taps.push(tap) + else: + self.taps = Tap(tap) + return self + + def untap(self): + "Convenience methods for applying filters to a stream." + self.taps = self.taps.tail + return self + + def render(self, body, context): + """\ + Renders a template block, such as a default block or an else + block. Basically equivalent to body(chunk, context). + """ + return body(self, context) + + def tap_render(self, body, context): + output = [] + + def tmp_tap(data): + if data: + output.append(data) + return '' + self.tap(tmp_tap) + try: + self.render(body, context) + finally: + self.untap() + return ''.join(output) + + def reference(self, elem, context, auto, filters=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + if callable(elem): + # this whole callable thing is a quirky thing about dust + try: + elem = elem(self, context) + except TypeError: + if getattr(context, 'is_strict', None): + raise + elem = repr(elem) + else: + if isinstance(elem, Chunk): + return elem + if is_empty(elem): + return self + else: + filtered = context.env.apply_filters(elem, auto, filters) + return self.write(filtered) + + def section(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers.""" + if callable(elem): + try: + elem = elem(self, context, bodies, params) + except TypeError: + if getattr(context, 'is_strict', None): + raise + elem = repr(elem) + else: + if isinstance(elem, Chunk): + return elem + body = bodies.get('block') + else_body = bodies.get('else') + if params: + context = context.push(params) + if not elem and else_body and elem is not 0: + # breaks with dust.js; dust.js doesn't render else blocks + # on sections referencing empty lists. 
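+            # e.g. '{#items}{.}{:else}none{/items}' rendered against
+            # {'items': []} produces 'none' here, while dust.js
+            # would produce ''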
+ return else_body(self, context) + + if not body or elem is None: + return self + if elem is True: + return body(self, context) + elif isinstance(elem, dict) or is_scalar(elem): + return body(self, context.push(elem)) + else: + chunk = self + length = len(elem) + head = context.stack.head + for i, el in enumerate(elem): + new_ctx = context.push(el, i, length) + new_ctx.globals.update({'$len': length, + '$idx': i, + '$idx_1': i + 1}) + chunk = body(chunk, new_ctx) + return chunk + + def exists(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers.""" + if not is_empty(elem): + if bodies.get('block'): + return bodies['block'](self, context) + elif bodies.get('else'): + return bodies['else'](self, context) + return self + + def notexists(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + if is_empty(elem): + if bodies.get('block'): + return bodies['block'](self, context) + elif bodies.get('else'): + return bodies['else'](self, context) + return self + + def block(self, elem, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + body = bodies.get('block') + if elem: + body = elem + if body: + body(self, context) + return self + + def partial(self, elem, context, params=None): + """These methods implement Dust's default behavior for keys, sections, + blocks, partials and context helpers. While it is unlikely you'll need + to modify these methods or invoke them from within handlers, the + source code may be a useful point of reference for developers. + """ + if params: + context = context.push(params) + if callable(elem): + _env = context.env + cback = lambda name, chk: _env.load_chunk(name, chk, context).end() + return self.capture(elem, context, cback) + return context.env.load_chunk(elem, self, context) + + def helper(self, name, context, bodies, params=None): + """\ + These methods implement Dust's default behavior for keys, + sections, blocks, partials and context helpers. While it is + unlikely you'll need to modify these methods or invoke them + from within handlers, the source code may be a useful point of + reference for developers. + """ + return context.env.helpers[name](self, context, bodies, params) + + def capture(self, body, context, callback): + def map_func(chunk): + def stub_cb(err, out): + if err: + chunk.set_error(err) + else: + callback(out, chunk) + stub = Stub(stub_cb) + body(stub.head, context).end() + return self.map(map_func) + + def set_error(self, error): + "Sets an error on this chunk and immediately flushes the output." 
+ self.error = error + self.root.flush() + return self + + +class Tap(object): + def __init__(self, head=None, tail=None): + self.head = head + self.tail = tail + + def push(self, tap): + return Tap(tap, self) + + def go(self, value): + tap = self + while tap: + value = tap.head(value) # TODO: type errors? + tap = tap.tail + return value + + def __repr__(self): + cn = self.__class__.__name__ + return '%s(%r, %r)' % (cn, self.head, self.tail) + + +def to_unicode(obj): + try: + return unicode(obj) + except UnicodeDecodeError: + return unicode(obj, encoding='utf8') + + +DEFAULT_FILTERS = { + 'h': escape_html, + 's': to_unicode, + 'j': escape_js, + 'u': escape_uri_path, + 'uc': escape_uri_component, + 'cn': comma_num, + 'pp': pp_filter, + 'ppjson': ppjson_filter} + + +######### +# Pragmas +######### + + +def esc_pragma(compiler, context, bodies, params): + old_auto = compiler.auto + if not context: + context = 'h' + if context == 's': + compiler.auto = '' + else: + compiler.auto = context + out = compiler._parts(bodies['block']) + compiler.auto = old_auto + return out + + +DEFAULT_PRAGMAS = { + 'esc': esc_pragma +} + + +########### +# Interface +########### + +def load_template_path(path, encoding='utf-8'): + """ + split off `from_path` so __init__ can use + returns a tuple of the source and adjusted absolute path + """ + abs_path = os.path.abspath(path) + if not os.path.isfile(abs_path): + raise TemplateNotFound(abs_path) + with codecs.open(abs_path, 'r', encoding) as f: + source = f.read() + return (source, abs_path) + + +class Template(object): + # no need to set defaults on __init__ + last_mtime = None + is_convertable = True + + def __init__(self, + name, + source, + source_file=None, + optimize=True, + keep_source=True, + env=None, + lazy=False, + ): + if not source and source_file: + (source, source_abs_path) = load_template_path(source_file) + self.name = name + self.source = source + self.source_file = source_file + self.time_generated = time.time() + if source_file: + self.last_mtime = os.path.getmtime(source_file) + self.optimized = optimize + if env is None: + env = default_env + self.env = env + + if lazy: # lazy is only for testing + self.render_func = None + return + (render_code, + self.render_func + ) = self._get_render_func(optimize) + if not keep_source: + self.source = None + + @classmethod + def from_path(cls, path, name=None, encoding='utf-8', **kw): + """classmethod. + Builds a template from a filepath. + args: + ``path`` + kwargs: + ``name`` default ``None``. + ``encoding`` default ``utf-8``. + """ + (source, abs_path) = load_template_path(path) + if not name: + name = path + return cls(name=name, source=source, source_file=abs_path, **kw) + + @classmethod + def from_ast(cls, ast, name=None, **kw): + """classmethod + Builds a template from an AST representation. + This is only provided as an invert to `to_ast` + args: + ``ast`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + (render_code, + render_func + ) = template._ast_to_render_func(ast) + template.render_func = render_func + template.is_convertable = False + return template + + @classmethod + def from_python_string(cls, python_string, name=None, **kw): + """classmethod + Builds a template from an python string representation. + This is only provided as an invert to `to_python_string` + args: + ``python_string`` + kwargs: + ``name`` default ``None``. 
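+
+        e.g. ``Template.from_python_string(tmpl.to_python_string())``
+        should rebuild a render-equivalent template.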
+ """ + template = cls(name=name, source='', lazy=True, **kw) + render_code = _python_compile(python_string) + template.render_func = _python_exec(render_code, name='render') + template.is_convertable = False + return template + + @classmethod + def from_python_code(cls, python_code, name=None, **kw): + """classmethod + Builds a template from python code object. + This is only provided as an invert to `to_python_code` + args: + ``python_code`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + template.render_func = _python_exec(python_code, name='render') + template.is_convertable = False + return template + + @classmethod + def from_python_func(cls, python_func, name=None, **kw): + """classmethod + Builds a template from an compiled python function. + This is only provided as an invert to `to_python_func` + args: + ``python_func`` + kwargs: + ``name`` default ``None``. + """ + template = cls(name=name, source='', lazy=True, **kw) + template.render_func = python_func + template.is_convertable = False + return template + + def to_ast(self, optimize=True, raw=False): + """Generates the AST for a given template. + This can be inverted with the classmethod `from_ast`. + + kwargs: + ``optimize`` default ``True``. + ``raw`` default ``False``. + + Note: this is just a public function for `_get_ast` + """ + if not self.is_convertable: + raise TemplateConversionException() + return self._get_ast(optimize=optimize, raw=raw) + + def to_python_string(self, optimize=True): + """Generates the Python string representation for a template. + This can be inverted with the classmethod `from_python_string`. + + kwargs: + ``optimize`` default ``True``. + + Note: this is just a public method for `_get_render_string` + """ + if not self.is_convertable: + raise TemplateConversionException() + python_string = self._get_render_string(optimize=optimize) + return python_string + + def to_python_code(self, optimize=True): + """Generates the Python code representation for a template. + This can be inverted with the classmethod `from_python_code`. + + kwargs: + ``optimize`` default ``True``. + + Note: this is just a public method for `_get_render_func` + """ + if not self.is_convertable: + raise TemplateConversionException() + (python_code, + python_string + ) = self._get_render_func(optimize=optimize) + return python_code + + def to_python_func(self, optimize=True): + """Makes the python render func available. + This can be inverted with the classmethod `from_python_func`. 
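+        The returned function has the ``(chunk, context)`` signature
+        expected by ``render_chunk``.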
+ + Note: this is just a public method for `_get_render_func` + """ + if self.render_func: + return self.render_func + if not self.is_convertable: + raise TemplateConversionException() + (render_code, render_func) = self._get_render_func(optimize=optimize) + return render_func + + def render(self, model, env=None): + env = env or self.env + rendered = [] + + def tmp_cb(err, result): + # TODO: get rid of + if err: + print('Error on template %r: %r' % (self.name, err)) + raise RenderException(err) + else: + rendered.append(result) + return result + + chunk = Stub(tmp_cb).head + self.render_chunk(chunk, Context.wrap(env, model)).end() + return rendered[0] + + def render_chunk(self, chunk, context): + if not self.render_func: + # to support laziness for testing + (render_code, + self.render_func + ) = self._get_render_func() + return self.render_func(chunk, context) + + def _get_tokens(self): + if not self.source: + return None + return tokenize(self.source) + + def _get_ast(self, optimize=False, raw=False): + if not self.source: + return None + try: + dast = ParseTree.from_source(self.source).to_dust_ast() + except ParseError as pe: + pe.source_file = self.source_file + raise + if raw: + return dast + return self.env.filter_ast(dast, optimize) + + def _get_render_string(self, optimize=True): + """ + Uses `optimize=True` by default because it makes the output easier to + read and more like dust's docs + + This was previously `_get_render_func(..., ret_str=True)` + """ + ast = self._get_ast(optimize) + if not ast: + return None + # for testing/dev purposes + return Compiler(self.env)._gen_python(ast) + + def _get_render_func(self, optimize=True, ret_str=False): + """ + Uses `optimize=True` by default because it makes the output easier to + read and more like dust's docs + + split `ret_str=True` into `_get_render_string()` + + Note that this doesn't save the render_code/render_func. + It is compiled as needed. + """ + ast = self._get_ast(optimize) + if not ast: + return (None, None) + # consolidated the original code into _ast_to_render_func as-is below + (render_code, + render_func + ) = self._ast_to_render_func(ast) + return (render_code, render_func) + + def _ast_to_render_func(self, ast): + """this was part of ``_get_render_func`` but is better implemented + as an separate function so that AST can be directly loaded. 
+ """ + compiler = Compiler(self.env) + (python_code, + python_func + ) = compiler.compile(ast) + return (python_code, python_func) + + def __repr__(self): + cn = self.__class__.__name__ + name, source_file = self.name, self.source_file + if not source_file: + return '<%s name=%r>' % (cn, name) + return '<%s name=%r source_file=%r>' % (cn, name, source_file) + + +class AshesException(Exception): + pass + + +class TemplateNotFound(AshesException): + def __init__(self, name): + self.name = name + super(TemplateNotFound, self).__init__('could not find template: %r' + % name) + + +class RenderException(AshesException): + pass + + +class ParseError(AshesException): + token = None + source_file = None + + def __init__(self, message, line_no=None, token=None): + self.message = message + self.token = token + self._line_no = line_no + + super(ParseError, self).__init__(self.__str__()) + + @property + def line_no(self): + if self._line_no: + return self._line_no + if getattr(self.token, 'start_line', None) is not None: + return self.token.start_line + return None + + @line_no.setter + def set_line_no(self, val): + self._line_no = val + + def __str__(self): + msg = self.message + infos = [] + if self.source_file: + infos.append('in %s' % self.source_file) + if self.line_no is not None: + infos.append('line %s' % self.line_no) + if infos: + msg += ' (%s)' % ' - '.join(infos) + return msg + + +class TemplateConversionException(AshesException): + def __init__(self): + super(TemplateConversionException, self).__init__('only templates from source ' + 'are convertable') + + +class BaseAshesEnv(object): + template_type = Template + autoescape_filter = 'h' + + def __init__(self, + loaders=None, + helpers=None, + filters=None, + special_chars=None, + optimizers=None, + pragmas=None, + auto_reload=True): + self.templates = {} + self.loaders = list(loaders or []) + self.filters = dict(DEFAULT_FILTERS) + if filters: + self.filters.update(filters) + self.helpers = dict(DEFAULT_HELPERS) + if helpers: + self.helpers.update(helpers) + self.special_chars = dict(DEFAULT_SPECIAL_CHARS) + if special_chars: + self.special_chars.update(special_chars) + self.optimizers = dict(DEFAULT_OPTIMIZERS) + if optimizers: + self.optimizers.update(optimizers) + self.pragmas = dict(DEFAULT_PRAGMAS) + if pragmas: + self.pragmas.update(pragmas) + self.auto_reload = auto_reload + + def log(self, level, name, message): + return # print(level, '-', name, '-', message) + + def render(self, name, model): + tmpl = self.load(name) + return tmpl.render(model, self) + + def load(self, name): + """Loads a template. + + args: + ``name`` template name + """ + try: + template = self.templates[name] + except KeyError: + template = self._load_template(name) + self.register(template) + if self.auto_reload: + if not getattr(template, 'source_file', None): + return template + mtime = os.path.getmtime(template.source_file) + if mtime > template.last_mtime: + template = self._load_template(name) + self.register(template) + return self.templates[name] + + def _load_template(self, name): + for loader in self.loaders: + try: + source = loader.load(name, env=self) + except TemplateNotFound: + continue + else: + return source + raise TemplateNotFound(name) + + def load_all(self, do_register=True, **kw): + """Loads all templates. 
+ + args: + ``do_register`` default ``True` + """ + all_tmpls = [] + for loader in reversed(self.loaders): + # reversed so the first loader to have a template + # will take precendence on registration + if callable(getattr(loader, 'load_all', None)): + tmpls = loader.load_all(self, **kw) + all_tmpls.extend(tmpls) + if do_register: + for t in tmpls: + self.register(t) + return all_tmpls + + def register(self, template, name=None): + if name is None: + name = template.name + self.templates[name] = template + return + + def register_path(self, path, name=None, **kw): + """\ + Reads in, compiles, and registers a single template from a specific + path to a file containing the dust source code. + """ + kw['env'] = self + ret = self.template_type.from_path(path=path, name=name, **kw) + self.register(ret) + return ret + + def register_source(self, name, source, **kw): + """\ + Compiles and registers a single template from source code + string. Assumes caller already decoded the source string. + """ + kw['env'] = self + ret = self.template_type(name=name, source=source, **kw) + self.register(ret) + return ret + + def filter_ast(self, ast, optimize=True): + if optimize: + optimizers = self.optimizers + else: + optimizers = UNOPT_OPTIMIZERS + optimizer = Optimizer(optimizers, self.special_chars) + ret = optimizer.optimize(ast) + return ret + + def apply_filters(self, string, auto, filters): + filters = filters or [] + if not filters: + if auto: + filters = ['s', auto] + else: + filters = ['s'] + elif filters[-1] != 's': + if auto and auto not in filters: + filters += ['s', auto] + else: + filters += ['s'] + for f in filters: + filt_fn = self.filters.get(f) + if filt_fn: + string = filt_fn(string) + return string + + def load_chunk(self, name, chunk, context): + try: + tmpl = self.load(name) + except TemplateNotFound as tnf: + context.env.log('error', 'load_chunk', + 'TemplateNotFound error: %r' % tnf.name) + return chunk.set_error(tnf) + return tmpl.render_chunk(chunk, context) + + def __iter__(self): + return self.templates.itervalues() + + +class AshesEnv(BaseAshesEnv): + """ + A slightly more accessible Ashes environment, with more + user-friendly options exposed. + """ + def __init__(self, paths=None, keep_whitespace=True, *a, **kw): + if isinstance(paths, string_types): + paths = [paths] + self.paths = list(paths or []) + self.keep_whitespace = keep_whitespace + self.is_strict = kw.pop('is_strict', False) + exts = list(kw.pop('exts', DEFAULT_EXTENSIONS)) + + super(AshesEnv, self).__init__(*a, **kw) + + for path in self.paths: + tpl = TemplatePathLoader(path, exts) + self.loaders.append(tpl) + + def filter_ast(self, ast, optimize=None): + optimize = not self.keep_whitespace # preferences override + return super(AshesEnv, self).filter_ast(ast, optimize) + + +def iter_find_files(directory, patterns, ignored=None): + """\ + Finds files under a `directory`, matching `patterns` using "glob" + syntax (e.g., "*.txt"). It's also possible to ignore patterns with + the `ignored` argument, which uses the same format as `patterns. 
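+
+    e.g. ``iter_find_files('src', ['*.py'], ignored=['flycheck_*'])``
+    lazily yields the matching paths (the function is a generator).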
+ + (from osutils.py in the boltons package) + """ + if isinstance(patterns, string_types): + patterns = [patterns] + pats_re = re.compile('|'.join([fnmatch.translate(p) for p in patterns])) + + if not ignored: + ignored = [] + elif isinstance(ignored, string_types): + ignored = [ignored] + ign_re = re.compile('|'.join([fnmatch.translate(p) for p in ignored])) + for root, dirs, files in os.walk(directory): + for basename in files: + if pats_re.match(basename): + if ignored and ign_re.match(basename): + continue + filename = os.path.join(root, basename) + yield filename + return + + +def walk_ext_matches(path, exts=None, ignored=None): + if exts is None: + exts = DEFAULT_EXTENSIONS + if ignored is None: + ignored = DEFAULT_IGNORED_PATTERNS + patterns = list(['*.' + e.lstrip('*.') for e in exts]) + + return sorted(iter_find_files(directory=path, + patterns=patterns, + ignored=ignored)) + + +class TemplatePathLoader(object): + def __init__(self, root_path, exts=None, encoding='utf-8'): + self.root_path = os.path.normpath(root_path) + self.encoding = encoding + self.exts = exts or list(DEFAULT_EXTENSIONS) + + def load(self, path, env=None): + env = env or default_env + norm_path = os.path.normpath(path) + if path.startswith('../'): + raise ValueError('no traversal above loader root path: %r' % path) + if not path.startswith(self.root_path): + norm_path = os.path.join(self.root_path, norm_path) + abs_path = os.path.abspath(norm_path) + template_name = os.path.relpath(abs_path, self.root_path) + template_type = env.template_type + return template_type.from_path(name=template_name, + path=abs_path, + encoding=self.encoding, + env=env) + + def load_all(self, env, exts=None, **kw): + ret = [] + exts = exts or self.exts + tmpl_paths = walk_ext_matches(self.root_path, exts) + for tmpl_path in tmpl_paths: + ret.append(self.load(tmpl_path, env)) + return ret + + +class FlatteningPathLoader(TemplatePathLoader): + """ + I've seen this mode of using dust templates in a couple places, + but really it's lazy and too ambiguous. It increases the chances + of silent conflicts and makes it hard to tell which templates refer + to which just by looking at the template code. 
+ """ + def __init__(self, *a, **kw): + self.keep_ext = kw.pop('keep_ext', True) + super(FlatteningPathLoader, self).__init__(*a, **kw) + + def load(self, *a, **kw): + tmpl = super(FlatteningPathLoader, self).load(*a, **kw) + name = os.path.basename(tmpl.name) + if not self.keep_ext: + name, ext = os.path.splitext(name) + tmpl.name = name + return tmpl + +try: + import bottle +except ImportError: + pass +else: + class AshesBottleTemplate(bottle.BaseTemplate): + extensions = list(bottle.BaseTemplate.extensions) + extensions.extend(['ash', 'ashes', 'dust']) + + def prepare(self, **options): + if not self.source: + self.source = self._load_source(self.name) + if self.source is None: + raise TemplateNotFound(self.name) + + options['name'] = self.name + options['source'] = self.source + options['source_file'] = self.filename + for key in ('optimize', 'keep_source', 'env'): + if key in self.settings: + options.setdefault(key, self.settings[key]) + env = self.settings.get('env', default_env) + # I truly despise 2.6.4's unicode kwarg bug + options = dict([(str(k), v) for k, v in options.iteritems()]) + self.tpl = env.register_source(**options) + + def _load_source(self, name): + fname = self.search(name, self.lookup) + if not fname: + return + with codecs.open(fname, "rb", self.encoding) as f: + return f.read() + + def render(self, *a, **kw): + for dictarg in a: + kw.update(dictarg) + context = self.defaults.copy() + context.update(kw) + return self.tpl.render(context) + + from functools import partial as _fp + ashes_bottle_template = _fp(bottle.template, + template_adapter=AshesBottleTemplate) + ashes_bottle_view = _fp(bottle.view, + template_adapter=AshesBottleTemplate) + del bottle + del _fp + + +ashes = default_env = AshesEnv() + + +def _main(): + # TODO: accidentally unclosed tags may consume + # trailing buffers without warning + try: + tmpl = ('{@eq key=hello value="True" type="boolean"}' + '{hello}, world' + '{:else}' + 'oh well, world' + '{/eq}' + ', {@size key=hello/} characters') + ashes.register_source('hi', tmpl) + print(ashes.render('hi', {'hello': 'ayy'})) + except Exception as e: + import pdb;pdb.post_mortem() + raise + + ae = AshesEnv(filters={'cn': comma_num}) + ae.register_source('cn_tmpl', 'comma_numd: {thing|cn}') + # print(ae.render('cn_tmpl', {'thing': 21000})) + ae.register_source('tmpl', '{`{ok}thing`}') + print(ae.render('tmpl', {'thing': 21000})) + + ae.register_source('tmpl2', '{test|s}') + out = ae.render('tmpl2', {'test': [''] * 10}) + print(out) + + ae.register_source('tmpl3', '{@iterate sort="desc" sort_key=1 key=lol}' + '{$idx} - {$0}: {$1}{~n}{/iterate}') + out = ae.render('tmpl3', {'lol': {'uno': 1, 'dos': 2}}) + print(out) + out = ae.render('tmpl3', {'lol': [(1, 2, 3), (4, 5, 6)]}) + print(out) + + print(escape_uri_path("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) + print(escape_uri_component("https://en.wikipedia.org/wiki/Asia's_Next_Top_Model_(cycle_3)")) + print('') + ae.register_source('tmpl4', '{#iterable}{$idx_1}/{$len}: {.}{@sep}, {/sep}{/iterable}') + out = ae.render('tmpl4', {'iterable': range(100, 108)}) + print(out) + + tmpl = '''\ + {#.} + row{~n} + {#.} + {.}{~n} + {/.} + {/.}''' + ashes.keep_whitespace = False + ashes.autoescape_filter = '' + ashes.register_source('nested_lists', tmpl) + print(ashes.render('nested_lists', [[1, 2], [3, 4]])) + + +class CLIError(ValueError): + pass + + +def _simple_render(template_path, template_literal, env_path_list, + model_path, model_literal, + trim_whitespace, filter, no_filter, + 
output_path, output_encoding, verbose): + # TODO: default value (placeholder for missing values) + env = AshesEnv(env_path_list) + env.keep_whitespace = not trim_whitespace + if filter in env.filters: + env.autoescape_filter = filter + else: + raise CLIError('unexpected filter %r, expected one of %r' + % (filter, env.filters)) + if no_filter: + env.autoescape_filter = '' + + if template_literal: + tmpl_obj = env.register_source('_literal_template', template_literal) + else: + if not template_path: + raise CLIError('expected template or template literal') + try: + tmpl_obj = env.load(template_path) + except (KeyError, TemplateNotFound): + tmpl_obj = env.register_path(template_path) + + if model_literal: + model = json.loads(model_literal) + elif not model_path: + raise CLIError('expected model or model literal') + elif model_path == '-': + model = json.load(sys.stdin) + else: + with open(model_path) as f: + model = json.load(f) + + output_text = tmpl_obj.render(model) + output_bytes = output_text.encode(output_encoding) + if output_path == '-': + print(output_bytes) + else: + with open(output_path, 'w') as f: + f.write(output_bytes) + return + + +def main(): + # using optparse for backwards compat with 2.6 (and earlier, maybe) + from optparse import OptionParser + + prs = OptionParser(description="render a template using a JSON input", + version='ashes %s' % (__version__,)) + ao = prs.add_option + ao('--env-path', + help="paths to search for templates, separate paths with :") + ao('--filter', default='h', + help="autoescape values with this filter, defaults to 'h' for HTML") + ao('--no-filter', action="store_true", + help="disables default HTML-escaping filter, overrides --filter") + ao('--trim-whitespace', action="store_true", + help="removes whitespace on template load") + ao('-m', '--model', dest='model_path', + help="path to the JSON model file, default - for stdin") + ao('-M', '--model-literal', + help="the literal string of the JSON model, overrides model") + ao('-o', '--output', dest='output_path', default='-', + help="path to the output file, default - for stdout") + ao('--output-encoding', default='utf-8', + help="encoding for the output, default utf-8") + ao('-t', '--template', dest='template_path', + help="path of template to render, absolute or relative to env-path") + ao('-T', '--template-literal', + help="the literal string of the template, overrides template") + ao('--verbose', help="emit extra output on stderr") + + opts, _ = prs.parse_args() + kwargs = dict(opts.__dict__) + + kwargs['env_path_list'] = (kwargs.pop('env_path') or '').split(':') + try: + _simple_render(**kwargs) + except CLIError as clie: + err_msg = '%s; use --help option for more info.' 
% (clie.args[0],)
+        prs.error(err_msg)
+    return
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/lib/static_ashes.pyc b/src/lib/static_ashes.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14f6f7567af1e3e906a3389aaf95ddad04bb065d
GIT binary patch
literal 99433
[99433 bytes of base85-encoded compiled bytecode omitted]
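
For reference, the library vendored above is driven through a small
rendering surface. A minimal sketch of the intended usage -- the module
name `static_ashes` is assumed from the file added by this patch, and the
template name and model are illustrative:

    from static_ashes import AshesEnv

    env = AshesEnv()
    env.register_source('hello', 'Hello {name}!')
    print(env.render('hello', {'name': 'IRPF90'}))  # Hello IRPF90!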

From: Thomas Applencourt
Date: Fri, 17 Mar 2017 11:20:18 -0500
Subject: [PATCH 23/31] Fix memory debug and tab

---
 src/build_file.py | 274 ++++++++++--------------
 src/codelet.py | 18 +-
 src/command_line.py | 322 +++++++++++++++-------------
 src/create_man.py | 10 +-
 src/cython_setup.py | 41 ----
 src/entity.py | 86 ++++----
 src/irp_stack.py | 260 ----------------------
 src/irpf90.py | 139 ++++++------
 src/irpf90_indent.py | 414 +++++++++++++++++++-----------------
 src/irpf90_t.py | 48 ++---
 src/irpman.py | 17 +-
 src/irpy_files.py | 23 +-
 src/module.py | 129 +++++------
 src/parsed_text.py | 160 +++++++-------
 src/preprocessed_text.py | 268 ++++++++++++-----------
 src/profile.py | 49 ++---
 src/regexps.py | 32 +--
 src/routine.py | 111 +++++-----
 src/templates/finalize.f90 | 2 +-
 src/templates/irp_lock.f90 | 2 +-
 src/templates/irp_stack.f90 | 151 +++++++++++++
 src/templates/module.f90 | 1 -
 src/templates/provider.f90 | 7 +-
 src/templates/writer.f90 | 69 ------
 src/touches.py | 46 ++--
 src/util.py | 178 +++++++++------
 src/vim.py | 35 +--
 27 files changed, 1369 insertions(+), 1523 deletions(-)
 delete mode 100755 src/cython_setup.py
 delete mode 100644 src/irp_stack.py
 create
diff --git a/src/build_file.py b/src/build_file.py index 2115734..e4cd072 100644 --- a/src/build_file.py +++ b/src/build_file.py @@ -36,6 +36,7 @@ irp_id = irpf90_t.irp_id cwd = os.getcwd() + def dress(f, in_root=False): #(str,bool) -> str """ Transforms the filename into $PWD/IRPF90_temp/f @@ -65,15 +66,13 @@ def create_build_touches(l_irp_m, ninja): result_ninja = '\n'.join([ "build {target_o}: compile_fortran_{irp_id} {target_F90} | {list_of_modules_irp}", - " short_in = {short_target_F90}", - " short_out = {short_target_o}", - "" + " short_in = {short_target_F90}", " short_out = {short_target_o}", "" ]) result_make = '\n'.join([ "{target_o}: {target_F90} | {list_of_modules_irp}", - '\t@printf "F: {short_target_F90} -> {short_target_o}\\n"', - "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" + '\t@printf "F: {short_target_F90} -> {short_target_o}\\n"', + "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" ]) result = result_ninja if ninja else result_make @@ -96,15 +95,12 @@ def create_build_archive(l_irp_o, l_usr_o_wo_main, l_ext_o, l_irp_sup_o, ninja=T list_of_object = ' '.join(l_irp_o + l_usr_o_wo_main + l_ext_o + l_irp_sup_o) - result_ninja = '\n'.join([ - "build {lib}: archive_{irp_id} {list_of_object}", - " short_out = {short_lib}", - ""]) + result_ninja = '\n'.join( + ["build {lib}: archive_{irp_id} {list_of_object}", " short_out = {short_lib}", ""]) result_make = '\n'.join([ - "{lib}: {list_of_object}", - '\t@printf "Archive: {short_lib}\\n"', - "\t@$(AR) cr $@ $^", ""]) + "{lib}: {list_of_object}", '\t@printf "Archive: {short_lib}\\n"', "\t@$(AR) cr $@ $^", "" + ]) result = result_ninja if ninja else result_make return result.format(**locals()) @@ -124,8 +120,9 @@ def create_build_link(t, l_irp_m, l_usr_m, l_ext_m, ninja=True): basename = os.path.basename(filename) if basename != progname: - from util import logger - logger.info('program-name `{0}` != file-name `{1}` (using file-name for now...)'.format(progname,basename)) + from util import logger + logger.info('program-name `{0}` != file-name `{1}` (using file-name for now...)'.format( + progname, basename)) target = dress(filename, in_root=True) short_target = filename @@ -138,14 +135,13 @@ def create_build_link(t, l_irp_m, l_usr_m, l_ext_m, ninja=True): result_ninja = '\n'.join([ "build {target}: link_{irp_id} {target_o} {irp_lib} | {list_of_module}", - " short_out = {short_target}", - ""]) + " short_out = {short_target}", "" + ]) result_make = '\n'.join([ - "{target}:{target_o} {irp_lib} | {list_of_module}", - '\t@printf "Link: {short_target}\\n"', - "\t@$(FC) $^ $(LIB) -o $@", - ""]) + "{target}:{target_o} {irp_lib} | {list_of_module}", '\t@printf "Link: {short_target}\\n"', + "\t@$(FC) $^ $(LIB) -o $@", "" + ]) result = result_ninja if ninja else result_make @@ -186,10 +182,10 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True): # Expensive and stupid. We can create a dict to do the lookup only once
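# (A sketch, not part of this patch, of the one-time lookup table suggested
#  just above -- d_mod is a hypothetical name:
#
#      d_mod = {}
#      for x in l_module:
#          for m in x.gen_mod:
#              d_mod.setdefault(m, []).append(x)
#
#  after which each module name m resolves in O(1) via d_mod.get(m, []),
#  instead of rescanning l_module for every m in the loop below.)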
for m in t.needed_modules_usr: - # m is name - for x in l_module: - if m in x.gen_mod and x.filename != t.filename: - needed_modules.append("%s.irp.o" % x.filename) + # m is name + for x in l_module: + if m in x.gen_mod and x.filename != t.filename: + needed_modules.append("%s.irp.o" % x.filename) from util import uniquify needed_modules = uniquify(needed_modules) @@ -206,51 +202,47 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True): target_module_F90 = dress(short_target_module_F90) needed_modules_irp += [target_module_o] - list_of_modules = ' '.join(map(dress, needed_modules)) + list_of_modules = ' '.join(map(dress, needed_modules)) list_of_modules_irp = ' '.join(map(dress, needed_modules_irp)) inline_include = True if not inline_include: - #Wrong name, this not work! - #list_of_includes = ' '.join(map(lambda x: dress(x, in_root=True), t.includes)) - raise NotImplemented + # Wrong name, this does not work! + #list_of_includes = ' '.join(map(lambda x: dress(x, in_root=True), t.includes)) + raise NotImplementedError else: - #The include have already by included - list_of_includes = ' ' + # The includes have already been included + list_of_includes = ' ' + l_build = [ "build {target_o}: compile_fortran_{irp_id} {target_F90} | {list_of_includes} {list_of_modules} {list_of_modules_irp}", - " short_in = {short_target_F90}", - " short_out = {short_target}", - "" + " short_in = {short_target_F90}", " short_out = {short_target}", "" ] l_build_make = [ "{target_o}: {target_F90} | {list_of_includes} {list_of_modules} {list_of_modules_irp}", - '\t@printf "F: {short_target_F90} -> {short_target}\\n"', - "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" + '\t@printf "F: {short_target_F90} -> {short_target}\\n"', "\t@$(FC) $(FCFLAGS) -c $^ -o $@", + "" ] # No module is needed when compiling the irp_module. if t.has_irp_module: l_build += [ "build {target_module_o}: compile_fortran_{irp_id} {target_module_F90} | {list_of_includes} {list_of_modules} ", - " short_in = {short_target_module_F90}", - " short_out = {short_target_module_o}", - "" + " short_in = {short_target_module_F90}", " short_out = {short_target_module_o}", "" ] l_build_make += [ "{target_module_o}: {target_module_F90} | {list_of_includes} {list_of_modules}", - '\t@printf "F: {short_target_module_F90} -> {short_target_module_o}\\n"', - "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" + '\t@printf "F: {short_target_module_F90} -> {short_target_module_o}\\n"', + "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" ] l_cur = l_build if ninja else l_build_make return '\n'.join(l_cur).format(**locals()) -def create_build_remaining(f,ninja): +def create_build_remaining(f, ninja): """ Create the build command for the remaining file f. f is a file name (str).
""" @@ -271,10 +263,10 @@ def create_build_remaining(f,ninja): if extension.lower() in ['f', 'f90']: result = ["build {target_o}: compile_fortran_{irp_id} {target_i}"] - result_make = [ - '{target_o}: {target_i}', - '\t@printf "F: {short_target_o} -> {short_target_i}\\n"', - "\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""] + result_make = [ + '{target_o}: {target_i}', '\t@printf "F: {short_target_o} -> {short_target_i}\\n"', + "\t@$(FC) $(FCFLAGS) -c $^ -o $@", "" + ] elif extension.lower() in ['c']: result = ["build {target_o}: compile_c_{irp_id} {target_i}"] @@ -283,105 +275,77 @@ def create_build_remaining(f,ninja): result += [" short_in = {short_target_i}", " short_out = {short_target_o}", ""] - result_final = result if ninja else result_make + result_final = result if ninja else result_make return '\n'.join(result_final).format(**locals()) -def create_makefile(d_flags,d_var,irpf90_flags,ninja=True): +def create_makefile(d_flags, d_var, irpf90_flags, ninja=True): - result = ["IRPF90= irpf90", - "IRPF90FLAGS= %s" % irpf90_flags, - "BUILD_SYSTEM= %s" % ('ninja' if ninja else 'make'), - ""] + result = [ + "IRPF90= irpf90", "IRPF90FLAGS= %s" % irpf90_flags, + "BUILD_SYSTEM= %s" % ('ninja' if ninja else 'make'), "" + ] # Export all the env variable used by irpf90 - result += ['.EXPORT_ALL_VARIABLES:', - '', - '\n'.join("{0} = {1}".format(k, v) for k, v in sorted(d_flags.iteritems())), - '', - '\n'.join("{0} = {1}".format(k, ' '.join(v)) for k, v in sorted(d_var.iteritems())), - ''] + result += [ + '.EXPORT_ALL_VARIABLES:', '', '\n'.join("{0} = {1}".format(k, v) + for k, v in sorted(d_flags.iteritems())), '', + '\n'.join("{0} = {1}".format(k, ' '.join(v)) for k, v in sorted(d_var.iteritems())), '' + ] - result += [ r'# Dark magic below modify with caution!', - r'# "You are Not Expected to Understand This"', - r"# .", - r"# /^\ .", - r'# /\ "V",', - r"# /__\ I O o", - r"# //..\\ I .", - r"# \].`[/ I", - r"# /l\/j\ (] . O", - r"# /. ~~ ,\/I .", - r"# \\L__j^\/I o", - r"# \/--v} I o .", - r"# | | I _________", - r"# | | I c(` ')o", - r"# | l I \. ,/", - r"# _/j L l\_! _//^---^\\_", - r""] + result += [ + r'# Dark magic below modify with caution!', r'# "You are Not Expected to Understand This"', + r"# .", r"# /^\ .", r'# /\ "V",', + r"# /__\ I O o", r"# //..\\ I .", r"# \].`[/ I", + r"# /l\/j\ (] . O", r"# /. ~~ ,\/I .", r"# \\L__j^\/I o", + r"# \/--v} I o .", r"# | | I _________", r"# | | I c(` ')o", + r"# | l I \. ,/", r"# _/j L l\_! _//^---^\\_", r"" + ] - result += ["", - "ifeq ($(BUILD_SYSTEM),ninja)", - "\tBUILD_FILE=IRPF90_temp/build.ninja", - "\tIRPF90FLAGS += -j", - "else ifeq ($(BUILD_SYSTEM),make)", - "\tBUILD_FILE=IRPF90_temp/build.make", - "\tBUILD_SYSTEM += -j", - "else", - "DUMMY:", - "\t$(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)')", - "endif"] + result += [ + "", "ifeq ($(BUILD_SYSTEM),ninja)", "\tBUILD_FILE=IRPF90_temp/build.ninja", + "\tIRPF90FLAGS += -j", "else ifeq ($(BUILD_SYSTEM),make)", + "\tBUILD_FILE=IRPF90_temp/build.make", "\tBUILD_SYSTEM += -j", "else", "DUMMY:", + "\t$(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)')", "endif" + ] - result += ["", - "define run_and_touch", - " $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2)", - "endef", - "", - "EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' 
-f1)", - "", - ".PHONY: all", - "", - "all: $(BUILD_FILE)", - "\t$(call run_and_touch, $<, $(EXE))", - "", - ".NOTPARALLEL: $(EXE)", - "$(EXE): $(BUILD_FILE)", - "\t$(call run_and_touch, $<, $(EXE))", - - "$(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print)", - "\t$(IRPF90) $(IRPF90FLAGS)", - "", - "clean:", - '\trm -f -- $(BUILD_FILE) $(EXE)' - '\t$(shell find IRPF90_temp -type f \\( -name "*.o" -o -name "*.mod" -name "*.a" \\) -delete;)', - "veryclean: clean", - "\trm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags"] + result += [ + "", "define run_and_touch", + " $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2)", + "endef", "", "EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1)", "", + ".PHONY: all", "", "all: $(BUILD_FILE)", "\t$(call run_and_touch, $<, $(EXE))", "", + ".NOTPARALLEL: $(EXE)", "$(EXE): $(BUILD_FILE)", "\t$(call run_and_touch, $<, $(EXE))", + "$(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print)", + "\t$(IRPF90) $(IRPF90FLAGS)", "", "clean:", '\trm -f -- $(BUILD_FILE) $(EXE)' + '\t$(shell find IRPF90_temp -type f \\( -name "*.o" -o -name "*.mod" -name "*.a" \\) -delete;)', + "veryclean: clean", "\trm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags" + ] import util - data = '%s\n' % '\n'.join(result) - util.lazy_write_file('Makefile',data,conservative=True) + data = '%s\n' % '\n'.join(result) + util.lazy_write_file('Makefile', data, conservative=True) + def create_make_all_clean(l_main): - # - '''Create the ALL and CLEAN target of Makefile + # + '''Create the ALL and CLEAN target of Makefile Note: Portability doesn't mater. -delete is maybe not posix but -exec rm {} + is far more ugly! ''' - l_executable =' '.join(dress( t.filename, in_root=True) for t in l_main) + l_executable = ' '.join(dress(t.filename, in_root=True) for t in l_main) - output = [".PHONY : all", - "all: {l_executable}", - "", - ".PHONY: clean", - "clean:", - '\tfind . -type f \( -name "*.o" -o -name "*.mod" \) -delete; rm -f {l_executable} --' - ""] + output = [ + ".PHONY : all", "all: {l_executable}", "", ".PHONY: clean", "clean:", + '\tfind . 
-type f \( -name "*.o" -o -name "*.mod" \) -delete; rm -f {l_executable} --' + "" + ] + + return ['\n'.join(output).format(**locals())] - return [ '\n'.join(output).format(**locals())] def create_var_and_rule(d_flags, ninja): @@ -392,59 +356,49 @@ def create_var_and_rule(d_flags, ninja): # Rules t = [ - "rule compile_fortran_{irp_id}", - " command = $FC $FCFLAGS -c $in -o $out", - " description = F : $short_in -> $short_out", - "", - "rule compile_c_{irp_id}", + "rule compile_fortran_{irp_id}", " command = $FC $FCFLAGS -c $in -o $out", + " description = F : $short_in -> $short_out", "", "rule compile_c_{irp_id}", " command = $CC $CFLAGS -c $in -o $out", - " description = C : $short_in -> $short_out", - "", - "rule compile_cxx_{irp_id}", + " description = C : $short_in -> $short_out", "", "rule compile_cxx_{irp_id}", " command = $CXX $CXXFLAGS -c $in -o $out", - " description = C++ : $short_in -> $short_out", - "", - "rule archive_{irp_id}", - " command = $AR cr $out $in", - " description = Archive: $short_out", - "", - "rule link_{irp_id}", - " command = $FC $FCFLAGS $in $LIB -o $out", - " description = Link: $short_out", - "" + " description = C++ : $short_in -> $short_out", "", "rule archive_{irp_id}", + " command = $AR cr $out $in", " description = Archive: $short_out", "", + "rule link_{irp_id}", " command = $FC $FCFLAGS $in $LIB -o $out", + " description = Link: $short_out", "" ] output += ['\n'.join(t).format(irp_id=irpf90_t.irp_id, **d_flags)] return output - # Environment variables d_default = { - "FC": "gfortran", - "FCFLAGS": "-O2", - "AR": "ar", - "RANLIB": " ranlib", - "CC": "gcc", - "CFLAGS": "-O2", - "CXX": "g++", - "CXXFLAGS": "-O2", - "LIB": ""} + "FC": "gfortran", + "FCFLAGS": "-O2", + "AR": "ar", + "RANLIB": " ranlib", + "CC": "gcc", + "CFLAGS": "-O2", + "CXX": "g++", + "CXXFLAGS": "-O2", + "LIB": "" +} d_flags = dict() for k, v in d_default.iteritems(): - d_flags[k] = os.environ[k] if k in os.environ else v + d_flags[k] = os.environ[k] if k in os.environ else v include_dir = ' ' + ' '.join(["-I %s" % (i) for i in command_line.include_dir]) d_var = dict() for k in ['SRC', 'OBJ']: - d_var[k] = os.environ[k].split() if k in os.environ else [] + d_var[k] = os.environ[k].split() if k in os.environ else [] def create_generalmakefile(ninja): - create_makefile(d_flags,d_var, include_dir,ninja) + create_makefile(d_flags, d_var, include_dir, ninja) + def run(d_module, ninja): #(Dict[str,Module],bool) -> str @@ -485,7 +439,7 @@ def run(d_module, ninja): l_irp_sup_o = ["irp_touches.irp.o"] l_irp_sup_s = ["irp_touches.irp.F90"] - if command_line.do_assert: + if command_line.do_debug or command_line.do_assert: l_irp_sup_o += ["irp_stack.irp.o"] l_irp_sup_s += ["irp_stack.irp.F90"] @@ -518,13 +472,13 @@ def run(d_module, ninja): output = create_var_and_rule(d_flags, ninja) if not ninja: - output += create_make_all_clean(l_mod_main) - + output += create_make_all_clean(l_mod_main) + # Create all the .irp.F90 -> .o for m in l_mod: output.append(create_build_compile(m, l_mod, l_ext_m, ninja)) - output.append(create_build_touches(l_irp_m,ninja)) + output.append(create_build_touches(l_irp_m, ninja)) # All the objects. 
Kind of, only need usr without main for the static library output.append(create_build_archive(l_irp_o, l_usr_o_wo_main, l_ext_o, l_irp_sup_o, ninja)) @@ -533,13 +487,13 @@ def run(d_module, ninja): output.append(create_build_link(i, l_irp_m, l_usr_m, l_ext_m, ninja)) # Remaining files - for i in l_irp_sup_s[1:]+l_ext_s: + for i in l_irp_sup_s[1:] + l_ext_s: output.append(create_build_remaining(i, ninja)) filename = os.path.join(irpdir, "build.ninja" if ninja else "build.make") data = '%s\n' % '\n\n'.join(output) import util - util.lazy_write_file(filename,data,touch=True) + util.lazy_write_file(filename, data, touch=True) return diff --git a/src/codelet.py b/src/codelet.py index d5e6f25..2007c28 100644 --- a/src/codelet.py +++ b/src/codelet.py @@ -27,8 +27,9 @@ from command_line import command_line import irpf90_t + def run(): - template = """ + template = """ program codelet_%(name)s implicit none integer :: i @@ -58,12 +59,11 @@ end """ - name, NMAX, precondition, filename = command_line.codelet - if precondition is None: - precondition = "" - else: - precondition = "PROVIDE "+precondition - - from util import lazy_write_file - lazy_write_file(filename,template%locals()) + name, NMAX, precondition, filename = command_line.codelet + if precondition is None: + precondition = "" + else: + precondition = "PROVIDE " + precondition + from util import lazy_write_file + lazy_write_file(filename, template % locals()) diff --git a/src/command_line.py b/src/command_line.py index ebd2d36..5a1c3ee 100644 --- a/src/command_line.py +++ b/src/command_line.py @@ -32,125 +32,150 @@ import re description = "IRPF90 Fortran preprocessor." options = {} -options['a'] = [ 'assert' , 'Activates ASSERT statements. If absent, remove ASSERT statements.', 0 ] -options['c'] = [ 'codelet' , 'entity:NMAX or entity:precondition:NMAX : Generate a codelet to profile a provider running NMAX times', 1 ] -options['C'] = [ 'coarray' , 'All providers are coarrays', 0 ] -options['d'] = [ 'debug' , 'Activates debug. The name of the current subroutine/function/provider will be printed on the standard output when entering or exiting a routine, as well as the CPU time passed inside the routine.', 0 ] -options['D'] = [ 'define' , 'Defines a variable identified by the IRP_IF statements.', 1 ] -options['g'] = [ 'profile' , 'Activates profiling of the code.', 0 ] -options['h'] = [ 'help' , 'Print this help', 0 ] -options['I'] = [ 'include' , 'Include directory', 1 ] -options['j'] = [ 'ninja' , 'Use Ninja instead of make', 0 ] -options['i'] = [ 'init' , 'Initialize current directory. Creates a default Makefile and the temporary working directories.', 0 ] -options['l'] = [ 'align' , 'Align arrays using compiler directives and sets the $IRP_ALIGN variable. For example, --align=32 aligns all arrays on a 32 byte boundary.', 1 ] -options['m'] = [ 'memory' , 'Print memory allocations/deallocations.', 0 ] -options['n'] = [ 'inline' , ' : Force inlining of providers or builders', 1 ] -options['o'] = [ 'checkopt' , 'Shows where optimization may be required', 0 ] -options['p'] = [ 'preprocess' , 'Prints a preprocessed file to standard output. 
Useful for debugging files containing shell scripts.', 1 ] -options['r'] = [ 'no_directives', 'Ignore all compiler directives !DEC$ and !DIR$', 0 ] -options['s'] = [ 'substitute' , 'Substitute values in do loops for generating specific optimized code.', 1 ] -options['t'] = [ 'touch' , 'Display which entities are touched when touching the variable given as an argument.', 1 ] -options['v'] = [ 'version' , 'Prints version of irpf90', 0 ] -options['w'] = [ 'warnings' , 'Activate Warnings', 0 ] -options['z'] = [ 'openmp' , 'Activate for OpenMP code', 0 ] -options['G'] = [ 'graph' , 'Print the dependecy-graph of the entities (dots format)', 0 ] -options['T'] = [ 'Task' , 'Auto-parallelism ', 0 ] +options['a'] = ['assert', 'Activates ASSERT statements. If absent, remove ASSERT statements.', 0] +options['c'] = [ + 'codelet', + 'entity:NMAX or entity:precondition:NMAX : Generate a codelet to profile a provider running NMAX times', + 1 +] +options['C'] = ['coarray', 'All providers are coarrays', 0] +options['d'] = [ + 'debug', + 'Activates debug. The name of the current subroutine/function/provider will be printed on the standard output when entering or exiting a routine, as well as the CPU time passed inside the routine.', + 0 +] +options['D'] = ['define', 'Defines a variable identified by the IRP_IF statements.', 1] +options['g'] = ['profile', 'Activates profiling of the code.', 0] +options['h'] = ['help', 'Print this help', 0] +options['I'] = ['include', 'Include directory', 1] +options['j'] = ['ninja', 'Use Ninja instead of make', 0] +options['i'] = [ + 'init', + 'Initialize current directory. Creates a default Makefile and the temporary working directories.', + 0 +] +options['l'] = [ + 'align', + 'Align arrays using compiler directives and sets the $IRP_ALIGN variable. For example, --align=32 aligns all arrays on a 32 byte boundary.', + 1 +] +options['m'] = ['memory', 'Print memory allocations/deallocations.', 0] +options['n'] = ['inline', ' : Force inlining of providers or builders', 1] +options['o'] = ['checkopt', 'Shows where optimization may be required', 0] +options['p'] = [ + 'preprocess', + 'Prints a preprocessed file to standard output. 
Useful for debugging files containing shell scripts.', + 1 +] +options['r'] = ['no_directives', 'Ignore all compiler directives !DEC$ and !DIR$', 0] +options['s'] = [ + 'substitute', 'Substitute values in do loops for generating specific optimized code.', 1 +] +options['t'] = [ + 'touch', 'Display which entities are touched when touching the variable given as an argument.', + 1 +] +options['v'] = ['version', 'Prints version of irpf90', 0] +options['w'] = ['warnings', 'Activate Warnings', 0] +options['z'] = ['openmp', 'Activate for OpenMP code', 0] +options['G'] = ['graph', 'Print the dependecy-graph of the entities (dots format)', 0] +options['T'] = ['Task', 'Auto-parallelism ', 0] + class CommandLine(object): + def __init__(self): + global options + self._opts = None + self.argv = list(sys.argv) + self.executable_name = self.argv[0] - def __init__(self): - global options - self._opts = None - self.argv = list(sys.argv) - self.executable_name = self.argv[0] + @irpy.lazy_property + def defined(self): + return [a for o, a in self.opts if o in ["-D", '--' + options['D'][0]]] - @irpy.lazy_property - def defined(self): - return [ a for o,a in self.opts if o in [ "-D", '--'+options['D'][0] ] ] + @irpy.lazy_property + def graph(self): + return next((a.split() for o, a in self.opts if o in ["-G", '--' + options['G'][0]]), []) - @irpy.lazy_property - def graph(self): - return next((a.split() for o,a in self.opts if o in ["-G", '--'+options['G'][0] ]),[]) - - @irpy.lazy_property - def include_dir(self): - l = [] - for o,a in self.opts: - if o in [ "-I", '--'+options['I'][0] ]: - if len(a) < 1: - print "Error: -I option needs a directory" - if a[-1] != '/': - a = a+'/' - l.append(a) - return l - - @irpy.lazy_property - def inline(self): - return next( (a for o,a in self.opts if o in [ "-n", '--'+options['n'][0] ]),'') + @irpy.lazy_property + def include_dir(self): + l = [] + for o, a in self.opts: + if o in ["-I", '--' + options['I'][0]]: + if len(a) < 1: + print "Error: -I option needs a directory" + if a[-1] != '/': + a = a + '/' + l.append(a) + return l - @irpy.lazy_property - def substituted(self): - self._substituted = {} - for o,a in self.opts: - if o in [ "-s", '--'+options['s'][0] ]: - k, v = a.split(':') - v_re = re.compile(r"(\W)(%s)(\W.*$|$)"%k.strip()) - self._substituted[k] = [v, v_re] - return self._substituted + @irpy.lazy_property + def inline(self): + return next((a for o, a in self.opts if o in ["-n", '--' + options['n'][0]]), '') - @irpy.lazy_property - def codelet(self): - for o,a in self.opts: - if o in [ "-c", '--'+options['c'][0] ]: - buffer = a.split(':') - filename = 'codelet_'+buffer[0]+'.irp.f' - if len(buffer) == 2: - return [buffer[0], int(buffer[1]), None, filename] - elif len(buffer) == 3: - return [buffer[0], int(buffer[2]), buffer[1], filename] - else: - print """ + @irpy.lazy_property + def substituted(self): + self._substituted = {} + for o, a in self.opts: + if o in ["-s", '--' + options['s'][0]]: + k, v = a.split(':') + v_re = re.compile(r"(\W)(%s)(\W.*$|$)" % k.strip()) + self._substituted[k] = [v, v_re] + return self._substituted + + @irpy.lazy_property + def codelet(self): + for o, a in self.opts: + if o in ["-c", '--' + options['c'][0]]: + buffer = a.split(':') + filename = 'codelet_' + buffer[0] + '.irp.f' + if len(buffer) == 2: + return [buffer[0], int(buffer[1]), None, filename] + elif len(buffer) == 3: + return [buffer[0], int(buffer[2]), buffer[1], filename] + else: + print """ Error in codelet definition. 
Use: --codelet=provider:NMAX or --codelet=provider:precondition:NMAX """ - sys.exit(1) + sys.exit(1) - @irpy.lazy_property - def preprocessed(self): - return [a for o,a in self.ops if o in [ "-p", '--'+options['p'][0] ] ] + @irpy.lazy_property + def preprocessed(self): + return [a for o, a in self.ops if o in ["-p", '--' + options['p'][0]]] - @irpy.lazy_property - def touched(self): - return [a for o,a in self.ops if o in [ "-t", '--'+options['t'][0] ] ] + @irpy.lazy_property + def touched(self): + return [a for o, a in self.ops if o in ["-t", '--' + options['t'][0]]] - @irpy.lazy_property - def align(self): - return next( (a for o,a in self.opts if o in [ "-l", '--'+options['l'][0] ]),'1') + @irpy.lazy_property + def align(self): + return next((a for o, a in self.opts if o in ["-l", '--' + options['l'][0]]), '1') - @irpy.lazy_property - def coarray(self): - return any(o for o,a in self.opts if o in [ "-C", '--'+options['C'][0] ]) + @irpy.lazy_property + def coarray(self): + return any(o for o, a in self.opts if o in ["-C", '--' + options['C'][0]]) - @irpy.lazy_property - def warnings(self): - return any(o for o,a in self.opts if o in [ "-W", '--'+options['W'][0] ]) + @irpy.lazy_property + def warnings(self): + return any(o for o, a in self.opts if o in ["-W", '--' + options['W'][0]]) - @irpy.lazy_property - def openmp(self): - return any(o for o,a in self.opts if o in [ "-z", '--'+options['z'][0] ]) + @irpy.lazy_property + def openmp(self): + return any(o for o, a in self.opts if o in ["-z", '--' + options['z'][0]]) - @irpy.lazy_property - def ninja(self): - return any(o for o,a in self.opts if o in [ "-j", '--'+options['j'][0] ]) + @irpy.lazy_property + def ninja(self): + return any(o for o, a in self.opts if o in ["-j", '--' + options['j'][0]]) - @irpy.lazy_property - def directives(self): - return not(any(o for o,a in self.opts if o in [ "-r", '--'+options['r'][0] ])) + @irpy.lazy_property + def directives(self): + return not (any(o for o, a in self.opts if o in ["-r", '--' + options['r'][0]])) - def usage(self): - t = """ + def usage(self): + t = """ $EXE - $DESCR Usage: @@ -158,37 +183,38 @@ Usage: Options: """ - t = t.replace("$EXE",self.executable_name) - t = t.replace("$DESCR",description) - print t - print_options() - print "" - print "Version : ", version - print "" + t = t.replace("$EXE", self.executable_name) + t = t.replace("$DESCR", description) + print t + print_options() + print "" + print "Version : ", version + print "" - def opts(self): - if self._opts is None: - optlist = ["",[]] - for o in options: - b = [o]+options[o] - if b[3] == 1: - b[0] = b[0]+":" - b[1] = b[1]+"=" - optlist[0] += b[0] - optlist[1] += [b[1]] - - try: - self._opts, args = getopt.getopt(self.argv[1:], optlist[0], optlist[1]) - except getopt.GetoptError, err: - # print help information and exit: - self.usage() - print str(err) # will print something like "option -a not recognized" - sys.exit(2) - - return self._opts - opts = property(fget=opts) - - t = """ + def opts(self): + if self._opts is None: + optlist = ["", []] + for o in options: + b = [o] + options[o] + if b[3] == 1: + b[0] = b[0] + ":" + b[1] = b[1] + "=" + optlist[0] += b[0] + optlist[1] += [b[1]] + + try: + self._opts, args = getopt.getopt(self.argv[1:], optlist[0], optlist[1]) + except getopt.GetoptError, err: + # print help information and exit: + self.usage() + print str(err) # will print something like "option -a not recognized" + sys.exit(2) + + return self._opts + + opts = property(fget=opts) + + t = """ def do_$LONG(self): if 
'_do_$LONG' not in self.__dict__: self._do_$LONG = False @@ -199,13 +225,14 @@ def do_$LONG(self): return self._do_$LONG do_$LONG = property(fget=do_$LONG) """ - for short in options: - long = options[short][0] - exec t.replace("$LONG",long).replace("$SHORT",short) #in locals() - - @irpy.lazy_property - def do_run(self): - return not(any( (self.do_version, self.do_help, self.do_preprocess, self.do_touch, self.do_init))) + for short in options: + long = options[short][0] + exec t.replace("$LONG", long).replace("$SHORT", short) #in locals() + + @irpy.lazy_property + def do_run(self): + return not (any( + (self.do_version, self.do_help, self.do_preprocess, self.do_touch, self.do_init))) # @irpy.lazy_property # def do_Task(self): @@ -213,17 +240,20 @@ do_$LONG = property(fget=do_$LONG) command_line = CommandLine() + def print_options(): - keys = options.keys() - keys.sort() - import subprocess - for k in keys: - description = options[k][1] - p1 = subprocess.Popen(["fold", "-s", "-w", "40"],stdout=subprocess.PIPE,stdin=subprocess.PIPE) - description = p1.communicate(description)[0] - description = description.replace('\n','\n'.ljust(27)) - print ("-%s, --%s"%(k,options[k][0])).ljust(25), description+'\n' - print "\n" + keys = options.keys() + keys.sort() + import subprocess + for k in keys: + description = options[k][1] + p1 = subprocess.Popen( + ["fold", "-s", "-w", "40"], stdout=subprocess.PIPE, stdin=subprocess.PIPE) + description = p1.communicate(description)[0] + description = description.replace('\n', '\n'.ljust(27)) + print("-%s, --%s" % (k, options[k][0])).ljust(25), description + '\n' + print "\n" + if __name__ == '__main__': - print_options() + print_options() diff --git a/src/create_man.py b/src/create_man.py index b6c7bc0..447f367 100644 --- a/src/create_man.py +++ b/src/create_man.py @@ -27,9 +27,10 @@ from entity import Entity from routine import Routine from irpf90_t import mandir -from util import parmap, build_dim,lazy_write_file +from util import parmap, build_dim, lazy_write_file import os + def do_print_short(entity): assert type(entity) == Entity str_ = "{0:<35} : {1:<30} :: {2:<25} {3}".format(entity.prototype.filename[0], @@ -107,7 +108,6 @@ def do_print(entity, d_entity): lazy_write_file("%s%s.l" % (mandir, name), '%s\n' % str_) - ###################################################################### def do_print_subroutines(sub): assert type(sub) == Routine @@ -164,11 +164,11 @@ def run(d_entity, d_routine): l_subs = d_routine.values() - l_data_to_write = [("%s.l" % os.path.join(mandir, s.name), do_print_subroutines(s)) for s in l_subs] - + l_data_to_write = [("%s.l" % os.path.join(mandir, s.name), do_print_subroutines(s)) + for s in l_subs] def worker(l): - filename, text = l + filename, text = l lazy_write_file(filename, text) parmap(worker, l_data_to_write) diff --git a/src/cython_setup.py b/src/cython_setup.py deleted file mode 100755 index 9010539..0000000 --- a/src/cython_setup.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -# IRPF90 is a Fortran90 preprocessor written in Python for programming using -# the Implicit Reference to Parameters (IRP) method. -# Copyright (C) 2009 Anthony SCEMAMA -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Anthony Scemama -# LCPQ - IRSAMC - CNRS -# Universite Paul Sabatier -# 118, route de Narbonne -# 31062 Toulouse Cedex 4 -# scemama@irsamc.ups-tlse.fr - -from distutils.core import setup -from distutils.extension import Extension -from Cython.Distutils import build_ext -import os - -to_remove = """__init__.py cython_setup.py version.py command_line.py""".split() -ext_modules = [] - -for f in os.listdir('.') if f.emswith(".py") and not f in to_remove: - module = f.split('.')[0] - ext_modules.append(Extension(module,list(f))) - -setup(name = 'IRPF90 extensions', - cmdclass = {'build_ext': build_ext}, - ext_modules = ext_modules) diff --git a/src/entity.py b/src/entity.py index 2c2e80b..f43fcf2 100644 --- a/src/entity.py +++ b/src/entity.py @@ -30,6 +30,7 @@ from command_line import command_line import sys from lib.manager import irpy + class Entity(object): '''All lines between BEGIN_PROVIDER and END_PROVIDER included @@ -163,26 +164,31 @@ class Entity(object): def io_er(self): if not self.is_main: result = [] - + from util import mangled - from util import ashes_env + from util import ashes_env name = self.name - d_template= {'name':name, - 'fmodule':self.fmodule, - 'same_as' : self.same_as, - 'do_debug':command_line.do_debug, - 'children':mangled(self.needs,self.d_entity), - 'group_entity': [{'name':n,'dim':build_dim(self.cm_d_variable[n].dim,colons=True)} for n in self.l_name]} + d_template = { + 'name': name, + 'fmodule': self.fmodule, + 'same_as': self.same_as, + 'do_debug': command_line.do_debug, + 'children': mangled(self.needs, self.d_entity), + 'group_entity': [{ + 'name': n, + 'dim': build_dim( + self.cm_d_variable[n].dim, colons=True) + } for n in self.l_name] + } - - return ashes_env('io.f90',d_template).split('\n') + return ashes_env('io.f90', d_template).split('\n') def reader(self): - return io.er.split('TOKEN_SPLIT')[0] + return io.er.split('TOKEN_SPLIT')[0] def writer(self): - return io.er.split('TOKEN_SPLIT')[1] + return io.er.split('TOKEN_SPLIT')[1] @irpy.lazy_property_mutable def is_read(self): @@ -275,8 +281,8 @@ class Entity(object): # ~ # ~ # ~ @irpy.lazy_property def is_protected(self): - return self.text[0].lower.startswith('begin_provider_immu') - + return self.text[0].lower.startswith('begin_provider_immu') + @irpy.lazy_property def type(self): # () -> str @@ -303,9 +309,11 @@ class Entity(object): 'name': self.name, 'type': self.type, 'main': self.is_main, - 'dim': build_dim(self.dim,colons=True), - 'protected': '\n'.join(self.allocater+self.builder) if self.is_protected else False} - return d_template + 'dim': build_dim( + self.dim, colons=True), + 'protected': '\n'.join(self.allocater + self.builder) if self.is_protected else False + } + return d_template ############################################################ @irpy.lazy_property @@ -340,8 +348,11 @@ class Entity(object): return { 'name': self.name, - 'l_module': [n for n in build_use(self.parents + [self.name], self.d_entity,use=False)], - 'l_ancestor': [n for n in mangled(self.parents, self.d_entity)]} + 'l_module': + [n for n in build_use( + 
self.parents + [self.name], self.d_entity, use=False)], + 'l_ancestor': [n for n in mangled(self.parents, self.d_entity)] + } ########################################################## @@ -374,11 +385,13 @@ class Entity(object): from util import mangled - import util + import util name = self.name - l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity,use=False)] + l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity, use=False)] l_children = [x for x in mangled(self.to_provide, self.d_entity)] + l_entity = [self.d_entity[n] for n in self.l_name] + l = ashes_env.render('provider.f90', { 'name': name, 'l_module': l_module, @@ -386,40 +399,40 @@ class Entity(object): 'do_debug': command_line.do_debug, 'do_openmp': command_line.do_openmp, 'do_task': command_line.do_Task, - 'do_corray': command_line.do_coarray, - 'dim': ','.join(self.dim), + 'do_corray': command_line.do_coarray, + 'dim': ','.join(self.dim), + 'l_entity': [{ + 'name': i.name, + 'dim': ','.join(i.dim) + } for i in l_entity] }) return [i for i in l.split('\n') if i.strip()] @irpy.lazy_property def allocater(self): - if not self.is_main: - return [] - from util import mangled import util name = self.name - l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity,use=False)] + l_module = [x for x in build_use([self.name] + self.to_provide, self.d_entity, use=False)] if self.is_protected: - l_module.remove(self.fmodule) - + l_module.remove(self.fmodule) l_dim = [{'name': name, 'rank': i + 1, 'value': dimsize(k)} for i, k in enumerate(self.dim)] - l = ashes_env.render('allocater.f90', { 'name': name, 'l_module': l_module, 'do_debug': command_line.do_debug, 'do_corray': command_line.do_coarray, + 'do_memory': command_line.do_memory, 'dim': ','.join(self.dim), 'l_dim': l_dim }) return [i for i in l.split('\n') if i.strip()] - - ########################################################## +########################################################## + @irpy.lazy_property def builder(self): if not self.is_main: @@ -458,7 +471,6 @@ class Entity(object): text += map(lambda x: ([], Simple_line(line.i, x, line.filename)), build_call_provide(vars, self.d_entity)) - # ~#~#~#~#~# # Create the subroutine. # ~#~#~#~#~# @@ -470,11 +482,11 @@ class Entity(object): # Add the use statement result += ["subroutine bld_%s" % (self.name)] - l_use = build_use([self.name] + self.needs, self.d_entity,use=False) - if self.is_protected: - l_use.remove(self.fmodule) + l_use = build_use([self.name] + self.needs, self.d_entity, use=False) + if self.is_protected: + l_use.remove(self.fmodule) - result += ['USE %s'%n for n in l_use] + result += ['USE %s' % n for n in l_use] import parsed_text # Move the variable to top, and add the text diff --git a/src/irp_stack.py b/src/irp_stack.py deleted file mode 100644 index fe321bc..0000000 --- a/src/irp_stack.py +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/env python -# IRPF90 is a Fortran90 preprocessor written in Python for programming using -# the Implicit Reference to Parameters (IRP) method. -# Copyright (C) 2009 Anthony SCEMAMA -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Anthony Scemama -# LCPQ - IRSAMC - CNRS -# Universite Paul Sabatier -# 118, route de Narbonne -# 31062 Toulouse Cedex 4 -# scemama@irsamc.ups-tlse.fr - - -import util -from command_line import command_line - -do_assert = command_line.do_assert -do_debug = command_line.do_debug -do_openmp = command_line.do_openmp -do_memory = command_line.do_memory - -import irpf90_t - -FILENAME = irpf90_t.irpdir+"irp_stack.irp.F90" - -def create(): - - txt = """ -module irp_stack_mod - integer, parameter :: STACKMAX=1000 - character*(128),allocatable :: irp_stack(:,:) - double precision,allocatable :: irp_cpu(:,:) - integer,allocatable :: stack_index(:) - logical :: alloc = .False. - integer :: nthread - character*(128) :: white = '' -end module - -subroutine irp_enter(irp_where) - use irp_stack_mod - integer :: ithread - character*(*) :: irp_where -""" - if not do_openmp: - txt += """ - ithread = 0 -""" - else: - txt += """ - integer, external :: omp_get_thread_num - integer, external :: omp_get_num_threads - ithread = omp_get_thread_num() -""" - - txt += "$1" - - if do_memory: - txt+=""" - if (.not.alloc) then -""" - if do_openmp: - txt += """ - !$OMP PARALLEL - !$OMP SINGLE - nthread = omp_get_num_threads() - !$OMP END SINGLE - !$OMP END PARALLEL -""" - else: - txt += """ - nthread = 1 - """ - txt += """ - print *, 'Allocating irp_stack(',STACKMAX,',',0:nthread,')' - print *, 'Allocating irp_cpu(',STACKMAX,',',0:nthread,')' - print *, 'Allocating stack_index(',0:nthread,')' - endif""" - txt +=""" -$2 -end subroutine - -subroutine irp_enter_f(irp_where) - use irp_stack_mod - integer :: ithread - character*(*) :: irp_where - """ - if do_openmp: - txt += """ - integer, external :: omp_get_thread_num - integer, external :: omp_get_num_threads - ithread = omp_get_thread_num() -""" - else: - txt += """ - ithread = 0 -""" - txt += """ -$1 -""" - if do_memory: - txt+=""" - if (.not.alloc) then -""" - if do_openmp: - txt += """ - !$OMP PARALLEL - !$OMP SINGLE - nthread = omp_get_num_threads() - !$OMP END SINGLE - !$OMP END PARALLEL -""" - else: - txt += """ - nthread = 1 -""" - txt +=""" - print *, 'Allocating irp_stack(',STACKMAX,',',0:nthread,')' - print *, 'Allocating irp_cpu(',STACKMAX,',',0:nthread,')' - print *, 'Allocating stack_index(',0:nthread,')' - endif -""" - txt += """ -$2 -end subroutine - -subroutine irp_leave (irp_where) - use irp_stack_mod - character*(*) :: irp_where - integer :: ithread - double precision :: cpu -""" - if do_openmp: - txt += """ - integer, external :: omp_get_thread_num - ithread = omp_get_thread_num() - """ - else: - txt += """ - ithread = 0 - """ - txt += """ -$3 -$4 -end subroutine -""" - - # $1 - if do_assert or do_debug: - s = """ - if (.not.alloc) then - """ - if do_openmp: - s += """ - !$OMP PARALLEL - !$OMP SINGLE - nthread = omp_get_num_threads() - !$OMP END SINGLE - !$OMP END PARALLEL - !$OMP CRITICAL - if (.not.alloc) then - allocate(irp_stack(0:STACKMAX,0:nthread)) - allocate(irp_cpu(0:STACKMAX,0:nthread)) - allocate(stack_index(0:nthread)) - stack_index = 0 - alloc = .True. 
- endif - !$OMP END CRITICAL - endif - stack_index(ithread) = min(stack_index(ithread)+1,STACKMAX) - irp_stack(stack_index(ithread),ithread) = irp_where""" - else: - s += """ - nthread = 1 - if (.not.alloc) then - allocate(irp_stack(0:STACKMAX,1)) - allocate(irp_cpu(0:STACKMAX,1)) - allocate(stack_index(2)) - stack_index = 0 - alloc = .True. - endif - endif - stack_index(1) = min(stack_index(1)+1,STACKMAX) - irp_stack(stack_index(1),1) = irp_where""" - if do_memory: - txt+=""" - print *, 'Allocating irp_stack(',STACKMAX,','0:nthread,')' - print *, 'Allocating irp_cpu(',STACKMAX,','0:nthread,')' - print *, 'Allocating stack_index(',0:nthread,')'""" - else: - s = "" - txt = txt.replace("$1",s) - - # $2 - if do_debug: - txt = txt.replace("$2",""" - print *, ithread, ':', white(1:stack_index(ithread))//'-> ', trim(irp_where) - call cpu_time(irp_cpu(stack_index(ithread),ithread))""") - else: - txt = txt.replace("$2","") - - # $3 - if do_debug: - txt = txt.replace("$3",""" - call cpu_time(cpu) - print *, ithread, ':', white(1:stack_index(ithread))//'<- ', & - trim(irp_stack(stack_index(ithread),ithread)), & - cpu-irp_cpu(stack_index(ithread),ithread)""") - else: - txt = txt.replace("$3","") - - # $4 - if do_debug or do_assert: - txt = txt.replace("$4",""" - stack_index(ithread) = max(0,stack_index(ithread)-1)""") - else: - txt = txt.replace("$4","") - - txt += """ -subroutine irp_trace - use irp_stack_mod - integer :: ithread - integer :: i -""" - if do_openmp: - txt += """ -!$ integer, external :: omp_get_thread_num -!$ ithread = omp_get_thread_num() -""" - else: - txt += """ - ithread = 0 -""" - txt += """ - if (.not.alloc) return - print *, 'Stack trace: ', ithread - print *, '-------------------------' - do i=1,stack_index(ithread) - print *, trim(irp_stack(i,ithread)) - enddo - print *, '-------------------------' -end subroutine - -""" - - util.lazy_write_file(FILENAME,txt) - diff --git a/src/irpf90.py b/src/irpf90.py index 506e0e4..5a6df5e 100644 --- a/src/irpf90.py +++ b/src/irpf90.py @@ -35,125 +35,120 @@ except: from command_line import command_line from irpy_files import Irpy_comm_world + def main(): vim.install() if command_line.do_help: command_line.usage() - return + return if command_line.do_version: from version import version print version - return + return if command_line.do_init: - from build_file import create_generalmakefile - create_generalmakefile(command_line.do_ninja) - return + from build_file import create_generalmakefile + create_generalmakefile(command_line.do_ninja) + return comm_world = Irpy_comm_world() - if command_line.do_graph: - # Create a dot reprenstion of the dependency graph. - # Merge inside a subgraph the Entity provided together + # Create a dot reprenstion of the dependency graph. 
+ # Merge the entities provided together inside a subgraph - def print_full_diagram(l_entity): - l_entity_not_leaf= [e for e in l_entity if e.needs] - print 'digraph Full { ' - for e in l_entity_not_leaf: - print ' %s -> { %s } ' % (e.name, ' '.join(e.needs)) - print '}' + def print_full_diagram(l_entity): + l_entity_not_leaf = [e for e in l_entity if e.needs] + print 'digraph Full { ' + for e in l_entity_not_leaf: + print ' %s -> { %s } ' % (e.name, ' '.join(e.needs)) + print '}' - - - def print_subgraph(l_tuple,name,color): - for i,s in enumerate(l_tuple): - print ' subgraph cluster_%s_%s {' % (name,i) + def print_subgraph(l_tuple, name, color): + for i, s in enumerate(l_tuple): + print ' subgraph cluster_%s_%s {' % (name, i) print ' %s ' % ' '.join(s) print ' color = %s ' % color print ' }' - comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know. - - print_full_diagram(comm_world.d_entity.values()) + comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know. + + print_full_diagram(comm_world.d_entity.values()) print 'digraph Compact { ' print ' graph [ordering="out" splines=true overlap=false];' - l_main_usr = set([entity for entity in comm_world.d_entity.values() if entity.is_main]) + l_main_usr = set([entity for entity in comm_world.d_entity.values() if entity.is_main]) l_main_head_usr = set([entity for entity in l_main_usr if entity.l_others_name]) - l_set_main_head_name = [ set(e.l_name) for e in l_main_head_usr] + l_set_main_head_name = [set(e.l_name) for e in l_main_head_usr] - print_subgraph(l_set_main_head_name,'usr',color='blue') + print_subgraph(l_set_main_head_name, 'usr', color='blue') - from util import l_dummy_entity + from util import l_dummy_entity - l_set_dummy_name= l_dummy_entity(comm_world.d_entity) - print_subgraph(l_set_dummy_name,'dummy',color='red') + l_set_dummy_name = l_dummy_entity(comm_world.d_entity) + print_subgraph(l_set_dummy_name, 'dummy', color='red') #~=~=~=~= # Create List Node Uniq - #~=~=~=~= + #~=~=~=~= - from util import split_l_set, flatten - l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name) - l_name_dummy_name_flatten = flatten(l_set_dummy_name) + from util import split_l_set, flatten + l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name) + l_name_dummy_name_flatten = flatten(l_set_dummy_name) l_main_head_dummy = set([comm_world.d_entity[name] for name in l_name_dummy_name_flatten]) - s_exculde_dummy = set([comm_world.d_entity[name] for name in s_exculde_dummy_name]) - - l_node_uniq = (l_main_usr | l_main_head_dummy) - s_exculde_dummy - - - #~=~=~=~= - # Create All edge - #~=~=~=~= - # We need to remove the spurious edge caused by the the dummy multiples providers - d_need = dict() - for e in l_node_uniq: - d_need[e.name] = set(e.needs) + s_exculde_dummy = set([comm_world.d_entity[name] for name in s_exculde_dummy_name]) l_node_uniq = (l_main_usr | l_main_head_dummy) - s_exculde_dummy #~=~=~=~= # Create All edge #~=~=~=~= - # Draw the eddge - # If a arrow if arriving into Multipliple provider and if it is bold this mean it use all the entity inside it. + # We need to remove the spurious edges caused by the dummy multiple providers + d_need = dict() + for e in l_node_uniq: + d_need[e.name] = set(e.needs) + + #~=~=~=~= + # Create All edge + #~=~=~=~= + # Draw the edges + # If an arrow arriving into a multiple provider is bold, it means the source uses all the entities inside that group.
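# (A sketch, not part of this patch: with hypothetical entity names
#  x, a, b, c, d, the DOT emitted below comes out roughly as
#
#      digraph Compact {
#        graph [ordering="out" splines=true overlap=false];
#        subgraph cluster_usr_0 {
#          a b
#          color = blue
#        }
#        x -> a [color="blue", penwidth=2]
#        x -> { c d } [color="black"]
#
#  where the bold (penwidth=2) edge into one member of a cluster stands
#  for "x uses every entity of that group".)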
from util import uniquify l_set_multiple = uniquify(l_set_dummy_name + l_set_main_head_name) - l_name_usr = [e.name for e in l_main_head_usr] - for source,l_target in d_need.items(): + l_name_usr = [e.name for e in l_main_head_usr] + for source, l_target in d_need.items(): - if source in l_name_usr: - color = 'blue' - elif source in l_name_dummy_name_flatten: - color = 'red' - else: - color = 'black' + if source in l_name_usr: + color = 'blue' + elif source in l_name_dummy_name_flatten: + color = 'red' + else: + color = 'black' - for s in l_set_multiple: - if s.issubset(l_target): - print ' %s -> %s [color="%s", penwidth=2]' %(source,sorted(s).pop(), color) - l_target = l_target - s + for s in l_set_multiple: + if s.issubset(l_target): + print ' %s -> %s [color="%s", penwidth=2]' % (source, sorted(s).pop(), color) + l_target = l_target - s - if l_target: - print ' %s -> { %s } [color="%s"]'% (source,' '.join(l_target), color) - - print ' }' - return + if l_target: + print ' %s -> { %s } [color="%s"]' % (source, ' '.join(l_target), color) + print ' }' + return if command_line.do_preprocess: for filename, text in comm_world.preprocessed_text: - if filename in command_line.preprocessed: - for line in text: - print line.text - return + if filename in command_line.preprocessed: + for line in text: + print line.text + return if command_line.do_touch: for var in command_line.touched: @@ -163,7 +158,7 @@ def main(): print "Touching %s invalidates the following entities:" % var for x in sorted(d_entity[var].parents): print "- %s" % (x, ) - return + return if command_line.do_codelet: import profile @@ -176,16 +171,20 @@ def main(): comm_world.create_buildfile(command_line.do_ninja) comm_world.write_modules() - + comm_world.create_touches() comm_world.create_man() + if command_line.do_debug or command_line.do_assert: + comm_world.create_stack() + if command_line.do_profile: import profile profile.run(comm_world.d_entity) if command_line.do_openmp: - comm_world.create_lock() + comm_world.create_lock() + if __name__ == '__main__': main() diff --git a/src/irpf90_indent.py b/src/irpf90_indent.py index 0b62aad..2a02bb1 100755 --- a/src/irpf90_indent.py +++ b/src/irpf90_indent.py @@ -24,249 +24,269 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - import sys import re LENMAX = 70 tabn = 2 -tab = " "*tabn +tab = " " * tabn + class Grep(object): - re_begin_program = re.compile(r"^\s*program\s",flags=re.I) - def begin_program(self,string): - return re.match(self.re_begin_program,string) is not None + re_begin_program = re.compile(r"^\s*program\s", flags=re.I) - re_end_program = re.compile(r"\s*(end\s*!?$|end\s*program)",flags=re.I) - def end_program(self,string): - return re.match(self.re_end_program,string) is not None + def begin_program(self, string): + return re.match(self.re_begin_program, string) is not None - re_begin_subroutine = re.compile(r"^\s*(recursive)?\s*subroutine\s",flags=re.I) - def begin_subroutine(self,string): - return re.match(self.re_begin_subroutine,string) is not None + re_end_program = re.compile(r"\s*(end\s*!?$|end\s*program)", flags=re.I) - re_end_subroutine = re.compile(r"\s*(end\s*!?$|end\s*subroutine)",flags=re.I) - def end_subroutine(self,string): - return re.match(self.re_end_subroutine,string) is not None + def end_program(self, string): + return re.match(self.re_end_program, string) is not None - re_begin_function = re.compile(r"^.*function\s+.*\(",flags=re.I) - def begin_function(self,string): - return re.match(self.re_begin_function,string) is not None + 
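# (A sketch, not part of this patch: a few hypothetical checks of the two
#  matchers above; flags=re.I makes them case-insensitive.
#
#      g = Grep()
#      g.begin_program('  Program foo')   # True
#      g.begin_program('programx')        # False: 'program' needs a trailing blank
#      g.end_program('END PROGRAM')       # True
#  )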
re_begin_subroutine = re.compile(r"^\s*(recursive)?\s*subroutine\s", flags=re.I) - re_end_function = re.compile(r"\s*(end\s*!?$|end\s*function)",flags=re.I) - def end_function(self,string): - return re.match(self.re_end_function,string) is not None + def begin_subroutine(self, string): + return re.match(self.re_begin_subroutine, string) is not None - re_begin_provider = re.compile(r"^\s*&?begin_provider\s",flags=re.I) - def begin_provider(self,string): - return re.match(self.re_begin_provider,string) is not None + re_end_subroutine = re.compile(r"\s*(end\s*!?$|end\s*subroutine)", flags=re.I) - re_end_provider = re.compile(r"^\s*end_provider\s*(!.*)?$", flags=re.I) - def end_provider(self,string): - return re.match(self.re_end_provider,string) is not None + def end_subroutine(self, string): + return re.match(self.re_end_subroutine, string) is not None - re_begin_do = re.compile(r"^\s*do\s+",flags=re.I) - def begin_do(self,string): - return re.match(self.re_begin_do,string) is not None + re_begin_function = re.compile(r"^.*function\s+.*\(", flags=re.I) - re_end_do = re.compile(r"^\s*end\s*do\s*(!.*)?$",flags=re.I) - def end_do(self,string): - return re.match(self.re_end_do,string) is not None + def begin_function(self, string): + return re.match(self.re_begin_function, string) is not None - re_begin_if = re.compile(r"^\s*if(\(|\s+).*(&|then)\s*(!.*)?$",flags=re.I) - def begin_if(self,string): - return re.match(self.re_begin_if,string) is not None + re_end_function = re.compile(r"\s*(end\s*!?$|end\s*function)", flags=re.I) - re_else = re.compile(r"^\s*else",flags=re.I) - def xelse(self,string): - return re.match(self.re_else,string) is not None + def end_function(self, string): + return re.match(self.re_end_function, string) is not None - re_end_if = re.compile(r"^\s*end\s*if\s*(!.*)?$",flags=re.I) - def end_if(self,string): - return re.match(self.re_end_if,string) is not None + re_begin_provider = re.compile(r"^\s*&?begin_provider\s", flags=re.I) - re_begin_select = re.compile(r"^\s*select\s*case",flags=re.I) - def begin_select(self,string): - return re.match(self.re_begin_select,string) is not None + def begin_provider(self, string): + return re.match(self.re_begin_provider, string) is not None - re_case = re.compile(r"^\s*case\s*\(",flags=re.I) - def case(self,string): - return re.match(self.re_case,string) is not None + re_end_provider = re.compile(r"^\s*end_provider\s*(!.*)?$", flags=re.I) - re_end_select = re.compile(r"^\s*end\s*select\s*(!.*)?$",flags=re.I) - def end_select(self,string): - return re.match(self.re_end_select,string) is not None + def end_provider(self, string): + return re.match(self.re_end_provider, string) is not None - re_continuation = re.compile(r"^\s*\S+.*&") - def continuation(self,string): - return re.match(self.re_continuation,string) is not None + re_begin_do = re.compile(r"^\s*do\s+", flags=re.I) + + def begin_do(self, string): + return re.match(self.re_begin_do, string) is not None + + re_end_do = re.compile(r"^\s*end\s*do\s*(!.*)?$", flags=re.I) + + def end_do(self, string): + return re.match(self.re_end_do, string) is not None + + re_begin_if = re.compile(r"^\s*if(\(|\s+).*(&|then)\s*(!.*)?$", flags=re.I) + + def begin_if(self, string): + return re.match(self.re_begin_if, string) is not None + + re_else = re.compile(r"^\s*else", flags=re.I) + + def xelse(self, string): + return re.match(self.re_else, string) is not None + + re_end_if = re.compile(r"^\s*end\s*if\s*(!.*)?$", flags=re.I) + + def end_if(self, string): + return re.match(self.re_end_if, string) 
is not None + + re_begin_select = re.compile(r"^\s*select\s*case", flags=re.I) + + def begin_select(self, string): + return re.match(self.re_begin_select, string) is not None + + re_case = re.compile(r"^\s*case\s*\(", flags=re.I) + + def case(self, string): + return re.match(self.re_case, string) is not None + + re_end_select = re.compile(r"^\s*end\s*select\s*(!.*)?$", flags=re.I) + + def end_select(self, string): + return re.match(self.re_end_select, string) is not None + + re_continuation = re.compile(r"^\s*\S+.*&") + + def continuation(self, string): + return re.match(self.re_continuation, string) is not None + + re_declaration = re.compile(r"^.*::.*$") + + def declaration(self, string): + return re.match(self.re_declaration, string) is not None - re_declaration = re.compile(r"^.*::.*$") - def declaration(self,string): - return re.match(self.re_declaration,string) is not None grep = Grep() + class indent(object): + def __init__(self): + """Run the program""" + self.run() - def __init__(self): - """Run the program""" - self.run() + def format_declaration(self, string, n): + l, r = string.split('::') + return l.strip().ljust(n) + ' :: ' + r.strip() - def format_declaration(self,string,n): - l,r = string.split('::') - return l.strip().ljust(n) + ' :: '+ r.strip() - - def format_continuation(self,string,n): - buffer = string.split('&') - if len(buffer) == 1: - l = buffer[0] - return l - else: - l, r = buffer - return l.strip().ljust(69-len(n)) + '&'+ r.strip() - - def get_filename(self): - """The file name is the first argument""" - if '_filename' not in self.__dict__: - try: - self._filename = sys.argv[1] - except: - self._filename = None - return self._filename - filename=property(fget=get_filename) - - def get_text(self): - """The text of the file is a list of lines""" - if '_text' not in self.__dict__: - if self.filename is not None: - f = open(self.filename,'r') - self._text = f.read().splitlines() - f.close() - else: - self._text = sys.stdin.read().splitlines() - return self._text - text=property(fget=get_text) - - def indentlevel(self,line): - line = line.rstrip() - k=0 - if len(line) > 0: - while line[k] == ' ': - k+=1 - return k - - def run(self): - lines = self.text - indent0 = " "*self.indentlevel(self.text[0]) - k = indent0 - line = "" - for i in range(len(self.text)): - prevline = line - line = self.text[i].strip() - if grep.continuation(line): - line = self.format_continuation(line,k) - - if grep.continuation(prevline): - print k+2*tab+self.format_continuation(line,k+2*tab) - continue - - if grep.begin_subroutine(line): - print line - k = indent0+tab - continue - - if grep.begin_function(line): - print line - k = indent0+tab - continue - - if grep.begin_program(line): - print line - k = indent0+tab - continue - - if grep.begin_provider(line): - if line[0] != '&': - k = indent0+tab - if grep.begin_provider(self.text[i+1].strip()): - print " "+line - else: - print line + def format_continuation(self, string, n): + buffer = string.split('&') + if len(buffer) == 1: + l = buffer[0] + return l else: - print line - continue + l, r = buffer + return l.strip().ljust(69 - len(n)) + '&' + r.strip() - if grep.declaration(line): - print k+self.format_declaration(line,30) - continue + def get_filename(self): + """The file name is the first argument""" + if '_filename' not in self.__dict__: + try: + self._filename = sys.argv[1] + except: + self._filename = None + return self._filename - if grep.begin_do(line): - print k+line - k += tab - continue + filename = property(fget=get_filename) - if 
grep.begin_if(line): - print k+line - k += tab - continue + def get_text(self): + """The text of the file is a list of lines""" + if '_text' not in self.__dict__: + if self.filename is not None: + f = open(self.filename, 'r') + self._text = f.read().splitlines() + f.close() + else: + self._text = sys.stdin.read().splitlines() + return self._text - if grep.xelse(line): - print k[:-tabn]+line - continue + text = property(fget=get_text) - if grep.begin_select(line): - print k+line - k += 2*tab - continue + def indentlevel(self, line): + line = line.rstrip() + k = 0 + if len(line) > 0: + while line[k] == ' ': + k += 1 + return k - if grep.case(line): - print k[:-tabn]+line - continue - - if grep.end_do(line): - k = k[:-tabn] - print k+line - continue - - if grep.end_if(line): - k = k[:-tabn] - print k+line - continue - - if grep.end_select(line): - k = k[:-2*tabn] - print k+line - continue - - if grep.end_subroutine(line): - print line + def run(self): + lines = self.text + indent0 = " " * self.indentlevel(self.text[0]) k = indent0 - continue + line = "" + for i in range(len(self.text)): + prevline = line + line = self.text[i].strip() + if grep.continuation(line): + line = self.format_continuation(line, k) - if grep.end_function(line): - print line - k = indent0 - continue + if grep.continuation(prevline): + print k + 2 * tab + self.format_continuation(line, k + 2 * tab) + continue - if grep.end_provider(line): - print line - k = indent0 - continue + if grep.begin_subroutine(line): + print line + k = indent0 + tab + continue - if grep.end_program(line): - print line - k = indent0 - continue + if grep.begin_function(line): + print line + k = indent0 + tab + continue - print k+line + if grep.begin_program(line): + print line + k = indent0 + tab + continue + + if grep.begin_provider(line): + if line[0] != '&': + k = indent0 + tab + if grep.begin_provider(self.text[i + 1].strip()): + print " " + line + else: + print line + else: + print line + continue + + if grep.declaration(line): + print k + self.format_declaration(line, 30) + continue + + if grep.begin_do(line): + print k + line + k += tab + continue + + if grep.begin_if(line): + print k + line + k += tab + continue + + if grep.xelse(line): + print k[:-tabn] + line + continue + + if grep.begin_select(line): + print k + line + k += 2 * tab + continue + + if grep.case(line): + print k[:-tabn] + line + continue + + if grep.end_do(line): + k = k[:-tabn] + print k + line + continue + + if grep.end_if(line): + k = k[:-tabn] + print k + line + continue + + if grep.end_select(line): + k = k[:-2 * tabn] + print k + line + continue + + if grep.end_subroutine(line): + print line + k = indent0 + continue + + if grep.end_function(line): + print line + k = indent0 + continue + + if grep.end_provider(line): + print line + k = indent0 + continue + + if grep.end_program(line): + print line + k = indent0 + continue + + print k + line def main(): - indent() + indent() if __name__ == '__main__': - main() - + main() diff --git a/src/irpf90_t.py b/src/irpf90_t.py index 7d845d6..4f9b65e 100644 --- a/src/irpf90_t.py +++ b/src/irpf90_t.py @@ -24,7 +24,6 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - irpdir = "IRPF90_temp/" mandir = "IRPF90_man/" @@ -35,6 +34,7 @@ irp_id = abs(crc32(os.getcwd())) from lib.manager import irpy from util import logger + class Line(object): def __init__(self, i, text, filename): self.i = i @@ -48,36 +48,36 @@ class Line(object): def __repr__(self): return "%20s:%5d : %s (%s)" % (type(self).__name__, self.i, self.text, 
self.filename)
 
+
 class LineWithName(Line):
+    @irpy.lazy_property
+    def subname(self):
+        buf = self.lower
+        if not buf.endswith(')'):
+            buf += "()"
 
-    @irpy.lazy_property
-    def subname(self):
-        buf = self.lower
-        if not buf.endswith(')'):
-            buf += "()"
+        l_buf = buf.split('(')
+        l_name = l_buf[0].split()
 
-        l_buf = buf.split('(')
-        l_name = l_buf[0].split()
+        if len(l_name) < 2:
+            import sys
+            logger.error("Syntax Error: %s" % self.text)
+            sys.exit(1)
+        return l_name.pop()
 
-        if len(l_name) < 2:
-            import loger
-            logger.error("Syntax Error: %s" % line)
-            sys.exit(1)
-        return l_name.pop()
 
 l_type = [
-    'Empty_line', 'Simple_line', "Declaration", "Continue", "Begin_provider",
-    "Cont_provider", "End_provider", "Begin_doc", "Doc", "End_doc",
-    "Begin_shell", "End_shell", "Begin_template", "End_template", "Subst",
-    "Assert", "Touch", "SoftTouch", "Irp_read", "Irp_write", "Irp_If",
-    "Irp_Else", "Irp_Endif", "Openmp", "Directive", "Use", "Do", "Enddo", "If",
-    "Elseif", "Else", "Endif", "Select", "Case", "End_select", "Provide", "NoDep", "Return", "Include",
-    "Implicit", "Free", "End", "Provide_all","Contains",'Type','End_module','Interface','End_interface',
-    'Where','Elsewhere','Endwhere']
+    'Empty_line', 'Simple_line', "Declaration", "Continue", "Begin_provider", "Cont_provider",
+    "End_provider", "Begin_doc", "Doc", "End_doc", "Begin_shell", "End_shell", "Begin_template",
+    "End_template", "Subst", "Assert", "Touch", "SoftTouch", "Irp_read", "Irp_write", "Irp_If",
+    "Irp_Else", "Irp_Endif", "Openmp", "Directive", "Use", "Do", "Enddo", "If", "Elseif", "Else",
+    "Endif", "Select", "Case", "End_select", "Provide", "NoDep", "Return", "Include", "Implicit",
+    "Free", "End", "Provide_all", "Contains", 'Type', 'End_module', 'Interface', 'End_interface',
+    'Where', 'Elsewhere', 'Endwhere'
+]
 
 for t in l_type:
     globals()[t] = type(t, (Line, ), {})
 
-for t in ['Subroutine', 'Function', 'Program', 'Call','Module']:
-    globals()[t] = type(t, (LineWithName, ), {})
-
+for t in ['Subroutine', 'Function', 'Program', 'Call', 'Module']:
+    globals()[t] = type(t, (LineWithName, ), {})
diff --git a/src/irpman.py b/src/irpman.py
index 198200a..144a61b 100755
--- a/src/irpman.py
+++ b/src/irpman.py
@@ -24,17 +24,16 @@
 #   31062 Toulouse Cedex 4
 #   scemama@irsamc.ups-tlse.fr
 
-
 import os
 import sys
 
 if __name__ == "__main__":
-    from irpf90_t import mandir
-    entity = sys.argv[1].lower()
-
-    filename = '%s.l'% entity
-    if filename not in os.listdir(mandir):
-        print "Error: `%s` does not exist"% entity
-        sys.exit(-1)
+    from irpf90_t import mandir
+    entity = sys.argv[1].lower()
 
-    os.system("man %s" % os.path.join(mandir,filename))
+    filename = '%s.l' % entity
+    if filename not in os.listdir(mandir):
+        print "Error: `%s` does not exist" % entity
+        sys.exit(-1)
+
+    os.system("man %s" % os.path.join(mandir, filename))
diff --git a/src/irpy_files.py b/src/irpy_files.py
index 9f6dfae..cf21007 100644
--- a/src/irpy_files.py
+++ b/src/irpy_files.py
@@ -10,6 +10,7 @@ import sys
 
 from command_line import command_line
 
+
 class Irpy_comm_world(object):
     '''Maestro.'''
 
@@ -114,8 +115,8 @@ class Irpy_comm_world(object):
         l_duplicate = [x for x in l_ent if l_ent.count(x) > 1]
         if l_duplicate:
             from util import logger
-            logger.error('You have duplicate PROVIDER: %s' % ' '.join(
-                [e.name for e in l_duplicate]))
+            logger.error('You have duplicate PROVIDER: %s' %
+                         ' '.join([e.name for e in l_duplicate]))
             import sys
             sys.exit(1)
 
@@ -230,7 +231,7 @@ class Irpy_comm_world(object):
         d_routine = self.d_routine
 
         import parsed_text
-        vtuple = [(v,s.same_as, 
s.regexp) for v, s in d_entity.iteritems()] + vtuple = [(v, s.same_as, s.regexp) for v, s in d_entity.iteritems()] def worker_parsed(filename_text): filename, text = filename_text @@ -290,8 +291,16 @@ class Irpy_comm_world(object): lazy_write_file(filename, '%s\n' % text) def create_stack(self): - import irp_stack - irp_stack.create() + from util import lazy_write_file + from util import ashes_env + + str_ = ashes_env.render('irp_stack.F90', { + 'do_debug': command_line.do_debug, + 'do_openmp': command_line.do_openmp, + 'do_memory': command_line.do_memory + }) + filename = os.path.join(irpf90_t.irpdir, 'irp_stack.irp.F90') + lazy_write_file(filename, str_) def create_buildfile(self, ninja): import build_file @@ -307,8 +316,8 @@ class Irpy_comm_world(object): def create_lock(self): from util import lazy_write_file - from util import ashes_env + from util import ashes_env - str_ = ashes_env.render('irp_lock.F90', {'entity':sorted(self.d_entity)}) + str_ = ashes_env.render('irp_lock.F90', {'entity': sorted(self.d_entity)}) filename = os.path.join(irpf90_t.irpdir, 'irp_locks.irp.F90') lazy_write_file(filename, str_) diff --git a/src/module.py b/src/module.py index 532e739..111dead 100644 --- a/src/module.py +++ b/src/module.py @@ -30,6 +30,7 @@ import preprocessed_text from util import * from entity import Entity + def put_info(text, filename): lenmax = 80 - len(filename) @@ -37,26 +38,25 @@ def put_info(text, filename): str_ = '{text:{width}} ! {filename}:{i:4}' for _, line in text: - line.text = str_.format(text=line.text,filename=line.filename,i=line.i,width=lenmax) + line.text = str_.format(text=line.text, filename=line.filename, i=line.i, width=lenmax) return text class Fmodule(object): - header = [ "! -*- F90 -*-", - "!", - "!-----------------------------------------------!", - "! This file was generated with the irpf90 tool. !", - "! !", - "! DO NOT MODIFY IT BY HAND !", - "!-----------------------------------------------!", - ""] + header = [ + "! -*- F90 -*-", "!", "!-----------------------------------------------!", + "! This file was generated with the irpf90 tool. !", + "! !", + "! 
DO NOT MODIFY IT BY HAND !", + "!-----------------------------------------------!", "" + ] def __init__(self, text, filename, d_variable): self.text = put_info(text, filename) self.filename = filename[:-6] self.name = "%s_mod" % (self.filename).replace('/', '__').replace('.', 'Dot') - self.d_all_variable = d_variable + self.d_all_variable = d_variable @irpy.lazy_property def prog_name(self): @@ -75,20 +75,23 @@ class Fmodule(object): def head(self): '''The module who containt the declaration of the entity''' - if self.use or self.dec or self.l_entity: + if self.use or self.dec or self.l_entity: - d_template = {'name' : self.name, - 'use':list(self.use),'usr_declaration':list(self.dec), - 'irp_declaration':[e.d_header for e in self.l_entity], - 'coarray': command_line.coarray, - 'align': False if command_line.align == 1 else command_line.align} - return [i for i in ashes_env.render('module.f90', d_template).split('\n') if i] - else: - return [] + d_template = { + 'name': self.name, + 'use': list(self.use), + 'usr_declaration': list(self.dec), + 'irp_declaration': [e.d_header for e in self.l_entity], + 'coarray': command_line.coarray, + 'align': False if command_line.align == 1 else command_line.align + } + return [i for i in ashes_env.render('module.f90', d_template).split('\n') if i] + else: + return [] @irpy.lazy_property def has_irp_module(self): - return bool(self.head) + return bool(self.head) @irpy.lazy_property def needed_vars(self): @@ -100,23 +103,22 @@ class Fmodule(object): @irpy.lazy_property def generated_text(self): - 'Routine genereraed by the IRPF90. provide, build, ...' + 'Routine genereraed by the IRPF90. provide, build, ...' result = [] for var in self.l_entity: result += var.provider - if not var.is_protected: + if not var.is_protected: result += var.builder - result += var.allocater + result += var.allocater if var.is_read: result += var.reader if var.is_written: result += var.writer - + return result @irpy.lazy_property def residual_text_use_dec(self): - def remove_providers(text): result = [] inside = False @@ -134,21 +136,21 @@ class Fmodule(object): result = [] variable_list = [] - skip_interface = False + skip_interface = False for vars, line in text: - if type(line) in [Interface, End_interface]: - skip_interface = not skip_interface + if type(line) in [Interface, End_interface]: + skip_interface = not skip_interface - if skip_interface: - result.append((vars, line)) - continue + if skip_interface: + result.append((vars, line)) + continue - - if type(line) in [Subroutine, Function, Program]: + if type(line) in [Subroutine, Function, Program]: #Deep copy... variable_list = list(vars) elif type(line) == End: - result += [([], Use(line.i, x, line.filename)) for x in build_use(variable_list, self.d_all_variable)] + result += [([], Use(line.i, x, line.filename)) + for x in build_use(variable_list, self.d_all_variable)] else: variable_list += vars @@ -156,19 +158,19 @@ class Fmodule(object): return result def extract_use_dec_text(text): - # (List[ Tuple(Entity,Line) ]) -> (List[ Tuple(Entity,Line),List[ Tuple(Entity,Line),List[ Tuple(Entity,Line)) - '''Extract the global declaration statement and module use form the declaration of function. ''' + # (List[ Tuple(Entity,Line) ]) -> (List[ Tuple(Entity,Line),List[ Tuple(Entity,Line),List[ Tuple(Entity,Line)) + '''Extract the global declaration statement and module use form the declaration of function. 
''' inside = 0 - result,dec,use,module = [],[],[],[] + result, dec, use, module = [], [], [], [] for vars, line in text: - - if isinstance(line, (Subroutine, Function, Program,Interface,Module)): + + if isinstance(line, (Subroutine, Function, Program, Interface, Module)): inside += 1 - if type(line) == Module: - module.append((vars,line)) + if type(line) == Module: + module.append((vars, line)) if inside: result.append((vars, line)) @@ -177,22 +179,22 @@ class Fmodule(object): use.append((vars, line)) elif type(line) == Declaration: dec.append((vars, line)) - - if isinstance(line,(End,End_interface,End_module)): - inside += -1 - - if inside: - print 'Something wrong append' - sys.exit(1) + if isinstance(line, (End, End_interface, End_module)): + inside += -1 + + if inside: + print 'Something wrong append' + sys.exit(1) return use, module, dec, result result = remove_providers(self.text) result = modify_functions(result) - + from collections import namedtuple - Residual_text_use_dec = namedtuple('Residual_text_use_dec', ['use', 'module', 'dec', 'result']) + Residual_text_use_dec = namedtuple('Residual_text_use_dec', + ['use', 'module', 'dec', 'result']) return Residual_text_use_dec(*extract_use_dec_text(result)) @@ -202,12 +204,12 @@ class Fmodule(object): @irpy.lazy_property def gen_mod(self): - '''List of module generated by the user in this module...''' + '''List of module generated by the user in this module...''' return set("%s" % line.subname for _, line in self.residual_text_use_dec.module) @irpy.lazy_property def dec(self): - '''The declaration of this module + '''The declaration of this module Note: Because user can define F90 Type, we need to keep the correct order. @@ -224,10 +226,10 @@ class Fmodule(object): ''' - l = [" %s" % line.text for _, line in self.residual_text_use_dec.dec] - from util import uniquify - if len(l) != len(uniquify(l)): - raise NotImplementedError + l = [" %s" % line.text for _, line in self.residual_text_use_dec.dec] + from util import uniquify + if len(l) != len(uniquify(l)): + raise NotImplementedError return l @@ -241,29 +243,28 @@ class Fmodule(object): result += map(lambda x: ([], Simple_line(line.i, x, line.filename)), build_call_provide(vars, self.d_all_variable)) - from parsed_text import move_to_top_list, move_interface move_to_top_list(result, [Declaration, Implicit, Use]) - move_interface(result) + move_interface(result) - return [line.text for _, line in result] + return [line.text for _, line in result] @irpy.lazy_property def needed_modules(self): - l = set(x.split(',only').pop(0).split()[1] for x in self.generated_text + self.head + self.residual_text if x.lstrip().startswith("use ")) + l = set( + x.split(',only').pop(0).split()[1] + for x in self.generated_text + self.head + self.residual_text + if x.lstrip().startswith("use ")) if self.name in l: l.remove(self.name) return l - @irpy.lazy_property def needed_modules_irp(self): return [i for i in self.needed_modules if i.endswith("_mod")] @irpy.lazy_property def needed_modules_usr(self): - return [i for i in self.needed_modules if not i.endswith("_mod")] - - + return [i for i in self.needed_modules if not i.endswith("_mod")] diff --git a/src/parsed_text.py b/src/parsed_text.py index cf57c92..bc62ad6 100644 --- a/src/parsed_text.py +++ b/src/parsed_text.py @@ -35,7 +35,7 @@ regexps_re_string_sub = regexps.re_string.sub def find_variables_in_line(line, vtuple): line_lower = regexps_re_string_sub('', line.lower) #return [same_as for v,same_as, regexp in vtuple if v in line_lower and 
regexp(line_lower)] - return [v for v,same_as, regexp in vtuple if v in line_lower and regexp(line_lower)] + return [v for v, same_as, regexp in vtuple if v in line_lower and regexp(line_lower)] def find_funcs_in_line(line, stuple): @@ -66,10 +66,11 @@ def check_touch(variables, line, vars, main_vars): if x != y: message = "The following entities should be touched:" message = "\n".join([message] + map(lambda x: "- %s" % (x, ), all_others)) - from util import logger - logger.error("%s (%s)" % (message,line)) - import sys - sys.exit(1) + from util import logger + logger.error("%s (%s)" % (message, line)) + import sys + sys.exit(1) + from collections import namedtuple Parsed_text = namedtuple('Parsed_text', ['varlist', 'line']) @@ -100,7 +101,7 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): variable_list = find_variables_in_line(line, vtuple) variable_list.remove(v) -# variable_list.remove(variables[v].same_as) + # variable_list.remove(variables[v].same_as) append(Parsed_text(variable_list, line)) @@ -113,9 +114,9 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): l = filter(lambda x: x not in varlist, l) for v in l: if v not in variables: - logger.error("Variable %s is unknown (%s)" % (v,line)) - import sys - sys.exit(1) + logger.error("Variable %s is unknown (%s)" % (v, line)) + import sys + sys.exit(1) append(Parsed_text(l, Provide(line.i, "", line.filename))) append(Parsed_text(l, Simple_line(line.i, "!%s" % (line.text), line.filename))) @@ -125,7 +126,7 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): for v in l: if v not in variables: error.fail(line, "Variable %s is unknown" % (v)) - sys.exit(1) + sys.exit(1) l = map(lambda x: "-%s" % (x), l) append(Parsed_text(l, Simple_line(line.i, "!%s" % (line.text), line.filename))) @@ -157,8 +158,9 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): def fun(x): if x not in variables: error.fail(line, "Variable %s unknown" % (x, )) - return Parsed_text( - [], Simple_line(line.i, " %s_is_built = .True." % (x, ), line.filename)) + return Parsed_text([], + Simple_line(line.i, " %s_is_built = .True." % + (x, ), line.filename)) result += map(fun, main_vars[:-1]) if type(line) == SoftTouch: @@ -206,6 +208,7 @@ def get_parsed_text(filename, text, variables, subroutines, vtuple): return (filename, result) + ###################################################################### def move_to_top_list(text, it): # ( List[ List[Entity], Line], Iterator) @@ -239,16 +242,16 @@ def move_to_top_list(text, it): for i, (l_var, line) in enumerate(text): t = type(line) - - if t in [Begin_provider, Module,Program, Subroutine, Function]: + + if t in [Begin_provider, Module, Program, Subroutine, Function]: l_begin.append(i) elif t in [End_provider, End]: - l_begin.pop() + l_begin.pop() elif l_begin and t in it: - d_permutation[t].append( (l_begin[-1], [l_var, line]) ) - # Put the sentinel, will be deleted after the insertion - text[i] = None + d_permutation[t].append((l_begin[-1], [l_var, line])) + # Put the sentinel, will be deleted after the insertion + text[i] = None # ~ # ~ # ~ # O r d e r t h e m @@ -272,33 +275,35 @@ def move_to_top_list(text, it): # Now do the Delete part of the move. 
Fortunately we put a sentinel to know the line to delete
     for i in reversed(xrange(len(text))):
-        if text[i] is None:
-            del text[i]
+        if text[i] is None:
+            del text[i]
 
-def move_interface(parsed_text,s_type=(Use,Implicit,Declaration,Subroutine,Function,Module)):
-    # ( List[ List[Entity], Line], Iterator)
-    '''Move everything containt into 'interface' below the first instance of s_type who preced it
+def move_interface(parsed_text, s_type=(Use, Implicit, Declaration, Subroutine, Function, Module)):
+    # ( List[ List[Entity], Line], Iterator)
+    '''Move everything contained in an 'interface' block below the first instance of s_type that precedes it
 
     Note:
     = This function is impure
     '''
-    # Get the born of the interface
-    i_begin = [ i for i, (_, line) in enumerate(parsed_text) if isinstance(line,Interface) ]
-    i_end = [ i+1 for i, (_, line) in enumerate(parsed_text) if isinstance(line,End_interface) ]
+    # Get the bounds of the interface
+    i_begin = [i for i, (_, line) in enumerate(parsed_text) if isinstance(line, Interface)]
+    i_end = [i + 1 for i, (_, line) in enumerate(parsed_text) if isinstance(line, End_interface)]
 
-    # Get the begin of the insert
-    i_insert = []
-    for begin in i_begin:
-        i_insert.append(next(i+1 for i in range(begin,-1,-1) if isinstance(parsed_text[i][1], s_type)))
+    # Get the insertion point for each interface
+    i_insert = []
+    for begin in i_begin:
+        i_insert.append(
+            next(i + 1 for i in range(begin, -1, -1) if isinstance(parsed_text[i][1], s_type)))
 
     # Do the insert and the delete in one pass
-    for insert, begin, end in zip(i_insert,i_begin,i_end):
-        parsed_text[insert:insert] = parsed_text[begin:end]
+    for insert, begin, end in zip(i_insert, i_begin, i_end):
+        parsed_text[insert:insert] = parsed_text[begin:end]
+
+        padding = end - begin
+        parsed_text[begin + padding:end + padding] = []
 
-        padding = end-begin
-        parsed_text[begin+padding:end+padding] = []
 
 ######################################################################
 def build_sub_needs(parsed_text, d_subroutine):
@@ -311,10 +316,14 @@ def build_sub_needs(parsed_text, d_subroutine):
     l_buffer = []
     for _, text in parsed_text:
-        l_begin = [ i for i, (_, line) in enumerate(text) if isinstance(line, (Subroutine, Function, Program))]
+        l_begin = [
+            i for i, (_, line) in enumerate(text)
+            if isinstance(line, (Subroutine, Function, Program))
+        ]
         l_end = [i for i, (_, line) in enumerate(text) if isinstance(line, End)]
 
-        l_buffer += [(d_subroutine[text[b].line.subname], text[b + 1:e]) for b, e in zip(l_begin, l_end) if not isinstance(text[b].line, Program)]
+        l_buffer += [(d_subroutine[text[b].line.subname], text[b + 1:e])
+                     for b, e in zip(l_begin, l_end) if not isinstance(text[b].line, Program)]
 
     for sub, text in l_buffer:
         sub.needs = set(v for vs, _ in text for v in vs)
@@ -347,42 +356,42 @@ def raise_entity(text):
     skip_interface = False
     lvl = 0
-
-    for i,(e, line) in enumerate(text):
-        type_ = type(line)
+    for i, (e, line) in enumerate(text):
+        type_ = type(line)
 
         if type_ in [Interface, End_interface]:
-            skip_interface = not skip_interface
+            skip_interface = not skip_interface
 
         if skip_interface:
-            continue
+            continue
 
-        if type_ in [Begin_provider, Program, Subroutine, Function,If]:
-            l_token.append(i)
-            lvl += 1
-            d_level_var[lvl] = e[:]
-
-        elif type_ in [End_provider, End, Endif]:
-            i = l_token.pop()
-            text[i] = ( d_level_var[lvl],text[i][1])
+        if type_ in [Begin_provider, Program, Subroutine, Function, If]:
+            l_token.append(i)
+            lvl += 1
+            d_level_var[lvl] = e[:]
 
-        lvl += -1
+        elif type_ in [End_provider, End, Endif]:
+            i = l_token.pop()
+            text[i] 
= (d_level_var[lvl], text[i][1]) - elif type_ in [Else,Elseif]: - i = l_token.pop() - text[i] = ( d_level_var[lvl],text[i][1]) + lvl += -1 - assert (type(text[i][1]) == If) - - l_token.append(i) - d_level_var[lvl] = e[:] + elif type_ in [Else, Elseif]: + i = l_token.pop() + text[i] = (d_level_var[lvl], text[i][1]) + + assert (type(text[i][1]) == If) + + l_token.append(i) + d_level_var[lvl] = e[:] + + else: + d_level_var[lvl] += e[:] + text[i] = ([], line) + + assert (lvl == 0) - else: - d_level_var[lvl] += e[:] - text[i] = ([],line) - assert(lvl==0) - def move_variables(parsed_text): #(List[ Tuple[List[Entity], Tuple[int,List[Line]] ]] '''Move variables into the top of the declaraiton. @@ -390,7 +399,6 @@ def move_variables(parsed_text): This need to be optimised to handle the fact that we can have multi-provider ''' - def func(filename, text): result = [] append = result.append @@ -403,16 +411,16 @@ def move_variables(parsed_text): old_elsevars = [] revtext = list(text) revtext.reverse() - - skip_interface = False + + skip_interface = False try: for vars, line in revtext: - if type(line) in [Interface, End_interface]: - skip_interface = not skip_interface - - if skip_interface: - append(([], line)) - continue + if type(line) in [Interface, End_interface]: + skip_interface = not skip_interface + + if skip_interface: + append(([], line)) + continue if type(line) in [End_provider, End]: varlist = [] @@ -456,10 +464,10 @@ def move_variables(parsed_text): varlist += vars append(([], line)) except: - from util import logger + from util import logger logger.error("Unable to parse file %s", line) - import sys - sys.exit(1) + import sys + sys.exit(1) result.reverse() @@ -554,7 +562,6 @@ def build_needs(parsed_text, subroutines, stuple, variables): entity.needs = uniquify(l_needs) - # Now do the Other entity for v in variables: main = variables[v].same_as @@ -573,7 +580,7 @@ def build_needs(parsed_text, subroutines, stuple, variables): d_needed_by[x].append(var.name) for v in d_needed_by: - variables[v].needed_by = uniquify(d_needed_by[v]) + variables[v].needed_by = uniquify(d_needed_by[v]) ###################################################################### from command_line import command_line @@ -612,4 +619,3 @@ def perform_loop_substitutions(parsed_text): append((vars, line)) main_result.append((filename, result)) return main_result - diff --git a/src/preprocessed_text.py b/src/preprocessed_text.py index dcdf631..0622583 100644 --- a/src/preprocessed_text.py +++ b/src/preprocessed_text.py @@ -37,7 +37,7 @@ re_enddo = re.compile("end +do") re_endwhere = re.compile("end +where") re_endtype = re.compile("end +type.*") -re_endmodule = re.compile("end +module",re.I) +re_endmodule = re.compile("end +module", re.I) re_endselect = re.compile("end +select") re_endinterface = re.compile("end +interface") @@ -102,6 +102,7 @@ simple_dict = { "endwhere": Endwhere, } + def get_canonized_text(text_lower): text_canonized = text_lower @@ -112,7 +113,7 @@ def get_canonized_text(text_lower): text_canonized = re_endif.sub("endif", text_canonized) text_canonized = re_endselect.sub("endselect", text_canonized) text_canonized = re_endinterface.sub("endinterface", text_canonized) - text_canonized = re_endwhere.sub('endwhere',text_canonized) + text_canonized = re_endwhere.sub('endwhere', text_canonized) for c in """()'"[]""": text_canonized = text_canonized.replace(c, " %s " % c) @@ -122,7 +123,7 @@ def get_canonized_text(text_lower): def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc): # ( 
int,str,str,str,str,bool) -> Irpf90_t '''Find the type of a text line''' - + line = line.rstrip() l_word = line_lower_canonized.split() @@ -132,11 +133,11 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc): # Handle archaic do loop of f77 firstword = l_word[0] if firstword.isdigit(): - l_word = l_word[1:] + l_word = l_word[1:] firstword = l_word[0] if firstword == "contains": - return [Contains(i, line, filename)], False + return [Contains(i, line, filename)], False if firstword == "end_doc": return [End_doc(i, line, filename)], False @@ -150,12 +151,10 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc): type_ = simple_dict[firstword] return [type_(i, line, filename)], is_doc - #label do-loop (outer: do i=1,sze) reg_do_lab = ur":\s+do\s+" - if re.search(reg_do_lab,line_lower): - return [Do(i,line,filename)], is_doc - + if re.search(reg_do_lab, line_lower): + return [Do(i, line, filename)], is_doc lower_line = line_lower.strip()[1:] @@ -173,10 +172,10 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc): result = [Simple_line(i, line, filename)] logger.info("%s:" - "irpf90 may not work with preprocessor directives. You can use" + "irpf90 may not work with preprocessor directives. You can use" "Irp_if ... Irp_else ... Irp_endif" "instead of" - "#ifdef ... #else ... #endif"%line) + "#ifdef ... #else ... #endif" % line) return result, is_doc if firstword.startswith("case("): @@ -192,7 +191,7 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc): # Detect errors if firstword == "dowhile": logger.error("%s 'do while' should be in 2 words." % Do(i, line, filename)) - sys.exit(1) + sys.exit(1) return [Simple_line(i, line, filename)], is_doc @@ -216,11 +215,16 @@ def save_and_execute(irpdir, scriptname, code, interpreter): # Execute shell import util try: - text = util.check_output('PYTHONPATH=$PYTHONPATH:. %s %s' % (interpreter, irpdir_scriptname), shell=True, bufsize=-1, cwd=os.path.join(irpdir,'..')) + text = util.check_output( + 'PYTHONPATH=$PYTHONPATH:. 
%s %s' % (interpreter, irpdir_scriptname),
+            shell=True,
+            bufsize=-1,
+            cwd=os.path.join(irpdir, '..'))
     except:
-        util.logger.error("Something wrong append with embeded '%s' script: %s"% (interpreter, irpdir_scriptname))
-        import sys
-        sys.exit(1)
+        util.logger.error("Something went wrong with the embedded '%s' script: %s" %
+                          (interpreter, irpdir_scriptname))
+        import sys
+        sys.exit(1)
 
     # Create the Line
     p = Preprocess_text(scriptname)
@@ -233,35 +237,35 @@ def execute_shell(text):
     # (List[Line]) -> List[Line]
     '''Execute the embedded shell scripts'''
 
-
-    l_begin = [i for i,line in enumerate(text) if isinstance(line,Begin_shell)]
-    l_end = [i for i,line in enumerate(text) if isinstance(line,End_shell)]
-    l_output= []
+    l_begin = [i for i, line in enumerate(text) if isinstance(line, Begin_shell)]
+    l_end = [i for i, line in enumerate(text) if isinstance(line, End_shell)]
+    l_output = []
 
     # ~=~=~=~
     # E x e c u t e  S h e l l
     # ~=~=~=~
     from util import logger
     import sys
+
     def fail(l, a, b):
-        logger.error("%s In Begin_Shell, %s '%s'" % (l,a, b))
-        sys.exit(1)
+        logger.error("%s In Begin_Shell, %s '%s'" % (l, a, b))
+        sys.exit(1)
 
-    for begin,end in zip(l_begin,l_end):
+    for begin, end in zip(l_begin, l_end):
 
-        header = text[begin]
-        header_text = header.text
+        header = text[begin]
+        header_text = header.text
 
         for bracket in ['[', ']']:
-            n = header_text.count(bracket)
-            assert n <= 1, fail(header_text, "Too many", bracket)
-            assert n >= 1, fail(header_text, "Missing", bracket)
-        else:
-            interpreter = header_text[header_text.find('[')+1: header_text.find(']')].strip()
-            script = ['%s\n' % l.text for l in text[begin+1:end] ]
-            scriptname="%s_shell_%d" % (header.filename, header.i)
+            n = header_text.count(bracket)
+            assert n <= 1, fail(header_text, "Too many", bracket)
+            assert n >= 1, fail(header_text, "Missing", bracket)
+        else:
+            interpreter = header_text[header_text.find('[') + 1:header_text.find(']')].strip()
+            script = ['%s\n' % l.text for l in text[begin + 1:end]]
+            scriptname = "%s_shell_%d" % (header.filename, header.i)
 
-            l_output.append(save_and_execute(irpdir, scriptname, script,interpreter))
+            l_output.append(save_and_execute(irpdir, scriptname, script, interpreter))
 
     # ~=~=~=~
     # R e p l a c e
@@ -271,12 +275,12 @@ def execute_shell(text):
     text_new = text[:]
 
     # Because we use slicing and we want to include the end line
-    l_end_include = [i+1 for i in l_end]
+    l_end_include = [i + 1 for i in l_end]
     padding = 0
-    for begin,end, out in zip(l_begin,l_end_include,l_output):
-        text_new[begin+padding:end+padding] = out
-        padding += len(out) - (end-begin)
-
+    for begin, end, out in zip(l_begin, l_end_include, l_output):
+        text_new[begin + padding:end + padding] = out
+        padding += len(out) - (end - begin)
+
     return text_new
 
 
@@ -343,7 +347,11 @@ def execute_templates(text):
             for v in variables:
                 script += "   t0 = t0.replace('%s',d['%s'])\n" % (v, v)
             script += "   print t0\n"
-            result += save_and_execute(irpdir, scriptname="%s_template_%d" % (line.filename, line.i), code=script,interpreter="python")
+            result += save_and_execute(
+                irpdir,
+                scriptname="%s_template_%d" % (line.filename, line.i),
+                code=script,
+                interpreter="python")
         else:
             subst += line.text + '\n'
 
@@ -415,26 +423,25 @@ def remove_comments(text, form):
     result = []
 
     def remove_after_bang(str_):
-        # str -> str
-        i_bang = str_.find('!')
-
-        if i_bang == -1:
-            return str_
-        else:
-            sentinel, inside = None, False
-            for i,c in enumerate(str_):
-                if c == '"' or c == "'":
-                    if not inside:
-                        inside = True
-                        sentinel = c
-                    elif sentinel == c:
-                        inside = 
False + # str -> str + i_bang = str_.find('!') - elif c == '!' and not inside: - return str_[:i].strip() - - return str_ - + if i_bang == -1: + return str_ + else: + sentinel, inside = None, False + for i, c in enumerate(str_): + if c == '"' or c == "'": + if not inside: + inside = True + sentinel = c + elif sentinel == c: + inside = False + + elif c == '!' and not inside: + return str_[:i].strip() + + return str_ if form == Free_form: for line in text: @@ -445,10 +452,10 @@ def remove_comments(text, form): else: newline = line.text.lstrip() if (newline != "" and newline[0] != "!#"): - text = remove_after_bang(line.text) - if text: - line.text = text - result.append(line) + text = remove_after_bang(line.text) + if text: + line.text = text + result.append(line) return result else: @@ -514,7 +521,7 @@ def irp_simple_statements(text): '''Processes simple statements''' def process_irp_rw(line, rw, t): - '''Read Write''' + '''Read Write''' assert type(line) == t buffer = line.text.split() if len(buffer) == 2: @@ -547,7 +554,7 @@ def irp_simple_statements(text): def process_return(line): assert type(line) == Return - if command_line.do_assert or command_line.do_debug: + if command_line.do_debug: newline = Simple_line(line.i, " call irp_leave(irp_here)", line.filename) result = [newline, line] else: @@ -603,7 +610,7 @@ def irp_simple_statements(text): def process_end(line): '''Add irp_leave if necessary''' - if command_line.do_assert or command_line.do_debug: + if command_line.do_debug: i = line.i f = line.filename result = [Simple_line(i, " call irp_leave(irp_here)", f), line] @@ -614,11 +621,15 @@ def irp_simple_statements(text): def process_begin_provider(line): assert type(line) == Begin_provider import string - trans = string.maketrans("[]"," ") + trans = string.maketrans("[]", " ") buffer = line.lower.translate(trans).split(',') if len(buffer) < 2: - error.fail(line, "Error in Begin_provider statement") + import sys + print line + print "Error in Begin_provider statement" + sys.exit(1) + varname = buffer[1].strip() length = len(varname) i = line.i @@ -627,7 +638,7 @@ def irp_simple_statements(text): Begin_provider(i, line.text, (f, varname)), Declaration(i, " character*(%d) :: irp_here = '%s'" % (length, varname), f) ] - if command_line.do_assert or command_line.do_debug: + if command_line.do_debug: result += [Simple_line(i, " call irp_enter(irp_here)", f), ] return result @@ -647,23 +658,25 @@ def irp_simple_statements(text): length = len(subname) i = line.i f = line.filename - result = [ line, Declaration(i, " character*(%d) :: irp_here = '%s'" % (length, subname), f)] + result = [ + line, Declaration(i, " character*(%d) :: irp_here = '%s'" % (length, subname), f) + ] - if command_line.do_assert or command_line.do_debug: - result += [Simple_line(i, " call irp_enter_f(irp_here)", f), ] + if command_line.do_debug: + result += [Simple_line(i, " call irp_enter_routine(irp_here)", f), ] return result def process_function(line): assert type(line) == Function - subname = line.subname + subname = line.subname length = len(subname) i = line.i f = line.filename result = [ line, Declaration(i, " character*(%d) :: irp_here = '%s'" % (length, subname), f) ] - if command_line.do_assert or command_line.do_debug: - result += [Simple_line(i, " call irp_enter_f(irp_here)", f), ] + if command_line.do_debug: + result += [Simple_line(i, " call irp_enter_routine(irp_here)", f), ] return result def process_program(line): @@ -671,15 +684,17 @@ def irp_simple_statements(text): program_name = line.lower.split()[1] 
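+        # The user's "end program" has already been rewritten to "end subroutine"
+        # (see the comment in Preprocess_text.text below), so the original program
+        # is wrapped in this generated main program and invoked as a subroutine.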
temp = [Program(0, "program irp_program", program_name)]
 
-      if command_line.do_Task:
-        for i in [" call omp_set_nested(.TRUE.)", "!$omp parallel", "!$omp single"]:
-            temp += [Simple_line(0, i, line.filename)]
+        if command_line.do_Task:
+            for i in [" call omp_set_nested(.TRUE.)", "!$omp parallel", "!$omp single"]:
+                temp += [Simple_line(0, i, line.filename)]
 
         if command_line.do_profile:
             temp += [Simple_line(0, "call irp_init_timer()", line.filename)]
 
 # Need to choose between lazy lock or are big full initialization
 #        if command_line.do_openmp:
 #            temp += [Simple_line(0, " call irp_init_locks_%s()" % (irp_id), line.filename)]
+        if command_line.do_debug or command_line.do_assert:
+            temp += [Simple_line(0, " CALL irp_stack_init", line.filename)]
 
         temp += [Call(0, " call %s" % (program_name), line.filename)]
         if command_line.do_profile:
@@ -687,9 +702,9 @@ def irp_simple_statements(text):
 
         temp += [Simple_line(0, "  call irp_finalize_%s()" % (irp_id), line.filename)]
 
-      if command_line.do_Task:
-        for i in ["!$omp taskwait","!$omp end single", "!$omp end parallel"]:
-            temp += [Simple_line(0, i, line.filename)]
+        if command_line.do_Task:
+            for i in ["!$omp taskwait", "!$omp end single", "!$omp end parallel"]:
+                temp += [Simple_line(0, i, line.filename)]
 
         temp += [End(0, "end program", line.filename)]
 
@@ -712,7 +727,6 @@ def irp_simple_statements(text):
         Program: process_program,
     }
 
-
     result = []
     for line in text:
         buffer = [line]
@@ -756,15 +770,16 @@ def process_old_style_do(text):
           DO 1 i=1,10'''
 
     def change_matching_enddo(begin, number):
-        for i,line in enumerate(text[begin+1:]):
-            if isinstance(line,(Continue,Enddo)) and line.text.split()[0] == number:
-                text[begin+1+i] = Enddo(line.i, "  enddo", line.filename)
-                return
+        for i, line in enumerate(text[begin + 1:]):
+            if isinstance(line, (Continue, Enddo)) and line.text.split()[0] == number:
+                text[begin + 1 + i] = Enddo(line.i, "  enddo", line.filename)
+                return
 
-        from util import logger
-        logger.error(text[begin], "(%s) Old-style do loops should end with 'continue' or 'end do'" % text[begin])
-        from util import sys
-        sys.exit(1)
+        from util import logger
+        logger.error("(%s) Old-style do loops should end with 'continue' or 'end do'" %
+                     text[begin])
+        import sys
+        sys.exit(1)
 
     result = []
     for i in range(len(text)):
@@ -801,9 +816,9 @@ def change_single_line_ifs(text):
             else:
                 buffer = line.text
                 begin = buffer.find('(')
-                if begin == -1:
-                    logger.error("No '(' in if statemnt: %s" % line)
-                    sys.exit(1)
+                if begin == -1:
+                    logger.error("No '(' in if statement: %s" % line)
+                    sys.exit(1)
 
                 level = 0
                 instring = False
@@ -821,14 +836,14 @@ def change_single_line_ifs(text):
                         break
                 if level != 0:
                     logger.error("If statement not valid: %s (%s)" % (line, line.filename))
-                    sys.exit(1)
+                    sys.exit(1)
 
                 test = buffer[:end]
                 code = buffer[end:]
                 i = line.i
                 f = line.filename
                 result.append(If(i, "%s then" % (test, ), f))
-                result += get_type(i, f, code, code.lower(),code.lower(), False)[0]
+                result += get_type(i, f, code, code.lower(), code.lower(), False)[0]
                 result.append(Endif(i, "  endif", f))
             else:
                 result.append(line)
@@ -847,35 +862,40 @@ def check_begin_end(raw_text):
 
     More than one 'end' may match a given 'x' statement because of 'ifdef/else/endif' blocks. 
'''
-    d_block = {Enddo: [Do],
-               Endif: [If],
-               End_provider: [Begin_provider],
-               End_doc: [Begin_doc],
-               End: [Program, Subroutine, Function],
-               End_module: [Module],
-               End_interface: [Interface]}
+    d_block = {
+        Enddo: [Do],
+        Endif: [If],
+        End_provider: [Begin_provider],
+        End_doc: [Begin_doc],
+        End: [Program, Subroutine, Function],
+        End_module: [Module],
+        End_interface: [Interface]
+    }
 
     from collections import defaultdict
     d_type = defaultdict(list)
 
     for line in raw_text:
-        d_type[type(line)].append(line)
-
+        d_type[type(line)].append(line)
+
     for t_end, l_begin in d_block.iteritems():
-        n_end = len(d_type[t_end])
-        n_begin = sum(len(d_type[t_begin]) for t_begin in l_begin)
-
-        if n_end != n_begin:
-
-            if n_end > n_begin:
-                logger.error("You have more close statement than open statement (%s) (%s)",line.filename,t_end)
-            else:
-                logger.error('You have more end statement than open statenemt for (%s) (%s)' % (line.filename, t_end))
-
-            for i in zip([l for i in l_begin for l in d_type[i]], d_type[t_end]):
+        n_end = len(d_type[t_end])
+        n_begin = sum(len(d_type[t_begin]) for t_begin in l_begin)
+
+        if n_end != n_begin:
+
+            if n_end > n_begin:
+                logger.error("You have more close statements than open statements (%s) (%s)",
+                             line.filename, t_end)
+            else:
+                logger.error('You have more open statements than close statements for (%s) (%s)' %
+                             (line.filename, t_end))
+
+            for i in zip([l for i in l_begin for l in d_type[i]], d_type[t_end]):
                 logger.debug(i)
 
-            sys.exit(1)
+            sys.exit(1)
+
 
 ######################################################################
 def remove_ifdefs(text):
@@ -927,14 +947,14 @@ class Preprocess_text(object):
     def text(self):
         with open(self.filename, 'r') as f:
             str_ = f.read()
-
-        #Dirty thing. We will replace 'end program' by 'end subroutine'
-        #because afterward the program will be replaced by a subroutine...
-        import re
-        transform = re.compile(re.escape('end program'), re.IGNORECASE)
-
-        return transform.sub('end subroutine', str_)
+#Dirty thing. We will replace 'end program' by 'end subroutine'
+#because afterward the program will be replaced by a subroutine... 
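+#(the matching wrapper "program irp_program" that calls this subroutine is
+# generated by process_program in irp_simple_statements)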
+ + import re + transform = re.compile(re.escape('end program'), re.IGNORECASE) + + return transform.sub('end subroutine', str_) @irpy.lazy_property_mutable def text_align(self): @@ -967,7 +987,8 @@ class Preprocess_text(object): result = [] is_doc = False - for i, (l, ll, llc) in enumerate(zip(self.lines, self.lines_lower, self.lines_lower_canonized)): + for i, (l, ll, + llc) in enumerate(zip(self.lines, self.lines_lower, self.lines_lower_canonized)): line, is_doc = get_type(i + 1, self.filename, l, ll, llc, is_doc) result += line return result @@ -994,6 +1015,5 @@ class Preprocess_text(object): return result - if __name__ == '__main__': debug() diff --git a/src/profile.py b/src/profile.py index 02094e2..77344c2 100644 --- a/src/profile.py +++ b/src/profile.py @@ -24,7 +24,6 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - rdtsc = """ #ifdef __i386 double irp_rdtsc_(void) { @@ -47,14 +46,16 @@ import os import threading from irpf90_t import irpdir + def build_rdtsc(): - filename = irpdir+"irp_rdtsc.c" - file = open(filename,'w') - file.write(rdtsc) - file.close() + filename = irpdir + "irp_rdtsc.c" + file = open(filename, 'w') + file.write(rdtsc) + file.close() + def build_module(variables): - data = """ + data = """ module irp_timer double precision :: irp_profile(3,%(n)d) integer :: irp_order(%(n)d) @@ -153,23 +154,23 @@ subroutine irp_print_timer() print *, 'rdtsc latency :', irp_rdtsc_shift, ' cycles' end """ - label = {} - for i in variables: - vi = variables[i] - label[vi.label] = vi.same_as - text = [] - lmax = 0 - for l in label: - text.append(" irp_profile_label(%d) = '%s'"%(l,label[l])) - lmax = max(lmax,l) - text.sort() - text = '\n'.join(text) - data = data%{'text': text, 'n':lmax} - file = open("IRPF90_temp/irp_profile.irp.F90",'w') - file.write(data) - file.close() + label = {} + for i in variables: + vi = variables[i] + label[vi.label] = vi.same_as + text = [] + lmax = 0 + for l in label: + text.append(" irp_profile_label(%d) = '%s'" % (l, label[l])) + lmax = max(lmax, l) + text.sort() + text = '\n'.join(text) + data = data % {'text': text, 'n': lmax} + file = open("IRPF90_temp/irp_profile.irp.F90", 'w') + file.write(data) + file.close() + def run(d_entity): - build_module(d_entity) - build_rdtsc() - + build_module(d_entity) + build_rdtsc() diff --git a/src/regexps.py b/src/regexps.py index 61d77e1..c78e83e 100644 --- a/src/regexps.py +++ b/src/regexps.py @@ -24,34 +24,18 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - import re re_comment = re.compile(r"^([^'!]*)('[^']*'[^']*)*!") -re_decl = re.compile( "".join( [ r"^\ *", - r"(integer[(::)?\* ,]+", - r"|double *precision[(::)?\* ,]+", - r"|logical[(::)?\* ,]+", - r"|character[(::)?\* ,]+", - r"|real[(::)?\* ,]+", - r"|dimension[(::)?\* ,]+", - r"|parameter[(::)?\* ,]+", - r"|data */", - r"|allocatable *(::)?", - r"|common */", - r"|namelist */", - r"|save */", - r"|complex[(::)?\* ,]+", - r"|intrinsic *(::)?", - r"|external *(::)?", - r"|equivalence *(::)?", - r"|type", - r"|endtype", - r")[^=(]" -] ) ) +re_decl = re.compile("".join([ + r"^\ *", r"(integer[(::)?\* ,]+", r"|double *precision[(::)?\* ,]+", r"|logical[(::)?\* ,]+", + r"|character[(::)?\* ,]+", r"|real[(::)?\* ,]+", r"|dimension[(::)?\* ,]+", + r"|parameter[(::)?\* ,]+", r"|data */", r"|allocatable *(::)?", r"|common */", r"|namelist */", + r"|save */", r"|complex[(::)?\* ,]+", r"|intrinsic *(::)?", r"|external *(::)?", + r"|equivalence *(::)?", r"|type", r"|endtype", r")[^=(]" +])) -re_test = re.compile(r"\( 
*(.*)(\.[a-zA-Z]*\.|[<>]=?|[=/]=)([^=]*)\)") +re_test = re.compile(r"\( *(.*)(\.[a-zA-Z]*\.|[<>]=?|[=/]=)([^=]*)\)") re_string = re.compile(r"'.*?'") - diff --git a/src/routine.py b/src/routine.py index e38c697..6b69a77 100644 --- a/src/routine.py +++ b/src/routine.py @@ -24,77 +24,80 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - from irpf90_t import * from util import logger from lib.manager import irpy -class Routine(object): - ''' + +class Routine(object): + ''' A collection of list corresponding of a Routine (Subroutine, or function) ''' + ############################################################ + def __init__(self, text): + assert type(text) == list + assert len(text) > 0 - ############################################################ - def __init__(self,text): - assert type(text) == list - assert len(text) > 0 + self.text = text + self.prototype = self.text[0] + assert isinstance(self.prototype, (Subroutine, Function)) - self.text = text - self.prototype = self.text[0] - assert isinstance(self.prototype, (Subroutine, Function)) + ############################################################ + @irpy.lazy_property_mutable + def called_by(self): + raise AttributeError - ############################################################ - @irpy.lazy_property_mutable - def called_by(self): - raise AttributeError - - ############################################################ - @irpy.lazy_property - def name(self): - '''Name is lowercase''' - return self.prototype.subname +############################################################ - ############################################################ - @irpy.lazy_property - def is_function(self): - return "function" in self.prototype.lower + @irpy.lazy_property + def name(self): + '''Name is lowercase''' + return self.prototype.subname - ############################################################ - @irpy.lazy_property - def is_subroutine(self): - return "subroutine" in self.prototype.lower + ############################################################ + @irpy.lazy_property + def is_function(self): + return "function" in self.prototype.lower - ############################################################ - @irpy.lazy_property - def doc(self): + ############################################################ + @irpy.lazy_property + def is_subroutine(self): + return "subroutine" in self.prototype.lower - l_doc = [ l for l in self.text if isinstance(l,Doc) ] - if not l_doc: - logger.info("Subroutine '%s' is not documented"%(self.name)) - return [l.text.lstrip()[1:] for l in l_doc] + ############################################################ + @irpy.lazy_property + def doc(self): - ############################################################ - @irpy.lazy_property - def touches_my_self(self): - return set(x for line in self.text for x in line.text.split()[1:] if isinstance(line,(Touch, SoftTouch))) + l_doc = [l for l in self.text if isinstance(l, Doc)] + if not l_doc: + logger.info("Subroutine '%s' is not documented" % (self.name)) + return [l.text.lstrip()[1:] for l in l_doc] - @irpy.lazy_property_mutable - def touches_ancestor(self): - raise AttributeError + ############################################################ + @irpy.lazy_property + def touches_my_self(self): + return set(x for line in self.text for x in line.text.split()[1:] + if isinstance(line, (Touch, SoftTouch))) - @irpy.lazy_property - def touches(self): - return list(self.touches_my_self.union(self.touches_ancestor)) + @irpy.lazy_property_mutable + def 
touches_ancestor(self):
+        raise AttributeError
 
+    @irpy.lazy_property
+    def touches(self):
+        return list(self.touches_my_self.union(self.touches_ancestor))
 
-    ############################################################
-    @irpy.lazy_property
-    def regexp(self):
-        import re
-        return re.compile(r"([^a-z0-9'\"_]|^)%s([^a-z0-9_]|$)"%(self.name),re.I)
+    ############################################################
+    @irpy.lazy_property
+    def regexp(self):
+        import re
+        return re.compile(r"([^a-z0-9'\"_]|^)%s([^a-z0-9_]|$)" % (self.name), re.I)
 
-    ############################################################
-    @irpy.lazy_property
-    def calls(self):
-        return set(line.text.split('(',1)[0].split()[1].lower() for line in self.text if isinstance(line,Call))
+    ############################################################
+    @irpy.lazy_property
+    def calls(self):
+        return set(
+            line.text.split('(', 1)[0].split()[1].lower() for line in self.text
+            if isinstance(line, Call))
diff --git a/src/templates/finalize.f90 b/src/templates/finalize.f90
index 8ead9c4..fa4c3f3 100644
--- a/src/templates/finalize.f90
+++ b/src/templates/finalize.f90
@@ -7,7 +7,7 @@ SUBROUTINE irp_finalize_{id}
   {#entity_array}
   IF (ALLOCATED({name})) THEN
      {name_root}_is_built = .FALSE.
-!    DEALLOCATE({name})
+     DEALLOCATE({name})
   ENDIF
   {/entity_array}
 END SUBROUTINE irp_finalize_{id}
diff --git a/src/templates/irp_lock.f90 b/src/templates/irp_lock.f90
index 13c5cf6..a7c7ca1 100644
--- a/src/templates/irp_lock.f90
+++ b/src/templates/irp_lock.f90
@@ -24,7 +24,7 @@ SUBROUTINE irp_lock_{.}(set)
     CALL omp_unset_lock({.}_lock)
   ENDIF
 
-  {?do_debug} CALL irp_leach(irp_here) {/do_debug}
+  {?do_debug} CALL irp_leave(irp_here) {/do_debug}
 END SUBROUTINE irp_lock_{.}
 {/entity}
diff --git a/src/templates/irp_stack.f90 b/src/templates/irp_stack.f90
new file mode 100644
index 0000000..6c72a29
--- /dev/null
+++ b/src/templates/irp_stack.f90
@@ -0,0 +1,151 @@
+MODULE irp_stack_mod
+  INTEGER, PARAMETER :: STACKMAX=1000
+  CHARACTER*(128) ,allocatable :: irp_stack(:,:)
+  DOUBLE PRECISION ,allocatable :: irp_cpu(:,:)
+  INTEGER ,allocatable :: stack_index(:)
+  INTEGER :: nthread
+  CHARACTER*(128) :: white = ''
+END MODULE
+
+SUBROUTINE irp_stack_init
+  USE irp_stack_mod
+
+  IMPLICIT NONE
+
+  INTEGER :: ithread
+  {?do_openmp}
+  INTEGER, EXTERNAL :: omp_get_thread_num
+  INTEGER, EXTERNAL :: omp_get_max_threads
+  {/do_openmp}
+  INTEGER :: ierr
+  {^do_openmp}
+  ithread = 0
+  {:else}
+  ithread = omp_get_thread_num()
+  {/do_openmp}
+
+  {?do_openmp} !$OMP CRITICAL {/do_openmp}
+  IF (.NOT.ALLOCATED(stack_index) ) THEN
+
+    {^do_openmp}
+    nthread = 1
+    {:else}
+    nthread = omp_get_max_threads()
+    {/do_openmp}
+
+    {?do_memory}
+    print *, 'Allocating irp_stack(0:',STACKMAX,',0:',nthread,')'
+    print *, 'Allocating irp_cpu(0:',STACKMAX,',0:',nthread,')'
+    print *, 'Allocating stack_index(0:',nthread,')'
+    {/do_memory}
+
+    ALLOCATE ( irp_stack(0:STACKMAX, 0:nthread), &
+               irp_cpu(0:STACKMAX, 0:nthread), &
+               stack_index(0:nthread), STAT=ierr )
+    IF (ierr /=0 ) THEN
+      print*, 'Failed Allocating irp_stack, irp_cpu, stack_index'
+    ENDIF
+    stack_index = 0
+  END IF
+  {?do_openmp} !$OMP END CRITICAL {/do_openmp}
+
+END SUBROUTINE
+
+SUBROUTINE irp_enter_routine(irp_where)
+  USE irp_stack_mod
+
+  IMPLICIT NONE
+
+  CHARACTER*(*), INTENT(in) :: irp_where
+  INTEGER :: ithread
+  REAL :: cpu
+  {?do_openmp}
+  INTEGER, EXTERNAL :: omp_get_thread_num
+  {/do_openmp}
+
+  {^do_openmp}
+  ithread = 0
+  {:else}
+  ithread = omp_get_thread_num()
+  {/do_openmp}
+
+  stack_index(ithread) = min(stack_index(ithread)+1,STACKMAX) 
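+  ! The depth is clamped to STACKMAX, so call chains deeper than
+  ! STACKMAX overwrite the top slot instead of writing past the arrays.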
irp_stack(stack_index(ithread),ithread) = irp_where + +END SUBROUTINE irp_enter_routine + +SUBROUTINE irp_enter(irp_where) + USE irp_stack_mod + + IMPLICIT NONE + + CHARACTER*(*), INTENT(in) :: irp_where + INTEGER :: ithread + {?do_openmp} + INTEGER, EXTERNAL :: omp_get_thread_num + {/do_openmp} + + {^do_openmp} + ithread = 0 + {:else} + ithread = omp_get_thread_num() + {/do_openmp} + + print *, ithread, ':', white(1:stack_index(ithread))//'-> ', trim(irp_where) + CALL cpu_time(irp_cpu(stack_index(ithread),ithread)) +END SUBROUTINE irp_enter + + +SUBROUTINE irp_leave(irp_where) + USE irp_stack_mod + + IMPLICIT NONE + + CHARACTER*(*), INTENT(in) :: irp_where + INTEGER :: ithread + REAL :: cpu + {?do_openmp} + INTEGER, EXTERNAL :: omp_get_thread_num + {/do_openmp} + + {^do_openmp} + ithread = 0 + {:else} + ithread = omp_get_thread_num() + {/do_openmp} + + CALL cpu_time(cpu) + print *, ithread, ':', white(1:stack_index(ithread))//'<- ', & + trim(irp_stack(stack_index(ithread),ithread)), & + cpu - irp_cpu(stack_index(ithread),ithread) + + stack_index(ithread) = max(0,stack_index(ithread)-1) + +END SUBROUTINE irp_leave + + +SUBROUTINE irp_trace + USE irp_stack_mod + + IMPLICIT NONE + + INTEGER :: ithread + {?do_openmp} + INTEGER, EXTERNAL :: omp_get_thread_num + {/do_openmp} + INTEGER :: i + + {^do_openmp} + ithread = 0 + {:else} + ithread = omp_get_thread_num() + {/do_openmp} + + print *, 'Stack trace: ', ithread + print *, '-------------------------' + DO i=1,stack_index(ithread) + print *, trim(irp_stack(i,ithread)) + END DO + print *, '-------------------------' + +END SUBROUTINE irp_trace + diff --git a/src/templates/module.f90 b/src/templates/module.f90 index 77ac928..0fcf15d 100644 --- a/src/templates/module.f90 +++ b/src/templates/module.f90 @@ -6,7 +6,6 @@ ! DO NOT MODIFY IT BY HAND ! !-----------------------------------------------! - MODULE {name} {#use} diff --git a/src/templates/provider.f90 b/src/templates/provider.f90 index 54db996..ecac948 100644 --- a/src/templates/provider.f90 +++ b/src/templates/provider.f90 @@ -29,15 +29,16 @@ SUBROUTINE provide_{name} {@last} {?do_task}!$OMP END TASKGROUP{/do_task} {/last} {/l_children_static} + {#l_entity} {?dim} CALL allocate_{name} {/dim} + {/l_entity} CALL bld_{name} - - {?do_debug} CALL irp_enter(irp_here) {/do_debug} {?do_openmp} ENDIF CALL irp_lock_{name}(.FALSE.) {/do_openmp} - + + {?do_debug} CALL irp_leave(irp_here) {/do_debug} END SUBROUTINE provide_{name} diff --git a/src/templates/writer.f90 b/src/templates/writer.f90 deleted file mode 100644 index 1652eec..0000000 --- a/src/templates/writer.f90 +++ /dev/null @@ -1,69 +0,0 @@ -SUBROUTINE write_{name}(irp_num) - - USE {fmodule} - IMPLICIT NONE - - CHARACTER*(*), INTENT(IN) :: irp_num - LOGICAL :: irp_is_open = .TRUE. 
- INTEGER :: irp_iunit = 9 - - {?do_debug} - CHARACTER*(7+{@size key=name/}),PARAMETER :: irp_here = 'writer_{name}' - {/do_debug} - - {?do_debug} CALL irp_enter(irp_here) {/do_debug} - - IF (.NOT.{same_as}_is_built) THEN - CALL provide_{same_as} - ENDIF - - {children} - CALL write_{.}(irp_num) - {/children} - - DO WHILE (irp_is_open) - irp_iunit = irp_inuit + 1 - INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) - END DO - - {#group_entity} - OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') - WRITE(irp_inuit,*) {.}{dim} - CLOSE(irp_inuit) - {/group_entity} - - {?do_debug} CALL irp_leave(irp_here) {/do_debug} - -END SUBROUTINE write_{name} - -SUBROUTINE read_{name}(irp_num) - - USE {fmodule} - IMPLICIT NONE - - CHARACTER*(*), INTENT(IN) :: irp_num - LOGICAL :: irp_is_open = .TRUE. - INTEGER :: irp_iunit = 9 - - {?do_debug} - CHARACTER*(5+{@size key=name/}),PARAMETER :: irp_here = 'read_{name}' - {/do_debug} - - {?do_debug} CALL irp_enter(irp_here) {/do_debug} - - DO WHILE (irp_is_open) - irp_iunit = irp_inuit + 1 - INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) - END DO - - {#group_entity} - OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') - READ(irp_inuit,*) {name}{dim} - CLOSE(irp_inuit) - {/group_entity} - - CALL touch_{name} - {?do_debug} CALL irp_leave(irp_here) {/do_debug} - -END SUBROUTINE read_{name} - diff --git a/src/touches.py b/src/touches.py index 6494a04..fce01f8 100644 --- a/src/touches.py +++ b/src/touches.py @@ -24,30 +24,40 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr -from irpf90_t import irp_id,irpdir +from irpf90_t import irp_id, irpdir import os from command_line import command_line -def create(modules,variables): - # (Dict[str,Module]. Dict[str, Variable]) -> None - '''Create the fortran90 finalize subroutine and the touched one''' - main_modules_name =[ m.name for m in modules.values() if m.is_main] +def create(modules, variables): + # (Dict[str,Module]. 
Dict[str, Variable]) -> None + '''Create the fortran90 finalize subroutine and the touched one''' - d_template_finalize = {'id':irp_id, - 'use':[m.name for m in modules.values() if not m.is_main and m.has_irp_module], - 'entity_array': - [{'name':e.name,'name_root':e.same_as} for e in variables.values() if e.fmodule not in main_modules_name and e.dim]} - + main_modules_name = [m.name for m in modules.values() if m.is_main] - d_template_touch = {'do_debug': command_line.do_debug, - 'entity':[e.d_touche_template for e in variables.values() if e.fmodule not in main_modules_name]} - - import util - str_out = util.ashes_env.render('touch.f90', d_template_touch) + util.ashes_env.render('finalize.f90', d_template_finalize) + d_template_finalize = { + 'id': irp_id, + 'use': [m.name for m in modules.values() if not m.is_main and m.has_irp_module], + 'entity_array': [{ + 'name': e.name, + 'name_root': e.same_as + } for e in variables.values() if e.fmodule not in main_modules_name and e.dim] + } + + d_template_touch = { + 'do_debug': command_line.do_debug, + 'entity': [ + e.d_touche_template for e in variables.values() + if e.fmodule not in main_modules_name and e.d_touche_template + ] + } + import util + str_out = util.ashes_env.render('touch.f90', d_template_touch) + util.ashes_env.render( + 'finalize.f90', d_template_finalize) + + filename = os.path.join(irpdir, 'irp_touches.irp.F90') + util.lazy_write_file(filename, '%s\n' % util.remove_empy_lines(str_out)) - filename=os.path.join(irpdir,'irp_touches.irp.F90') - util.lazy_write_file(filename,'%s\n'% util.remove_empy_lines(str_out)) if __name__ == '__main__': - create() + create() diff --git a/src/util.py b/src/util.py index dd88f40..956d024 100644 --- a/src/util.py +++ b/src/util.py @@ -24,7 +24,6 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - # ~#~#~#~#~# # L o g e r # ~#~#~#~#~# @@ -42,30 +41,33 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger('Irpf90') logger.setLevel(30) - # ~#~#~#~#~# # A S H E S _ T E M P L A T E S # ~#~#~#~#~# -from lib.manager import ashes +from lib.manager import ashes import os -ashes_env = ashes.AshesEnv([os.path.join(os.path.dirname(__file__),'templates')]) +ashes_env = ashes.AshesEnv([os.path.join(os.path.dirname(__file__), 'templates')]) + def remove_empy_lines(text): - return os.linesep.join([s for s in text.splitlines() if s.strip()]) + return os.linesep.join([s for s in text.splitlines() if s.strip()]) # ~#~#~#~#~# # / / _ R E L A T E D # ~#~#~#~#~# -def chunkify(l,n_chunk): + +def chunkify(l, n_chunk): # (List[any], int) -> List [ List[any] ] '''Split the list on n_chunk''' len_ = len(l) - n = max(1, len_ / n_chunk ) - return [ l[i:i + n] for i in xrange(0, len_, n) ] + n = max(1, len_ / n_chunk) + return [l[i:i + n] for i in xrange(0, len_, n)] import multiprocessing + + def parmap(f, it, parallel=False): # (Callable, Iterable, bool) -> List '''Parallel version of the std map function @@ -81,7 +83,7 @@ def parmap(f, it, parallel=False): ''' if not parallel: - return map(f, it) + return map(f, it) nproc = multiprocessing.cpu_count() @@ -109,12 +111,12 @@ def parmap(f, it, parallel=False): # In this implementation, we minimizise the communication # (aka 1 job by processor) - it_chunk = chunkify(l=it,n_chunk=nproc) + it_chunk = chunkify(l=it, n_chunk=nproc) + def F(chunk): # (List[any]) -> (List[any]) '''Same as 'f' but for a chunck''' - return map(f,chunk) - + return map(f, chunk) q_in = multiprocessing.JoinableQueue() q_out = multiprocessing.Queue() @@ -123,8 +125,8 @@ def 
parmap(f, it, parallel=False): stop_condition = None def worker(): - # () -> None - '''Read a task from q_in, excute it, and store it in q_out + # () -> None + '''Read a task from q_in, excute it, and store it in q_out Note: - We use 'F' and not 'f'. @@ -132,19 +134,19 @@ def parmap(f, it, parallel=False): - We get, and put an idx to allow the possibility of ordering afterward - We store any exeception, to raise her afterward ''' - for i, x in iter(q_in.get, stop_condition): + for i, x in iter(q_in.get, stop_condition): - try: - result = F(x) - except BaseException as e: - t = e - else: - t = (i, result) + try: + result = F(x) + except BaseException as e: + t = e + else: + t = (i, result) - q_out.put(t) + q_out.put(t) q_in.task_done() - q_in.task_done() + q_in.task_done() # Process' creation l_proc = [multiprocessing.Process(target=worker) for _ in range(nproc)] @@ -154,7 +156,7 @@ def parmap(f, it, parallel=False): # Add the job to the queue (Note we add an idx, this will all) for i, x in enumerate(it_chunk): - q_in.put((i, x)) + q_in.put((i, x)) # Now add the stop contidion and join # (Because q_in.get is blocking we don't need to join the queue before) @@ -163,26 +165,28 @@ def parmap(f, it, parallel=False): q_in.join() # Get all the chunk and join the process - l_res = [q_out.get() for _ in range(len(it_chunk))] + l_res = [q_out.get() for _ in range(len(it_chunk))] for p in l_proc: p.join() - + # Check if error have occured try: - from itertools import ifilter - e = next(ifilter(lambda t: isinstance(t,BaseException), l_res)) + from itertools import ifilter + e = next(ifilter(lambda t: isinstance(t, BaseException), l_res)) except StopIteration: - # Now we need first to order the result, and secondly to flatte it - return [item for _, chunk in sorted(l_res) for item in chunk] + # Now we need first to order the result, and secondly to flatte it + return [item for _, chunk in sorted(l_res) for item in chunk] else: - raise e + raise e # ~#~#~#~#~# # I O _ R E L A T E D # ~#~#~#~#~# import hashlib import os + + def cached_file(filename, text): # (str,str) -> bool '''Check if file locatte at filename containt the same data as text @@ -192,19 +196,19 @@ def cached_file(filename, text): ''' def digest(data): - # (str) -> str - '''compute an uniq data id''' - return hashlib.md5(data).hexdigest() + # (str) -> str + '''compute an uniq data id''' + return hashlib.md5(data).hexdigest() try: - text_ref = open(filename, 'rb').read() + text_ref = open(filename, 'rb').read() except IOError: return False else: return digest(text_ref) == digest(text) -def lazy_write_file(filename, text, conservative=False,touch=False): +def lazy_write_file(filename, text, conservative=False, touch=False): # (str, str, bool) -> None '''Write data lazily in filename location. @@ -216,7 +220,8 @@ def lazy_write_file(filename, text, conservative=False,touch=False): with open(filename, 'w') as f: f.write(text) elif touch: - os.utime(filename,None) + os.utime(filename, None) + def listdir(directory, abspath=False): #(str, bool) -> List[str] @@ -228,6 +233,7 @@ def listdir(directory, abspath=False): else: return [os.path.abspath(os.path.join(directory, f)) for f in l_filename] + def check_output(*popenargs, **kwargs): """Run command with arguments and return its output as a byte string. Backported from Python 2.7 as it's implemented as pure python on stdlib. 
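The yapf churn above makes the control flow of parmap hard to follow in the
diff itself. Stripped to its core, the queue discipline is the following
(a minimal sketch with illustrative names; the real code additionally
forwards any BaseException raised in a worker back to the caller):

    import multiprocessing

    def parallel_map_sketch(f, l, nproc):
        # One chunk per process, to minimize queue traffic.
        n = max(1, len(l) // nproc)
        chunks = [l[i:i + n] for i in range(0, len(l), n)]

        q_in = multiprocessing.JoinableQueue()
        q_out = multiprocessing.Queue()

        def worker():
            # Consume (index, chunk) tasks until the None sentinel;
            # the index lets the parent restore the original order.
            for i, chunk in iter(q_in.get, None):
                q_out.put((i, [f(x) for x in chunk]))
                q_in.task_done()
            q_in.task_done()        # account for the sentinel itself

        l_proc = [multiprocessing.Process(target=worker) for _ in range(nproc)]
        for p in l_proc:
            p.start()
        for task in enumerate(chunks):
            q_in.put(task)
        for _ in l_proc:
            q_in.put(None)          # one sentinel per worker
        q_in.join()

        l_res = [q_out.get() for _ in range(len(chunks))]
        for p in l_proc:
            p.join()
        # Reorder by chunk index, then flatten.
        return [x for _, chunk in sorted(l_res) for x in chunk]
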
@@ -252,28 +258,29 @@ def check_output(*popenargs, **kwargs): # ~#~#~#~#~# -def uniquify(l,sort=False): +def uniquify(l, sort=False): # (Iter, bool) -> List[Any] '''Uniquify a immutable iterable. Don't preserve the order. Or maybe.''' - #Be carefull that element in Iter can be unshable. try: - r = list(set(l)) + r = list(set(l)) except TypeError: - used = list() - r = [x for x in l if x not in used and (used.append(x) or True)] - + used = list() + r = [x for x in l if x not in used and (used.append(x) or True)] + if not sort: - return r + return r else: - return sorted(r) + return sorted(r) + def OrderedUniqueList(l): # (Iter, bool) -> List[Any] '''Uniquify a immutable iterable. Don't preserve the order''' return sorted(set(l)) + def flatten(l_2d): # (List [ Iter[Any] ]) -> List '''Construct a copy of the 2d list collapsed into one dimension. @@ -289,25 +296,25 @@ def flatten(l_2d): # I R P _ R E L A T E D # ~#~#~#~#~# def dimsize(x): - # (str) -> str - '''Compute the number of element in the array''' - try: - b0, b1 = x.split(':') - except ValueError: - return x + # (str) -> str + '''Compute the number of element in the array''' + try: + b0, b1 = x.split(':') + except ValueError: + return x - b0_is_digit = b0.replace('-', '').isdigit() - b1_is_digit = b1.replace('-', '').isdigit() + b0_is_digit = b0.replace('-', '').isdigit() + b1_is_digit = b1.replace('-', '').isdigit() - if b0_is_digit and b1_is_digit: - size = str(int(b1) - int(b0) + 1) - elif b0_is_digit: - size = "(%s) - (%d)" % (b1, int(b0) - 1) - elif b1_is_digit: - size = "(%d) - (%s)" % (int(b1) + 1, b0) - else: - size = "(%s) - (%s) + 1" % (b1, b0) - return size + if b0_is_digit and b1_is_digit: + size = str(int(b1) - int(b0) + 1) + elif b0_is_digit: + size = "(%s) - (%d)" % (b1, int(b0) - 1) + elif b1_is_digit: + size = "(%d) - (%s)" % (int(b1) + 1, b0) + else: + size = "(%s) - (%s) + 1" % (b1, b0) + return size def build_dim(l_dim, colons=False): @@ -326,35 +333,37 @@ def build_dim(l_dim, colons=False): return "(%s)" % (",".join(l_dim_colons)) + def mangled(l_ent, d_ent): # (List, Dict[str,Entity]) -> list '''Create a uniq list of providier (merge the multione) ''' return OrderedUniqueList(d_ent[name].same_as for name in l_ent) -def build_use(l_ent, d_ent,use=True): + +def build_use(l_ent, d_ent, use=True): # (List, Dict[str,Entity]) -> list '''Contruct the fortran90 'use' statement for the list of entity''' l_name = OrderedUniqueList(d_ent[x].fmodule for x in l_ent) if not use: - return l_name + return l_name else: - return [" use %s" % n for n in l_name] - + return [" use %s" % n for n in l_name] + def build_call_provide(l_ent, d_ent): # (List, Dict[str,Entity]) -> list '''Construct the fortran 90 call the provider needed by the list of entity''' # Get the corect name (in the case of multiple provider line) - l_same_as = mangled(l_ent,d_ent) + l_same_as = mangled(l_ent, d_ent) + def bld_f90(x): - return [ " if (.not.%s_is_built) then" % x, - " call provide_%s" % x, - " endif"] + return [" if (.not.%s_is_built) then" % x, " call provide_%s" % x, " endif"] return flatten(map(bld_f90, l_same_as)) + def che_merge(sets): #(List[Set] -> List[Set] """Merge a list of set is they are not disjoint. 
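To make the bound arithmetic in dimsize concrete, here is how its branches
evaluate on representative Fortran dimension specifiers (worked out from the
code above):

    dimsize('10')     # -> '10'               no ':', returned unchanged
    dimsize('3:7')    # -> '5'                both bounds literal: 7 - 3 + 1
    dimsize('0:n')    # -> '(n) - (-1)'       literal lower bound folded in
    dimsize('m:n')    # -> '(n) - (m) + 1'    fully symbolic
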
@@ -364,25 +373,32 @@ def che_merge(sets): results = [] upd, isd, pop = set.update, set.isdisjoint, sets.pop while sets: - if not [upd(sets[0],pop(i)) for i in range(len(sets)-1,0,-1) if not isd(sets[0],sets[i])]: + if not [ + upd(sets[0], pop(i)) for i in range(len(sets) - 1, 0, -1) + if not isd(sets[0], sets[i]) + ]: results.append(pop(0)) return results def l_dummy_entity(d_entity): - # Dict[str:Entity] -> List[set] - from itertools import combinations - l_candidate_botom = [ (i,j) for i,j in combinations(d_entity.keys(),2) if d_entity[i].needs == d_entity[j].needs] - l_dummy = [set([i,j]) for i,j in l_candidate_botom if d_entity[i].needed_by == d_entity[j].needed_by] + # Dict[str:Entity] -> List[set] + from itertools import combinations + l_candidate_botom = [(i, j) for i, j in combinations(d_entity.keys(), 2) + if d_entity[i].needs == d_entity[j].needs] + l_dummy = [ + set([i, j]) for i, j in l_candidate_botom if d_entity[i].needed_by == d_entity[j].needed_by + ] + + return che_merge(l_dummy) - return che_merge(l_dummy) def split_l_set(l_set_org): - #(List[set] -> (List, Set) - '''Split the list of set into a list of Head and and the concetenad of all the tail + #(List[set] -> (List, Set) + '''Split the list of set into a list of Head and and the concetenad of all the tail Note: Head and Tail a not defined in set. Head in one element of the set, and tail the rest. ''' - l_set = [set(s) for s in l_set_org] - l_main = [ s.pop() for s in l_set] - return l_main, set(flatten(l_set)) + l_set = [set(s) for s in l_set_org] + l_main = [s.pop() for s in l_set] + return l_main, set(flatten(l_set)) diff --git a/src/vim.py b/src/vim.py index b0d6778..c97be6e 100644 --- a/src/vim.py +++ b/src/vim.py @@ -24,25 +24,26 @@ # 31062 Toulouse Cedex 4 # scemama@irsamc.ups-tlse.fr - import os + def install(): - VIM = os.environ["HOME"]+"/.vim" - try: - if os.access(VIM+"/syntax/irpf90.vim",os.F_OK): - return - if not os.access(VIM,os.F_OK): - os.mkdir(VIM) - file = open(VIM+"/filetype.vim","a") - file.write("au BufRead,BufNewFile *.irp.f setfiletype irpf90") - file.close() - if not os.access(VIM+"/syntax",os.F_OK): - os.mkdir(VIM+"/syntax") - wd = os.path.abspath(os.path.dirname(__file__)) - os.symlink(wd+"/irpf90.vim",VIM+"/syntax/irpf90.vim") - except: - pass + VIM = os.environ["HOME"] + "/.vim" + try: + if os.access(VIM + "/syntax/irpf90.vim", os.F_OK): + return + if not os.access(VIM, os.F_OK): + os.mkdir(VIM) + file = open(VIM + "/filetype.vim", "a") + file.write("au BufRead,BufNewFile *.irp.f setfiletype irpf90") + file.close() + if not os.access(VIM + "/syntax", os.F_OK): + os.mkdir(VIM + "/syntax") + wd = os.path.abspath(os.path.dirname(__file__)) + os.symlink(wd + "/irpf90.vim", VIM + "/syntax/irpf90.vim") + except: + pass + if __name__ == "__main__": - install() + install() From 5379700a3d39f2360ff020cfd4da6c8e93e95e93 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 11:37:07 -0500 Subject: [PATCH 24/31] More logical allocate --- src/templates/allocater.f90 | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/templates/allocater.f90 b/src/templates/allocater.f90 index 5f213ab..fa50bbe 100644 --- a/src/templates/allocater.f90 +++ b/src/templates/allocater.f90 @@ -9,12 +9,19 @@ SUBROUTINE allocate_{name} CHARACTER*(9+{@size key=name/}),PARAMETER :: irp_here = 'allocate_{name}' INTEGER :: irp_err + LOGICAL :: alloc - IF ( ALLOCATED({name}) .AND.( & + alloc = ALLOCATED({name}) + + IF ( alloc .AND.( & {#l_dim} - ( SIZE({name},{rank}) /= 
{value} ) {@sep}.OR.{/sep} & + ( SIZE({name},{rank}) == {value} ) {@sep}.OR.{/sep} & {/l_dim})) THEN + RETURN + ELSE IF (.NOT.alloc) THEN + GO TO 666 + ELSE {?do_memory} PRINT*, irp_here//': Deallocated {name}' {/do_memory} DEALLOCATE({name},STAT=irp_err) @@ -24,10 +31,6 @@ SUBROUTINE allocate_{name} ENDIF GO TO 666 - ELSE IF (.NOT.ALLOCATED({name})) THEN - GO TO 666 - ELSE - RETURN ENDIF 666 CONTINUE @@ -38,7 +41,7 @@ SUBROUTINE allocate_{name} {:else} ALLOCATE({name} ({dim}[*]), STAT=irp_err) {/do_corray} - + IF (irp_err /= 0) then PRINT*, irp_here//': Allocation failed: {name}' PRINT*,' size: {dim}' From ea5fa02b261c6d9a7949da1ca78c8ccd8cdbcbda Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 13:01:12 -0500 Subject: [PATCH 25/31] Fix writer --- example/Makefile | 6 +++--- src/entity.py | 10 ++++++---- src/module.py | 1 - src/templates/ioer.f90 | 32 ++++++++++++++++---------------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/example/Makefile b/example/Makefile index c026e2a..1661c21 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,5 +1,5 @@ -IRPF90= irpf90 -IRPF90FLAGS= -I ./ -I input/ +IRPF90= ../bin/irpf90 +IRPF90FLAGS= BUILD_SYSTEM= make .EXPORT_ALL_VARIABLES: @@ -50,7 +50,7 @@ define run_and_touch $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2) endef -EXE := $(shell egrep -r '^\s*program' *.irp.f | awk '{print $$2}') +EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1) .PHONY: all diff --git a/src/entity.py b/src/entity.py index f43fcf2..84cabd8 100644 --- a/src/entity.py +++ b/src/entity.py @@ -178,17 +178,19 @@ class Entity(object): 'group_entity': [{ 'name': n, 'dim': build_dim( - self.cm_d_variable[n].dim, colons=True) + self.d_entity[n].dim, colons=True) } for n in self.l_name] } - return ashes_env('io.f90', d_template).split('\n') + return ashes_env.render('ioer.f90', d_template).split('!TOKEN_SPLIT') + @irpy.lazy_property def reader(self): - return io.er.split('TOKEN_SPLIT')[0] + return self.io_er[1].split('\n') + @irpy.lazy_property def writer(self): - return io.er.split('TOKEN_SPLIT')[1] + return self.io_er[0].split('\n') @irpy.lazy_property_mutable def is_read(self): diff --git a/src/module.py b/src/module.py index 111dead..cf55612 100644 --- a/src/module.py +++ b/src/module.py @@ -114,7 +114,6 @@ class Fmodule(object): result += var.reader if var.is_written: result += var.writer - return result @irpy.lazy_property diff --git a/src/templates/ioer.f90 b/src/templates/ioer.f90 index ab621e2..a00789a 100644 --- a/src/templates/ioer.f90 +++ b/src/templates/ioer.f90 @@ -1,4 +1,4 @@ -SUBROUTINE write_{name}(irp_num) +SUBROUTINE writer_{name}(irp_num) USE {fmodule} IMPLICIT NONE @@ -17,28 +17,28 @@ SUBROUTINE write_{name}(irp_num) CALL provide_{same_as} ENDIF - {children} - CALL write_{.}(irp_num) + {#children} + CALL writer_{.}(irp_num) {/children} DO WHILE (irp_is_open) - irp_iunit = irp_inuit + 1 - INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + irp_iunit = irp_iunit + 1 + INQUIRE(UNIT=irp_iunit, OPENED=irp_is_open) END DO {#group_entity} - OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') - WRITE(irp_inuit,*) {.}{dim} - CLOSE(irp_inuit) + OPEN(UNIT=irp_iunit,file='irpf90_{name}_'//trim(irp_num),FORM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + WRITE(irp_iunit,*) {name}{dim} + CLOSE(irp_iunit) {/group_entity} {?do_debug} CALL irp_leave(irp_here) {/do_debug} -END SUBROUTINE write_{name} +END SUBROUTINE 
writer_{name} !TOKEN_SPLIT -SUBROUTINE read_{name}(irp_num) +SUBROUTINE reader_{name}(irp_num) USE {fmodule} IMPLICIT NONE @@ -54,18 +54,18 @@ SUBROUTINE read_{name}(irp_num) {?do_debug} CALL irp_enter(irp_here) {/do_debug} DO WHILE (irp_is_open) - irp_iunit = irp_inuit + 1 - INQUIRE(UNIT=irp_inuit, OPENED=irp_is_open) + irp_iunit = irp_iunit + 1 + INQUIRE(UNIT=irp_iunit, OPENED=irp_is_open) END DO {#group_entity} - OPEN(UNIT=irp_inuit,file='irpf90_{name}_'//trim(irp_num),FROM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') - READ(irp_inuit,*) {name}{dim} - CLOSE(irp_inuit) + OPEN(UNIT=irp_iunit,file='irpf90_{name}_'//trim(irp_num),FORM='FORMATTED',STATUS='UNKNOWN',ACTION='WRITE') + READ(irp_iunit,*) {name}{dim} + CLOSE(irp_iunit) {/group_entity} CALL touch_{name} {?do_debug} CALL irp_leave(irp_here) {/do_debug} -END SUBROUTINE read_{name} +END SUBROUTINE reader_{name} From eb176ec0a28ef92435a84a994df29af8bce4692f Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 15:27:31 -0500 Subject: [PATCH 26/31] Fancy general Makefile --- example/Makefile | 59 ++++++++++++++++------------- src/build_file.py | 48 ++++-------------------- src/templates/general.make | 76 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 116 insertions(+), 67 deletions(-) create mode 100644 src/templates/general.make diff --git a/example/Makefile b/example/Makefile index 1661c21..0f864f7 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,21 +1,22 @@ IRPF90= ../bin/irpf90 IRPF90FLAGS= +# Make | Ninja BUILD_SYSTEM= make .EXPORT_ALL_VARIABLES: -AR = ar -CC = gcc -CFLAGS = -O2 -CXX = g++ -CXXFLAGS = -O2 -FC = gfortran -FCFLAGS = -O2 LIB = -RANLIB = ranlib - -OBJ = SRC = +OBJ = + +# Compiler ! Will be overwriten by the ENV one if avalaible. +FC ?= gfortran-6 +FCFLAGS ?= -O2 + +CC ?= gcc +CFLAGS ?= -O2 +CXX ?= g++ +CXXFLAGS ?= -O2 # Dark magic below modify with caution! # "You are Not Expected to Understand This" @@ -32,38 +33,44 @@ SRC = # | | I _________ # | | I c(` ')o # | l I \. ,/ -# _/j L l\_! _//^---^\\_ +# _/j L l\_! _// +#Misc +AR ?= +RANLIB ?= +# Variable need by IRPF90 ifeq ($(BUILD_SYSTEM),ninja) - BUILD_FILE=IRPF90_temp/build.ninja - IRPF90FLAGS += -j + BUILD_FILE=IRPF90_temp/build.ninja + IRPF90FLAGS += -j else ifeq ($(BUILD_SYSTEM),make) - BUILD_FILE=IRPF90_temp/build.make - BUILD_SYSTEM += -j + BUILD_FILE=IRPF90_temp/build.make + BUILD_SYSTEM += -j else DUMMY: $(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)') endif -define run_and_touch - $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2) -endef - +# Actual Rule EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1) -.PHONY: all +.PHONY: clean all -all: $(BUILD_FILE) - $(call run_and_touch, $<, $(EXE)) +all: $(EXE) -.NOTPARALLEL: $(EXE) $(EXE): $(BUILD_FILE) - $(call run_and_touch, $<, $(EXE)) + @printf "%b" "\033[0;32m Build $@...\033[m\n" + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(addprefix $(CURDIR)/, $@) && touch $@ + + +.NOTPARALLEL: $(BUILD_FILE) $(BUILD_FILE): $(shell find . 
-maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) - $(IRPF90) $(IRPF90FLAGS) + @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" + @$(IRPF90) $(IRPF90FLAGS) clean: - rm -f -- $(BUILD_FILE) $(EXE) $(shell find IRPF90_temp -type f \( -name "*.o" -o -name "*.mod" -name "*.a" \) -delete;) + rm -f -- $(BUILD_FILE) $(EXE) + veryclean: clean rm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags + diff --git a/src/build_file.py b/src/build_file.py index e4cd072..b62a63e 100644 --- a/src/build_file.py +++ b/src/build_file.py @@ -282,49 +282,15 @@ def create_build_remaining(f, ninja): def create_makefile(d_flags, d_var, irpf90_flags, ninja=True): - result = [ - "IRPF90= irpf90", "IRPF90FLAGS= %s" % irpf90_flags, - "BUILD_SYSTEM= %s" % ('ninja' if ninja else 'make'), "" - ] - - # Export all the env variable used by irpf90 - result += [ - '.EXPORT_ALL_VARIABLES:', '', '\n'.join("{0} = {1}".format(k, v) - for k, v in sorted(d_flags.iteritems())), '', - '\n'.join("{0} = {1}".format(k, ' '.join(v)) for k, v in sorted(d_var.iteritems())), '' - ] - - result += [ - r'# Dark magic below modify with caution!', r'# "You are Not Expected to Understand This"', - r"# .", r"# /^\ .", r'# /\ "V",', - r"# /__\ I O o", r"# //..\\ I .", r"# \].`[/ I", - r"# /l\/j\ (] . O", r"# /. ~~ ,\/I .", r"# \\L__j^\/I o", - r"# \/--v} I o .", r"# | | I _________", r"# | | I c(` ')o", - r"# | l I \. ,/", r"# _/j L l\_! _//^---^\\_", r"" - ] - - result += [ - "", "ifeq ($(BUILD_SYSTEM),ninja)", "\tBUILD_FILE=IRPF90_temp/build.ninja", - "\tIRPF90FLAGS += -j", "else ifeq ($(BUILD_SYSTEM),make)", - "\tBUILD_FILE=IRPF90_temp/build.make", "\tBUILD_SYSTEM += -j", "else", "DUMMY:", - "\t$(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)')", "endif" - ] - - result += [ - "", "define run_and_touch", - " $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2)", - "endef", "", "EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1)", "", - ".PHONY: all", "", "all: $(BUILD_FILE)", "\t$(call run_and_touch, $<, $(EXE))", "", - ".NOTPARALLEL: $(EXE)", "$(EXE): $(BUILD_FILE)", "\t$(call run_and_touch, $<, $(EXE))", - "$(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print)", - "\t$(IRPF90) $(IRPF90FLAGS)", "", "clean:", '\trm -f -- $(BUILD_FILE) $(EXE)' - '\t$(shell find IRPF90_temp -type f \\( -name "*.o" -o -name "*.mod" -name "*.a" \\) -delete;)', - "veryclean: clean", "\trm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags" - ] + d = {'BUILD_SYSTEM': 'ninja' if ninja else 'make', + 'irpf90_flags': irpf90_flags} + d.update(d_flags) + d.update(d_var) + import util - data = '%s\n' % '\n'.join(result) - util.lazy_write_file('Makefile', data, conservative=True) + str_ = util.ashes_env.render('general.make', d) + util.lazy_write_file('Makefile', str_, conservative=True) def create_make_all_clean(l_main): diff --git a/src/templates/general.make b/src/templates/general.make new file mode 100644 index 0000000..183adc1 --- /dev/null +++ b/src/templates/general.make @@ -0,0 +1,76 @@ +IRPF90= irpf90 +IRPF90FLAGS= {irpf90_flags} +# Make | Ninja +BUILD_SYSTEM= {BUILD_SYSTEM} + +.EXPORT_ALL_VARIABLES: + +LIB = {LIB} +SRC = {SRC} +OBJ = {OBJ} + +# Compiler ! Will be overwriten by the ENV one if avalaible. +FC ?= {FC} +FCFLAGS ?= {FCFLAGS} + +CC ?= {CC} +CFLAGS ?= {CFLAGS} +CXX ?= {CXX} +CXXFLAGS ?= {CXXFLAGS} + +# Dark magic below modify with caution! +# "You are Not Expected to Understand This" +# . +# /^\ . 
+# /\ "V", +# /__\ I O o +# //..\\ I . +# \].`[/ I +# /l\/j\ (] . O +# /. ~~ ,\/I . +# \\L__j^\/I o +# \/--v} I o . +# | | I _________ +# | | I c(` ')o +# | l I \. ,/ +# _/j L l\_! _// + +#Misc +AR ?= {ar} +RANLIB ?= {ranlib} + +# Variable need by IRPF90 +ifeq ($(BUILD_SYSTEM),ninja) + BUILD_FILE=IRPF90_temp/build.ninja + IRPF90FLAGS += -j +else ifeq ($(BUILD_SYSTEM),make) + BUILD_FILE=IRPF90_temp/build.make + BUILD_SYSTEM += -j +else +DUMMY: + $(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)') +endif + +# Actual Rule +EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1) + +.PHONY: clean all + +all: $(EXE) + +$(EXE): $(BUILD_FILE) + @printf "%b" "\033[0;32m Build $@...\033[m\n" + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(addprefix $(CURDIR)/, $@) && touch $@ + + +.NOTPARALLEL: $(BUILD_FILE) +$(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) + @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" + @$(IRPF90) $(IRPF90FLAGS) + +clean: + rm -f -- $(BUILD_FILE) $(EXE) + +veryclean: clean + rm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags + From 8238090f27f9830751f6c710fa29034a60c64392 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 15:50:07 -0500 Subject: [PATCH 27/31] Fancy general Makefile --- example/Makefile | 14 ++++++++++---- example/uvwt.irp.f | 15 +++++++++------ src/templates/general.make | 12 +++++++++--- 3 files changed, 28 insertions(+), 13 deletions(-) diff --git a/example/Makefile b/example/Makefile index 0f864f7..a274703 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,5 +1,5 @@ IRPF90= ../bin/irpf90 -IRPF90FLAGS= +IRPF90FLAGS= -I input/ # Make | Ninja BUILD_SYSTEM= make @@ -53,17 +53,23 @@ endif # Actual Rule EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1) +ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) .PHONY: clean all all: $(EXE) -$(EXE): $(BUILD_FILE) +$(EXE): $(ARCH) @printf "%b" "\033[0;32m Build $@...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(addprefix $(CURDIR)/, $@) && touch $@ + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $(BUILD_FILE)) $(addprefix $(CURDIR)/, $@) && touch $@ -.NOTPARALLEL: $(BUILD_FILE) +.NOTPARALLEL: $(BUILD_FILE) $(ARCH) + +$(ARCH): $(BUILD_FILE) + @printf "%b" "\033[0;32m Creating the archive...\033[m\n" + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(ARCH) + $(BUILD_FILE): $(shell find . 
-maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" @$(IRPF90) $(IRPF90FLAGS) diff --git a/example/uvwt.irp.f b/example/uvwt.irp.f index 082f229..859cd09 100644 --- a/example/uvwt.irp.f +++ b/example/uvwt.irp.f @@ -1,28 +1,31 @@ BEGIN_PROVIDER [integer, t ] + IMPLICIT NONE t = u1+v+4 END_PROVIDER BEGIN_PROVIDER [integer,w] + IMPLICIT NONE w = d5+3 END_PROVIDER BEGIN_PROVIDER [ integer, v ] - implicit none + IMPLICIT NONE v = u2+w+2 END_PROVIDER BEGIN_PROVIDER [ integer, u1 ] + IMPLICIT NONE integer :: fu u1 = fu(d1,d2) END_PROVIDER BEGIN_PROVIDER [ integer, u2 ] + IMPLICIT NONE integer :: fu u2 = fu(d3,d4) - !df END_PROVIDER -integer function fu(x,y) - integer :: x,y - fu = x+y+1 -end function +INTEGER function fu(x,y) + INTEGER, INTENT(in) :: x,y + fu = int(x+y+1+3) +END FUNCTION diff --git a/src/templates/general.make b/src/templates/general.make index 183adc1..0659820 100644 --- a/src/templates/general.make +++ b/src/templates/general.make @@ -53,17 +53,23 @@ endif # Actual Rule EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1) +ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) .PHONY: clean all all: $(EXE) -$(EXE): $(BUILD_FILE) +$(EXE): $(ARCH) @printf "%b" "\033[0;32m Build $@...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(addprefix $(CURDIR)/, $@) && touch $@ + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $(BUILD_FILE)) $(addprefix $(CURDIR)/, $@) && touch $@ -.NOTPARALLEL: $(BUILD_FILE) +.NOTPARALLEL: $(BUILD_FILE) $(ARCH) + +$(ARCH): $(BUILD_FILE) + @printf "%b" "\033[0;32m Creating the archive...\033[m\n" + @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(ARCH) + $(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" @$(IRPF90) $(IRPF90FLAGS) From 926c72a88b753c028073ff86d968078984682167 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 16:06:29 -0500 Subject: [PATCH 28/31] Fancy general Makefile --- example/Makefile | 8 ++++++-- src/templates/general.make | 10 ++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/example/Makefile b/example/Makefile index a274703..6653b88 100644 --- a/example/Makefile +++ b/example/Makefile @@ -59,16 +59,20 @@ ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) all: $(EXE) +define run + $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1) +endef + $(EXE): $(ARCH) @printf "%b" "\033[0;32m Build $@...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $(BUILD_FILE)) $(addprefix $(CURDIR)/, $@) && touch $@ + @$(call run, $(addprefix $(CURDIR)/, $@)) && touch $@ .NOTPARALLEL: $(BUILD_FILE) $(ARCH) $(ARCH): $(BUILD_FILE) @printf "%b" "\033[0;32m Creating the archive...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(ARCH) + @$(call run, $@) && touch $@ $(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" diff --git a/src/templates/general.make b/src/templates/general.make index 0659820..1822c84 100644 --- a/src/templates/general.make +++ b/src/templates/general.make @@ -59,16 +59,22 @@ ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) all: $(EXE) +define run + $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1)", +endef + +#We allow for the user to ask for 'relative' path +#But the IRPF90 build system use absolute path. 
$(EXE): $(ARCH) @printf "%b" "\033[0;32m Build $@...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $(BUILD_FILE)) $(addprefix $(CURDIR)/, $@) && touch $@ + @$(call run, $(addprefix $(CURDIR)/, $@)) && touch $@ .NOTPARALLEL: $(BUILD_FILE) $(ARCH) $(ARCH): $(BUILD_FILE) @printf "%b" "\033[0;32m Creating the archive...\033[m\n" - @$(BUILD_SYSTEM) -C $(dir $< ) -f $(notdir $< ) $(ARCH) + @$(call run, $@) && touch $@ $(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print) @printf "%b" "\033[0;32m Running the IRPF90-compiler...\033[m\n" From 5da213f29c4bb5346c1a179a7283b2602482d4ee Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 16:07:23 -0500 Subject: [PATCH 29/31] Fancy general Makefile --- example/Makefile | 2 ++ src/templates/general.make | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/example/Makefile b/example/Makefile index 6653b88..2e54ecb 100644 --- a/example/Makefile +++ b/example/Makefile @@ -63,6 +63,8 @@ define run $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1) endef +#We allow for the user to ask for 'relative' path +#But the IRPF90 build system use absolute path. $(EXE): $(ARCH) @printf "%b" "\033[0;32m Build $@...\033[m\n" @$(call run, $(addprefix $(CURDIR)/, $@)) && touch $@ diff --git a/src/templates/general.make b/src/templates/general.make index 1822c84..8e1ebb0 100644 --- a/src/templates/general.make +++ b/src/templates/general.make @@ -60,7 +60,7 @@ ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) all: $(EXE) define run - $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1)", + $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1) endef #We allow for the user to ask for 'relative' path From 9c233e0362f197242829ded3b29148e8df981c16 Mon Sep 17 00:00:00 2001 From: Thomas Applencourt Date: Fri, 17 Mar 2017 16:21:00 -0500 Subject: [PATCH 30/31] Fix provider... --- example/Makefile | 2 +- src/templates/provider.f90 | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/example/Makefile b/example/Makefile index 2e54ecb..860b446 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,4 +1,4 @@ -IRPF90= ../bin/irpf90 +IRPF90= ../bin/irpf90 -d IRPF90FLAGS= -I input/ # Make | Ninja BUILD_SYSTEM= make diff --git a/src/templates/provider.f90 b/src/templates/provider.f90 index ecac948..ea593db 100644 --- a/src/templates/provider.f90 +++ b/src/templates/provider.f90 @@ -34,7 +34,8 @@ SUBROUTINE provide_{name} {/l_entity} CALL bld_{name} - + {name}_is_built = .TRUE. + {?do_openmp} ENDIF CALL irp_lock_{name}(.FALSE.) 
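The fix above matters because, without {name}_is_built = .TRUE., a provider
would re-run its builder on every call. Rendered for a hypothetical entity t
(a sketch only, with the OpenMP locking, debug hooks and dependency provides
elided), the memoization guard now reads:

    SUBROUTINE provide_t
      IF (.NOT.t_is_built) THEN
        CALL bld_t
        t_is_built = .TRUE.   ! the added line: mark the entity as built
      ENDIF
    END SUBROUTINE provide_t
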
From 057238ede88f74df7f52a218ef275478dc462169 Mon Sep 17 00:00:00 2001 From: Anthony Scemama Date: Tue, 11 Jul 2017 21:05:56 +0200 Subject: [PATCH 31/31] Codelet OK --- example/Makefile | 26 ++++++++++++++++---------- example/irp_example2.irp.f | 2 +- example/uvwt.irp.f | 2 +- src/build_file.py | 8 ++++++-- src/irpy_files.py | 4 ++-- src/lib/__init__.pyc | Bin 165 -> 141 bytes src/lib/manager.pyc | Bin 311 -> 287 bytes src/lib/static_ashes.pyc | Bin 99433 -> 93601 bytes src/templates/general.make | 18 ++++++++++++------ src/templates/ioer.f90 | 4 ++-- 10 files changed, 40 insertions(+), 24 deletions(-) diff --git a/example/Makefile b/example/Makefile index 860b446..08bfe3a 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,7 +1,12 @@ -IRPF90= ../bin/irpf90 -d -IRPF90FLAGS= -I input/ -# Make | Ninja -BUILD_SYSTEM= make +# make | ninja +############## + +BUILD_SYSTEM= ninja + +####### + +IRPF90= ../bin/irpf90 +IRPF90FLAGS= -I input --codelet=v:100 .EXPORT_ALL_VARIABLES: @@ -10,8 +15,8 @@ SRC = OBJ = # Compiler ! Will be overwriten by the ENV one if avalaible. -FC ?= gfortran-6 -FCFLAGS ?= -O2 +FC =ifort +FCFLAGS ?= -O2 CC ?= gcc CFLAGS ?= -O2 @@ -33,17 +38,18 @@ CXXFLAGS ?= -O2 # | | I _________ # | | I c(` ')o # | l I \. ,/ -# _/j L l\_! _// +# _/j L l\_! _// \\_ #Misc AR ?= RANLIB ?= # Variable need by IRPF90 -ifeq ($(BUILD_SYSTEM),ninja) +BUILD_SYSTEM_stripped=$(strip $(BUILD_SYSTEM)) +ifeq ($(BUILD_SYSTEM_stripped),ninja) BUILD_FILE=IRPF90_temp/build.ninja IRPF90FLAGS += -j -else ifeq ($(BUILD_SYSTEM),make) +else ifeq ($(BUILD_SYSTEM_stripped),make) BUILD_FILE=IRPF90_temp/build.make BUILD_SYSTEM += -j else @@ -60,7 +66,7 @@ ARCH = $(addprefix $(CURDIR)/,IRPF90_temp/irpf90.a) all: $(EXE) define run - $(BUILD_SYSTEM) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1) + $(BUILD_SYSTEM_stripped) -C $(dir $(BUILD_FILE) ) -f $(notdir $(BUILD_FILE) ) $(1) endef #We allow for the user to ask for 'relative' path diff --git a/example/irp_example2.irp.f b/example/irp_example2.irp.f index 2d0d23b..a5299b6 100644 --- a/example/irp_example2.irp.f +++ b/example/irp_example2.irp.f @@ -1,6 +1,6 @@ program irp_example2 print *, "Example 2" - print *, 't = ', t + print *, 'v = ', v IRP_WRITE t diff --git a/example/uvwt.irp.f b/example/uvwt.irp.f index 859cd09..365a577 100644 --- a/example/uvwt.irp.f +++ b/example/uvwt.irp.f @@ -27,5 +27,5 @@ END_PROVIDER INTEGER function fu(x,y) INTEGER, INTENT(in) :: x,y - fu = int(x+y+1+3) + fu = x+y+1 END FUNCTION diff --git a/src/build_file.py b/src/build_file.py index b62a63e..bc2e446 100644 --- a/src/build_file.py +++ b/src/build_file.py @@ -413,9 +413,13 @@ def run(d_module, ninja): l_irp_sup_o += ["irp_locks.irp.o"] l_irp_sup_s += ["irp_locks.irp.F90"] + if command_line.do_profile or command_line.do_codelet: + l_irp_sup_o += ["irp_rdtsc.o"] + l_irp_sup_s += ["irp_rdtsc.c"] + if command_line.do_profile: - l_irp_sup_o += ["irp_profile.irp.o", "irp_rdtsc.o"] - l_irp_sup_s += ["irp_profile.irp.F90", "irp_rdtsc.c"] + l_irp_sup_o += ["irp_profile.irp.o"] + l_irp_sup_s += ["irp_profile.irp.F90"] l_irp_sup_o = map(dress, l_irp_sup_o) l_irp_sup_s = map(dress, l_irp_sup_s) diff --git a/src/irpy_files.py b/src/irpy_files.py index cf21007..b438d31 100644 --- a/src/irpy_files.py +++ b/src/irpy_files.py @@ -294,7 +294,7 @@ class Irpy_comm_world(object): from util import lazy_write_file from util import ashes_env - str_ = ashes_env.render('irp_stack.F90', { + str_ = ashes_env.render('irp_stack.f90', { 'do_debug': command_line.do_debug, 'do_openmp': 
command_line.do_openmp,
         'do_memory': command_line.do_memory
@@ -318,6 +318,6 @@ class Irpy_comm_world(object):
         from util import lazy_write_file
         from util import ashes_env

-        str_ = ashes_env.render('irp_lock.F90', {'entity': sorted(self.d_entity)})
+        str_ = ashes_env.render('irp_lock.f90', {'entity': sorted(self.d_entity)})
         filename = os.path.join(irpf90_t.irpdir, 'irp_locks.irp.F90')
         lazy_write_file(filename, str_)
diff --git a/src/lib/__init__.pyc b/src/lib/__init__.pyc
index bf8d3fde2d852329102bd7b9fe121b15009c39e6..b58c681ebfa8de0f73ff52826fd93fd1c7162296 100644
Binary files differ (GIT binary patch payload omitted)
diff --git a/src/lib/manager.pyc b/src/lib/manager.pyc
index fd188400523ad59038fbc62563152f2338fa08e3..1d835e9c91b7d1680cd7ed4f57b7b63aba98546b 100644
Binary files differ (GIT binary patch payload omitted)
diff --git a/src/lib/static_ashes.pyc b/src/lib/static_ashes.pyc
index 14f6f7567af1e3e906a3389aaf95ddad04bb065d..6eabe599133798422b085653b19f05b36c7db06f 100644
Binary files differ (GIT binary patch payload omitted)
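Taken together, the Makefile patches leave example/ with a two-stage build:
the top-level Makefile only drives the generated IRPF90_temp/build.make (or
build.ninja) file. A session looks roughly like this (output abridged and
uncolored; the executable name is whatever the egrep over 'program'
statements finds):

    $ cd example
    $ make BUILD_SYSTEM=ninja
     Running the IRPF90-compiler...
     Creating the archive...
     Build irp_example2...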