Removed tabs

This commit is contained in:
Anthony Scemama 2017-03-17 09:44:29 +01:00
parent dd2afebb0d
commit ebd4c780fa
14 changed files with 511 additions and 566 deletions


@ -1,21 +1,7 @@
PYTHON=python
PYVERSION=$(shell $(PYTHON) -c "import sys; print(sys.version[:3])")
HAS_CYTHON=$(shell bash -c "which cython &> /dev/null && echo 1 || echo 0")
ifneq ($(HAS_CYTHON),0)
../bin/irpf90: irpf90.so
rm ../bin/irpf90 ; cd ../bin ; ln -s ../src/irpf90_python.exe irpf90
irpf90.so : $(wildcard *.py) irpf90.c
./cython_setup.py build_ext --inplace
irpf90.c: irpf90.py
cython --embed irpf90.py
else
../bin/irpf90: irpf90_python.exe
rm ../bin/irpf90 ; cd ../bin ; ln -s ../src/irpf90_python.exe irpf90
endif
clean:
rm -f *.c *.so *.pyc *.pyo 2>/dev/null


@ -1,7 +1,7 @@
#!/usr/bin/env python
# IRPF90 is a Fortran90 preprocessor written in Python for programming using
# the Implicit Reference to Parameters (IRP) method.
# Copyright (C) 2009 Anthony SCEMAMA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -20,8 +20,8 @@
# Anthony Scemama
# LCPQ - IRSAMC - CNRS
# Universite Paul Sabatier
# 118, route de Narbonne
# 31062 Toulouse Cedex 4
# scemama@irsamc.ups-tlse.fr
import os, sys
@ -41,7 +41,7 @@ def dress(f, in_root=False):
""" Transfoms the filename into $PWD/IRPF90_temp/f
Note:
In root=True resurn $PWD/f
In root=True resurn $PWD/f
"""
pwd = os.getcwd()
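For orientation, a minimal sketch of what the docstring describes (not the actual implementation; it assumes the IRPF90_temp/ prefix used elsewhere in this file):

import os

def dress_sketch(f, in_root=False):
    # $PWD/f when in_root is set, otherwise $PWD/IRPF90_temp/f
    pwd = os.getcwd()
    if in_root:
        return os.path.join(pwd, f)
    return os.path.join(pwd, "IRPF90_temp", f)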
@ -66,14 +66,14 @@ def create_build_touches(l_irp_m, ninja):
result_ninja = '\n'.join([
"build {target_o}: compile_fortran_{irp_id} {target_F90} | {list_of_modules_irp}",
" short_in = {short_target_F90}",
" short_out = {short_target_o}",
""
" short_out = {short_target_o}",
""
])
result_make = '\n'.join([
"{target_o}: {target_F90} | {list_of_modules_irp}",
'\t@printf "F: {short_target_F90} -> {short_target_o}\\n"',
"\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""
'\t@printf "F: {short_target_F90} -> {short_target_o}\\n"',
"\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""
])
result = result_ninja if ninja else result_make
@ -97,14 +97,14 @@ def create_build_archive(l_irp_o, l_usr_o_wo_main, l_ext_o, l_irp_sup_o, ninja=T
list_of_object = ' '.join(l_irp_o + l_usr_o_wo_main + l_ext_o + l_irp_sup_o)
result_ninja = '\n'.join([
"build {lib}: archive_{irp_id} {list_of_object}",
" short_out = {short_lib}",
""])
"build {lib}: archive_{irp_id} {list_of_object}",
" short_out = {short_lib}",
""])
result_make = '\n'.join([
"{lib}: {list_of_object}",
'\t@printf "Archive: {short_lib}\\n"',
"\t@$(AR) cr $@ $^", ""])
"{lib}: {list_of_object}",
'\t@printf "Archive: {short_lib}\\n"',
"\t@$(AR) cr $@ $^", ""])
result = result_ninja if ninja else result_make
return result.format(**locals())
@ -114,7 +114,7 @@ def create_build_link(t, l_irp_m, l_usr_m, l_ext_m, ninja=True):
# (Module, List[str], List[str]) -> str
""" Create the build command who will link the .o file into the target executable
To link we need the .o file corresponding to the target, and the ar lib.
To link we need the .o file corresponding to the target, and the ar lib.
"""
irp_id = irpf90_t.irp_id
@ -124,8 +124,8 @@ def create_build_link(t, l_irp_m, l_usr_m, l_ext_m, ninja=True):
basename = os.path.basename(filename)
if basename != progname:
from util import logger
logger.info('program-name `{0}` != file-name `{1}` (using file-name for now...)'.format(progname,basename))
target = dress(filename, in_root=True)
short_target = filename
@ -139,12 +139,12 @@ def create_build_link(t, l_irp_m, l_usr_m, l_ext_m, ninja=True):
result_ninja = '\n'.join([
"build {target}: link_{irp_id} {target_o} {irp_lib} | {list_of_module}",
" short_out = {short_target}",
""])
""])
result_make = '\n'.join([
"{target}:{target_o} {irp_lib} | {list_of_module}",
'\t@printf "Link: {short_target}\\n"',
"\t@$(FC) $^ $(LIB) -o $@",
"{target}:{target_o} {irp_lib} | {list_of_module}",
'\t@printf "Link: {short_target}\\n"',
"\t@$(FC) $^ $(LIB) -o $@",
""])
result = result_ninja if ninja else result_make
@ -158,8 +158,8 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True):
- The module can produce a .MOD file
- The module can Need another .MOD file.
This .MOD file can be produced by:
1) a file generated by IRP_F90 preprocessor.
2) a file defined by the user, but not a .irp.f90 file.
3) a file not handled at all by IRPF90.
@ -186,10 +186,10 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True):
# Expensive and stupid. We can create a dict to do the lookup only once
for m in t.needed_modules_usr:
# m is name
for x in l_module:
if m in x.gen_mod and x.filename != t.filename:
needed_modules.append("%s.irp.o" % x.filename)
from util import uniquify
needed_modules = uniquify(needed_modules)
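A sketch of the dictionary-based lookup the comment above suggests (the helper name is made up; l_module, gen_mod, filename and needed_modules_usr are the attributes used in the surrounding loop):

def build_module_index(l_module):
    # Map each generated Fortran module name to the file that defines it,
    # so each lookup becomes O(1) instead of a scan over l_module.
    d_mod = {}
    for x in l_module:
        for m in x.gen_mod:
            d_mod[m] = x.filename
    return d_mod

# d_mod = build_module_index(l_module)
# needed_modules = ["%s.irp.o" % d_mod[m] for m in t.needed_modules_usr
#                   if m in d_mod and d_mod[m] != t.filename]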
@ -211,23 +211,23 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True):
inline_include = True
if not inline_include:
# Wrong name, this does not work!
#list_of_includes = ' '.join(map(lambda x: dress(x, in_root=True), t.includes))
raise NotImplementedError
else:
# The includes have already been included
list_of_includes = ' '
l_build = [
"build {target_o}: compile_fortran_{irp_id} {target_F90} | {list_of_includes} {list_of_modules} {list_of_modules_irp}",
" short_in = {short_target_F90}",
" short_out = {short_target}",
""
" short_out = {short_target}",
""
]
l_build_make = [
"{target_o}: {target_F90} | {list_of_includes} {list_of_modules} {list_of_modules_irp}",
'\t@printf "F: {short_target_F90} -> {short_target}\\n"',
'\t@printf "F: {short_target_F90} -> {short_target}\\n"',
"\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""
]
@ -236,14 +236,14 @@ def create_build_compile(t, l_module, l_ext_modfile=[], ninja=True):
l_build += [
"build {target_module_o}: compile_fortran_{irp_id} {target_module_F90} | {list_of_includes} {list_of_modules} ",
" short_in = {short_target_module_F90}",
" short_out = {short_target_module_o}",
""
" short_out = {short_target_module_o}",
""
]
l_build_make += [
"{target_module_o}: {target_module_F90} | {list_of_includes} {list_of_modules}",
'\t@printf "F: {short_target_module_F90} -> {short_target_module_o}\\n"',
"\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""
'\t@printf "F: {short_target_module_F90} -> {short_target_module_o}\\n"',
"\t@$(FC) $(FCFLAGS) -c $^ -o $@", ""
]
l_cur = l_build if ninja else l_build_make
@ -285,95 +285,95 @@ def create_makefile(d_flags,d_var,irpf90_flags,ninja=True):
result = ["IRPF90= irpf90",
"IRPF90FLAGS= %s" % irpf90_flags,
"BUILD_SYSTEM= %s" % ('ninja' if ninja else 'make'),
""]
""]
# Export all the env variable used by irpf90
result += ['.EXPORT_ALL_VARIABLES:',
'',
'\n'.join("{0} = {1}".format(k, v) for k, v in sorted(d_flags.iteritems())),
'',
'\n'.join("{0} = {1}".format(k, ' '.join(v)) for k, v in sorted(d_var.iteritems())),
'']
result += [ r'# Dark magic below modify with caution!',
r'# "You are Not Expected to Understand This"',
r'# "You are Not Expected to Understand This"',
r"# .",
r"# /^\ .",
r'# /\ "V",',
r"# /__\ I O o",
r"# //..\\ I .",
r"# \].`[/ I",
r"# /l\/j\ (] . O",
r"# /. ~~ ,\/I .",
r"# \\L__j^\/I o",
r"# \/--v} I o .",
r"# | | I _________",
r"# | | I c(` ')o",
r"# | l I \. ,/",
r"# _/j L l\_! _//^---^\\_",
r""]
r"# /^\ .",
r'# /\ "V",',
r"# /__\ I O o",
r"# //..\\ I .",
r"# \].`[/ I",
r"# /l\/j\ (] . O",
r"# /. ~~ ,\/I .",
r"# \\L__j^\/I o",
r"# \/--v} I o .",
r"# | | I _________",
r"# | | I c(` ')o",
r"# | l I \. ,/",
r"# _/j L l\_! _//^---^\\_",
r""]
result += ["",
"ifeq ($(BUILD_SYSTEM),ninja)",
"ifeq ($(BUILD_SYSTEM),ninja)",
"\tBUILD_FILE=IRPF90_temp/build.ninja",
"\tIRPF90FLAGS += -j",
"else ifeq ($(BUILD_SYSTEM),make)",
"\tBUILD_FILE=IRPF90_temp/build.make",
"\tBUILD_SYSTEM += -j",
"else",
"DUMMY:",
"\tBUILD_SYSTEM += -j",
"else",
"DUMMY:",
"\t$(error 'Wrong BUILD_SYSTEM: $(BUILD_SYSTEM)')",
"endif"]
"endif"]
result += ["",
"define run_and_touch",
" $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2)",
"endef",
"",
" $(BUILD_SYSTEM) -C $(dir $(1) ) -f $(notdir $(1) ) $(addprefix $(CURDIR)/, $(2)) && touch $(2)",
"endef",
"",
"EXE := $(shell egrep -ri '^\s*program' *.irp.f | cut -d'.' -f1)",
"",
".PHONY: all",
"",
"",
"all: $(BUILD_FILE)",
"\t$(call run_and_touch, $<, $(EXE))",
"",
".NOTPARALLEL: $(EXE)",
".NOTPARALLEL: $(EXE)",
"$(EXE): $(BUILD_FILE)",
"\t$(call run_and_touch, $<, $(EXE))",
"$(BUILD_FILE): $(shell find . -maxdepth 2 -path ./IRPF90_temp -prune -o -name '*.irp.f' -print)",
"\t$(IRPF90) $(IRPF90FLAGS)",
"",
"clean:",
'\trm -f -- $(BUILD_FILE) $(EXE)',
'\t$(shell find IRPF90_temp -type f \\( -name "*.o" -o -name "*.mod" -name "*.a" \\) -delete;)',
"veryclean: clean",
"\trm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags"]
"",
"clean:",
'\trm -f -- $(BUILD_FILE) $(EXE)'
'\t$(shell find IRPF90_temp -type f \\( -name "*.o" -o -name "*.mod" -name "*.a" \\) -delete;)',
"veryclean: clean",
"\trm -rf IRPF90_temp/ IRPF90_man/ irpf90_entities dist tags"]
import util
data = '%s\n' % '\n'.join(result)
util.lazy_write_file('Makefile',data,conservative=True)
def create_make_all_clean(l_main):
#
'''Create the ALL and CLEAN targets of the Makefile
Note: Portability doesn't matter. -delete is maybe not POSIX,
but -exec rm {} + is far more ugly!
'''
l_executable =' '.join(dress( t.filename, in_root=True) for t in l_main)
output = [".PHONY : all",
"all: {l_executable}",
"",
".PHONY: clean",
"clean:",
'\tfind . -type f \( -name "*.o" -o -name "*.mod" \) -delete; rm -f {l_executable} --'
""]
output = [".PHONY : all",
"all: {l_executable}",
"",
".PHONY: clean",
"clean:",
'\tfind . -type f \( -name "*.o" -o -name "*.mod" \) -delete; rm -f {l_executable} --'
""]
return [ '\n'.join(output).format(**locals())]
def create_var_and_rule(d_flags, ninja):
@ -385,7 +385,7 @@ def create_var_and_rule(d_flags, ninja):
# Rules
t = [
"rule compile_fortran_{irp_id}",
" command = $FC $FCFLAGS -c $in -o $out",
" command = $FC $FCFLAGS -c $in -o $out",
" description = F : $short_in -> $short_out",
"",
"rule compile_c_{irp_id}",
@ -399,7 +399,7 @@ def create_var_and_rule(d_flags, ninja):
"rule archive_{irp_id}",
" command = $AR cr $out $in",
" description = Archive: $short_out",
"",
"",
"rule link_{irp_id}",
" command = $FC $FCFLAGS $in $LIB -o $out",
" description = Link: $short_out",
@ -422,7 +422,7 @@ d_default = {
"CFLAGS": "-O2",
"CXX": "g++",
"CXXFLAGS": "-O2",
"LIB": ""}
"LIB": ""}
d_flags = dict()
for k, v in d_default.iteritems():
@ -436,15 +436,15 @@ for k in ['SRC', 'OBJ']:
def create_generalmakefile(ninja):
create_makefile(d_flags,d_var, include_dir,ninja)
def run(d_module, ninja):
#(Dict[str,Module],bool) -> str
"""Wrote the ninja file needed to compile the program
Note:
- FC, AR, CC, CXX, LIB, FCFLAGS, CFLAGS, CXXFLAGS are compiler environment variables that are read
- SRC, OBJ: the non-.irp.f files defined by the user
"""
# Add required flags
@ -510,8 +510,8 @@ def run(d_module, ninja):
output = create_var_and_rule(d_flags, ninja)
if not ninja:
output += create_make_all_clean(l_mod_main)
# Create all the .irp.F90 -> .o
for m in l_mod:
output.append(create_build_compile(m, l_mod, l_ext_m, ninja))
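For orientation, roughly what one generated ninja stanza looks like once the compile template above is formatted; the paths and the irp_id value are purely illustrative:

example_stanza = '\n'.join([
    "build IRPF90_temp/foo.irp.o: compile_fortran_1234 IRPF90_temp/foo.irp.F90 | foo_mod.o",
    " short_in = foo.irp.F90",
    " short_out = foo.irp.o",
    ""])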


@ -168,7 +168,7 @@ def run(d_entity, d_routine):
def worker(l):
filename, text = l
lazy_write_file(filename, text)
parmap(worker, l_data_to_write)


@ -1,41 +0,0 @@
#!/usr/bin/env python
# IRPF90 is a Fortran90 preprocessor written in Python for programming using
# the Implicit Reference to Parameters (IRP) method.
# Copyright (C) 2009 Anthony SCEMAMA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Anthony Scemama
# LCPQ - IRSAMC - CNRS
# Universite Paul Sabatier
# 118, route de Narbonne
# 31062 Toulouse Cedex 4
# scemama@irsamc.ups-tlse.fr
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import os
to_remove = """__init__.py cython_setup.py version.py command_line.py""".split()
ext_modules = []
for f in os.listdir('.'):
if f.endswith(".py") and f not in to_remove:
module = f.split('.')[0]
ext_modules.append(Extension(module, [f]))
setup(name = 'IRPF90 extensions',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules)


@ -42,15 +42,15 @@ class Entity(object):
############################################################
def __init__(self, text, label, name=None, comm_world=None):
# (list[str], str, int, Irpy_comm_world)
'''Instantiate the object.
Args:
text: List of lines between BEGIN_PROVIDER and END_PROVIDER included
label: A unique integer id (useful when profiling)
name: The name of the provider defined after the chosen BEGIN_PROVIDER statement
comm_world: An object to communicate with the external world.
'''
assert type(text) == list
assert len(text) > 0
@ -59,7 +59,7 @@ class Entity(object):
self.label = label
self.text = text
self.same_as = text[0].filename[1]
self.name = name if name else self.same_as
self.comm_world = comm_world
@ -70,27 +70,27 @@ class Entity(object):
# ~ # ~ # ~
@irpy.lazy_property
def d_entity(self):
# () -> Dict[str,Entity]
'''Create an alias to the global dictionary of Entity.
Note: Be aware of the possibility of Cyclic Dependency.
'''
return self.comm_world.d_entity
@irpy.lazy_property
def cm_t_filename_parsed_text(self):
# () -> Tuple[str, Parsed_text]
'''Create an alias to the global tuple for parsed text
Note: self.comm_world.t_filename_parsed_text needs d_entity.
Be aware of the possibility of Cyclic Dependency
'''
return self.comm_world.t_filename_parsed_text
@irpy.lazy_property
def d_type_lines(self):
# () -> Dict[Line, Tuple[int,Line] ]
'''Construct a mapping table between the type of the line and its position'''
from collections import defaultdict
d = defaultdict(list)
for i, line in enumerate(self.text):
@ -103,45 +103,45 @@ class Entity(object):
# ~ # ~ # ~
@irpy.lazy_property
def is_main(self):
# () -> bool
'''Check if this Entity is the main one
Example:
BEGIN_PROVIDER [pi, double precision] &
BEGIN_PROVIDER [e, double precision]
return True for 'pi' and False for 'e'
'''
return self.name == self.same_as
@irpy.lazy_property
def prototype(self):
# () -> Line
'''Find the declaration statement associated with the name of the provider
Example:
BEGIN_PROVIDER [pi, double precision] &
BEGIN_PROVIDER [e, double precision]
if self.name == 'e', this will return BEGIN_PROVIDER [e, double precision]
'''
d = self.d_type_lines
return next(line for _,line in d[Begin_provider]+d[Cont_provider] if line.filename[1] == self.name)
@irpy.lazy_property
def others_entity_name(self):
# () -> List[str]
'''Extract the other entity names defined'''
d = self.d_type_lines
return [line.filename[1] for _,line in d[Begin_provider]+d[Cont_provider] if not line.filename[1] == self.name]
@irpy.lazy_property
def doc(self):
# () -> List[str]
doc = [line.text.lstrip()[1:] for _,line in self.d_type_lines[Doc]]
if not doc:
logger.warning("Entity '%s' is not documented" % (self.name))
@ -149,7 +149,7 @@ class Entity(object):
@irpy.lazy_property
def documented(self):
#() -> bool
return bool(self.doc)
# ~ # ~ # ~
@ -158,8 +158,8 @@ class Entity(object):
@irpy.lazy_property_mutable
def is_written(self):
#() -> bool
'''Check if it will be written on disk'''
return any(self.d_entity[i].is_written for i in self.parents)
@irpy.lazy_property
@ -205,7 +205,7 @@ class Entity(object):
@irpy.lazy_property_mutable
def is_read(self):
'''Check if it will be read from disk'''
return any(self.d_entity[i].is_read for i in self.parents)
@irpy.lazy_property
@ -258,7 +258,7 @@ class Entity(object):
if self.is_self_touched or any(self.d_entity[i].is_touched for i in self.children):
return True
return False
# ~ # ~ # ~
# INCLUDE, USE, CALL
@ -266,8 +266,8 @@ class Entity(object):
@irpy.lazy_property
def includes(self):
# () -> str
'''Extract the names of the includes that need to be included in this Entity'''
return [line.filename for _,line in self.d_type_lines[Include]]
@irpy.lazy_property
@ -277,12 +277,12 @@ class Entity(object):
@irpy.lazy_property
def calls(self):
'''Extract the names of the functions called by the entity'''
def extract_name(line):
return line.text.split('(', 1)[0].split()[1].lower()
return [extract_name(line) for _,line in self.d_type_lines[Call] ]
# ~ # ~ # ~
# Array Dimension
@ -290,19 +290,19 @@ class Entity(object):
@irpy.lazy_property
def dim(self):
# () -> List[str]
'''Extract the dimensions of the needed array as a list of variable names
Example:
BEGIN_PROVIDER [real, ao_num ]
-> []
BEGIN_PROVIDER [ real, ao_oneD_p, (ao_num) ]
-> ['ao_num']
BEGIN_PROVIDER [ real, ao_oneD_prim_p, (ao_num,ao_prim_num_max) ]
-> ['ao_num', 'ao_prim_num_max']
'''
line = self.prototype.text.split('!')[0]
buffer = line.replace(']', '').split(',', 2)
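A self-contained sketch of the parsing illustrated by the examples above (assumption: the dimension part is whatever follows the second comma inside the brackets; the helper name is made up):

def parse_dim_sketch(prototype_text):
    # "BEGIN_PROVIDER [ real, ao_oneD_p, (ao_num,ao_prim_num_max) ]" -> ['ao_num', 'ao_prim_num_max']
    line = prototype_text.split('!')[0]
    buf = line.replace(']', '').split(',', 2)
    if len(buf) < 3:
        return []
    dims = buf[2].strip().lstrip('(').rstrip(')')
    return [d.strip() for d in dims.split(',') if d.strip()]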
@ -331,24 +331,24 @@ class Entity(object):
@irpy.lazy_property
def type(self):
# () -> str
'''Compute the fortran type code of the entity'''
type_ = self.prototype.text.split(',')[0].split('[')[1].strip()
if not type_:
logger.error( "Error in definition of %s." % (self.name))
sys.exit(1)
if self.dim:
return "%s, allocatable" % (type_)
else:
return type_
@irpy.lazy_property
def header(self):
# () -> List[str]
'''Compute all the code needed to instantiate the entity'''
name = self.name
@ -360,7 +360,7 @@ class Entity(object):
else:
str_ += " [:]"
l = [str_]
if self.dim and command_line.align != '1':
l += [" !DIR$ ATTRIBUTES ALIGN: %s :: %s" % (command_line.align, name)]
@ -374,16 +374,16 @@ class Entity(object):
@irpy.lazy_property
def fmodule(self):
# () -> str
'''Construct the name of the module that will contain the entity'''
name = self.prototype.filename[0].replace('/', '__').split('.irp.f')[0]
return '%s_mod' % name
############################################################
@irpy.lazy_property
def regexp(self):
# () -> Regex
'''Compile a regex targeted to 'search' the name of this entity'''
import re
return re.compile(r"([^a-z0-9'\"_]|^)%s([^a-z0-9_]|$)" % (self.name), re.I).search
# ~ # ~ # ~
@ -392,10 +392,10 @@ class Entity(object):
@irpy.lazy_property
def toucher(self):
# () -> List[str]
'''Build the f90 routine that handles the cache invalidation'''
# Only one per EntityCollection
if not self.is_main:
return []
@ -465,20 +465,20 @@ class Entity(object):
##########################################################
@irpy.lazy_property
def free(self):
# () -> List[ str ]
'''Compute a part of a subroutine used to free a variable'''
name = self.name
result = ["!",
"! >>> FREE %s" % (name),
" %s_is_built = .False." % (self.same_as)]
" %s_is_built = .False." % (self.same_as)]
if self.dim:
result += [
" if (allocated(%s)) then"%(name),
" deallocate (%s)"%(name)]
if command_line.do_memory:
result += " print *, 'Deallocating %s'"%(name)
result += " print *, 'Deallocating %s'"%(name)
result += [" endif"]
@ -488,8 +488,8 @@ class Entity(object):
##########################################################
@irpy.lazy_property
def provider(self):
# () -> List[str]
'''Create the fortran90 code for the EntityCollection'''
if not self.is_main:
return []
@ -498,16 +498,16 @@ class Entity(object):
same_as = self.same_as
def dimsize(x):
# (str) -> str
'''Compute the number of elements in the array'''
try:
b0, b1 = x.split(':')
except ValueError:
return x
b0_is_digit = b0.replace('-', '').isdigit()
b1_is_digit = b1.replace('-', '').isdigit()
if b0_is_digit and b1_is_digit:
size = str(int(b1) - int(b0) + 1)
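For illustration, a self-contained version of the bound arithmetic shown above (the non-numeric branch is a guess: the real code presumably builds an expression instead):

def dimsize_sketch(x):
    # "5"   -> "5"         (plain extent)
    # "2:5" -> "4"         (int(5) - int(2) + 1)
    # "n:m" -> "m - n + 1"  (symbolic bounds, assumed form)
    try:
        b0, b1 = x.split(':')
    except ValueError:
        return x
    if b0.replace('-', '').isdigit() and b1.replace('-', '').isdigit():
        return str(int(b1) - int(b0) + 1)
    return "%s - %s + 1" % (b1, b0)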
@ -627,7 +627,7 @@ class Entity(object):
# Get the raw text for the builder
# ~#~#~#~#~#
#Next return the first element of the iterator
ps_text = next(text for filename, text in self.cm_t_filename_parsed_text
if self.prototype.filename[0].startswith(filename))
begin = next(i for i, (_, line) in enumerate(ps_text)
@ -669,7 +669,7 @@ class Entity(object):
import parsed_text
# Move the variable to top, and add the text
parsed_text.move_to_top_list(text, [Declaration, Implicit, Use])
result.extend( line.text for _,line in text if not isinstance(line, (Begin_doc, End_doc, Doc, Cont_provider)))


@ -41,36 +41,36 @@ def main():
if command_line.do_help:
command_line.usage()
return
if command_line.do_version:
from version import version
print version
return
if command_line.do_init:
from build_file import create_generalmakefile
create_generalmakefile(command_line.do_ninja)
return
comm_world = Irpy_comm_world()
if command_line.do_graph:
comm_world.t_filename_parsed_text # Initialize entity need. Dirty I know.
print 'graph { '
for name,entity in comm_world.d_entity.items():
if entity.needs:
print ' {0} -> {1}'.format(name, ' '.join(entity.needs))
print '}'
return
if command_line.do_preprocess:
for filename, text in comm_world.preprocessed_text:
if filename in command_line.preprocessed:
for line in text:
print line.text
return
if command_line.do_touch:
for var in command_line.touched:
@ -80,7 +80,7 @@ def main():
print "Touching %s invalidates the following entities:" % var
for x in sorted(d_entity[var].parents):
print "- %s" % (x, )
return
if command_line.do_codelet:
import profile
@ -102,7 +102,7 @@ def main():
profile.run(comm_world.d_entity)
if command_line.do_openmp:
comm_world.create_lock()
if __name__ == '__main__':
main()


@ -63,9 +63,9 @@ class LineWithName(Line):
l_name = l_buf[0].split()
if len(l_name) < 2:
from util import logger
logger.error("Syntax Error: %s" % line)
sys.exit(1)
return l_name.pop()
l_type = [


@ -29,12 +29,12 @@ import os
import sys
if __name__ == "__main__":
from irpf90_t import mandir
entity = sys.argv[1].lower()
filename = '%s.l'% entity
if filename not in os.listdir(mandir):
print "Error: `%s` does not exist"% entity
sys.exit(-1)
os.system("man %s" % os.path.join(mandir,filename))


@ -18,61 +18,61 @@ class Irpy_comm_world(object):
'''Maestro.'''
def __init__(self,l_dir=None, l_file=None):
# (Iter, Iter) -> None
# Create directories
from itertools import ifilterfalse
i_folder = ifilterfalse(os.path.exists, (irpf90_t.irpdir, irpf90_t.mandir))
map(os.mkdir,i_folder)
# List files
l_dir =l_dir if l_dir else (command_line.include_dir+['.'])
l_not_dir = [d for d in l_dir if not (os.path.exists(d) and os.path.isdir(d))]
if l_not_dir:
logger.error('Trying to include non-existing directories: [%s]' % ','.join(l_not_dir))
sys.exit(1)
# Create folders in IRPDIR
i_folder = ifilterfalse(os.path.exists, (os.path.join(irpf90_t.irpdir,d) for d in l_dir))
map(os.mkdir, i_folder)
s_folder_abs = set(os.path.abspath(path) for path in l_dir)
s_file_folder_all = set(flatten(listdir(path,abspath=True) for path in s_folder_abs))
# Take everything!
s_file_folder = filter(lambda f: os.path.isfile(f) and not f.startswith("."), s_file_folder_all)
s_file_tot = set(l_file) if l_file else set()
s_file_tot.update(s_file_folder)
s_file_rel = set(os.path.relpath(f,self.cwd) for f in s_file_tot)
# Lazy copy of the files
for f in s_file_rel:
src = os.path.join(self.cwd,f)
text_ref = open(src, 'rb').read()
dest = os.path.join(self.cwd,irpf90_t.irpdir, f)
lazy_write_file(dest, text_ref)
if command_line.do_codelet:
s_file_tot.update(command_line.codelet[3])
# Now filter the .irp.f files
self.irpf90_files_ordered=sorted(filter(lambda f: f.endswith(".irp.f") ,s_file_rel))
@irpy.lazy_property
def cwd(self):
return os.getcwd()
@irpy.lazy_property
def t_filename_preprocessed_text(self):
'''Tuple (filename, preprocessed_text)'''
from preprocessed_text import Preprocess_text
def worker_preprocess(filename):
@ -82,8 +82,8 @@ class Irpy_comm_world(object):
@irpy.lazy_property
def l_preprocessed_text(self):
# (None) -> List[Line]
'''List preprocessed_text'''
return [line for _, text in self.t_filename_preprocessed_text for line in text]
@ -98,7 +98,7 @@ class Irpy_comm_world(object):
@irpy.lazy_property
def d_entity(self):
# None -> Dict[Str,Entity]
'''An entity is a collection of lines between BEGIN_PROVIDER and END_PROVIDER '''
from irpf90_t import Begin_provider, End_provider
from entity import Entity
@ -108,33 +108,33 @@ class Irpy_comm_world(object):
l_provider = [ self.l_preprocessed_text[begin:end] for begin, end in zip(l_begin, l_end)]
l_ent = []
for icount, buf in enumerate(l_provider):
from functools import partial
Ent_part = partial(Entity,buf,icount,comm_world=self)
ent = Ent_part()
l_ent += [ent] + [Ent_part(other) for other in ent.others_entity_name]
# O(n^2) but who cares
l_duplicate = [x for x in l_ent if l_ent.count(x) > 1]
if l_duplicate:
from util import logger
logger.error('You have duplicate PROVIDER: %s' % ' '.join([e.name for e in l_duplicate]))
import sys
sys.exit(1)
# Python 2.6 doesn't allow dict comprehensions
d_ent = dict()
for e in l_ent:
d_ent[e.name] = e
#
# Second pass
#
# Modify parameters of variables
# Touch / SoftTouch
def find_variable(line):
l_var = line.lower.split()[1:]
if len(l_var) < 1:
@ -145,7 +145,7 @@ class Irpy_comm_world(object):
return l_var
d_modif = dict()
from irpf90_t import Touch, SoftTouch, Free
from util import flatten
for cmd, l_type in [('is_self_touched', [Touch, SoftTouch]),
@ -181,12 +181,12 @@ class Irpy_comm_world(object):
# ~#~#~#~#~#
from irpf90_t import Subroutine, Function, Program, End
d_type = self.d_type_lines
l_begin = sorted(i for type_ in (Subroutine, Function, Program) for i, _ in d_type[type_])
l_end = [i for i, _ in d_type[End]]
from routine import Routine
text = self.l_preprocessed_text
l_rou = [ Routine(text[b:e]) for b, e in zip(l_begin, l_end) if not isinstance(text[b], Program)]
# Now we can create a dict and at it
@ -207,39 +207,39 @@ class Irpy_comm_world(object):
for x in entity.calls:
d_called_by[x].add(name)
from util import uniquify
for routine in d_rou.values():
for x in routine.calls:
d_called_by[x].add(routine.name)
for routine in d_rou.values():
routine.called_by = sorted(d_called_by[routine.name])
l_set = [d_rou[name].touches_my_self for name in routine.calls if name in d_rou]
routine.touches_ancestor = set().union(*l_set)
return d_rou
@irpy.lazy_property
def t_filename_parsed_text(self):
'''(filename,parsed_text)'''
d_entity = self.d_entity
d_routine = self.d_routine
import parsed_text
vtuple = [(v, s.same_as, s.regexp) for v, s in d_entity.iteritems()]
def worker_parsed(filename_text):
filename, text = filename_text
return parsed_text.get_parsed_text(filename, text, d_entity, d_routine, vtuple)
parsed_text_0 = parmap(worker_parsed, self.t_filename_preprocessed_text)
from irpf90_t import NoDep,Declaration,Implicit,Use,Cont_provider
def moved_to_top_l(ptext):
l = [NoDep, Declaration, Implicit, Use, Cont_provider]
for _, text in ptext:
parsed_text.move_to_top_list(text, l)
#Touch routine
parsed_text.build_sub_needs(parsed_text_0, d_routine)
@ -248,13 +248,13 @@ class Irpy_comm_world(object):
parsed_text_1 = parsed_text.add_subroutine_needs(parsed_text_0, d_routine)
parsed_text_1 = parsed_text.move_variables(parsed_text_1)
moved_to_top_l(parsed_text_1)
parsed_text.check_opt(parsed_text_1)
parsed_text_1 = parsed_text.perform_loop_substitutions(parsed_text_1)
#touch entity
stuple = [(s, v.regexp) for s, v in d_routine.iteritems() if v.is_function]
parsed_text.build_needs(parsed_text_1, d_routine, stuple,d_entity)
return parsed_text_1
@ -277,7 +277,7 @@ class Irpy_comm_world(object):
# Module data
if m.has_irp_module:
filename = os.path.join(irpdir, '%s.irp.module.F90' % m.filename)
text = '\n'.join(m.header + m.head)
lazy_write_file(filename, '%s\n' % text)
# Subroutines
@ -290,7 +290,7 @@ class Irpy_comm_world(object):
irp_stack.create()
def create_buildfile(self,ninja):
import build_file
build_file.run(self.d_module,ninja)
def create_touches(self):
@ -303,11 +303,11 @@ class Irpy_comm_world(object):
def create_lock(self):
from util import lazy_write_file
l = sorted(self.d_entity.keys())
out = []
for v in l:
out += self.d_entity[v].locker
out += [ "subroutine irp_init_locks_%s()"%(irpf90_t.irp_id),
@ -317,6 +317,6 @@ class Irpy_comm_world(object):
out += [ " call irp_lock_%s(.False.)"%v ]
out += [ "end subroutine", "" ]
filename = os.path.join(irpf90_t.irpdir,'irp_locks.irp.F90')
lazy_write_file(filename, '\n'.join(out))


@ -37,7 +37,7 @@ def put_info(text, filename):
str_ = '{text:{width}} ! {filename}:{i:4}'
for _, line in text:
line.text = str_.format(text=line.text,filename=line.filename,i=line.i,width=lenmax)
return text
@ -50,13 +50,13 @@ class Fmodule(object):
"! !",
"! DO NOT MODIFY IT BY HAND !",
"!-----------------------------------------------!",
""]
""]
def __init__(self, text, filename, d_variable):
self.text = put_info(text, filename)
self.filename = filename[:-6]
self.name = "%s_mod" % (self.filename).replace('/', '__').replace('.', 'Dot')
self.d_all_variable = d_variable
@irpy.lazy_property
def prog_name(self):
@ -75,22 +75,22 @@ class Fmodule(object):
def head(self):
'''The module that contains the declaration of the entity'''
body = list(self.use)
body += list(self.dec)
body += [header for var in self.l_entity for header in var.header]
if body:
result = ["module %s" % (self.name)]
result += body
result += ["end module %s" % (self.name)]
else:
result = []
return result
@irpy.lazy_property
def has_irp_module(self):
return bool(self.head)
@irpy.lazy_property
def needed_vars(self):
@ -102,7 +102,7 @@ class Fmodule(object):
@irpy.lazy_property
def generated_text(self):
'Routines generated by IRPF90: provide, build, ...'
result = []
for var in self.l_entity:
result += var.provider
@ -111,7 +111,7 @@ class Fmodule(object):
result += var.reader
if var.is_written:
result += var.writer
return result
@irpy.lazy_property
@ -134,18 +134,18 @@ class Fmodule(object):
result = []
variable_list = []
skip_interface = False
for vars, line in text:
if type(line) in [Interface, End_interface]:
skip_interface = not skip_interface
if skip_interface:
result.append((vars, line))
continue
if type(line) in [Subroutine, Function, Program]:
#Deep copy...
variable_list = list(vars)
elif type(line) == End:
result += [([], Use(line.i, x, line.filename)) for x in build_use(variable_list, self.d_all_variable)]
@ -156,19 +156,19 @@ class Fmodule(object):
return result
def extract_use_dec_text(text):
# (List[ Tuple(Entity,Line) ]) -> (List[ Tuple(Entity,Line),List[ Tuple(Entity,Line),List[ Tuple(Entity,Line))
'''Extract the global declaration statements and module use from the declaration of the function.'''
inside = 0
result,dec,use,module = [],[],[],[]
for vars, line in text:
if isinstance(line, (Subroutine, Function, Program,Interface,Module)):
inside += 1
if type(line) == Module:
module.append((vars,line))
if inside:
result.append((vars, line))
@ -177,14 +177,14 @@ class Fmodule(object):
use.append((vars, line))
elif type(line) == Declaration:
dec.append((vars, line))
if isinstance(line,(End,End_interface,End_module)):
inside += -1
if inside:
print 'Something went wrong'
sys.exit(1)
return use, module, dec, result
@ -202,32 +202,32 @@ class Fmodule(object):
@irpy.lazy_property
def gen_mod(self):
'''List of modules generated by the user in this module...'''
return set("%s" % line.subname for _, line in self.residual_text_use_dec.module)
@irpy.lazy_property
def dec(self):
'''The declaration of this module
Note:
Because the user can define F90 Types, we need to keep the correct order.
Warning:
If we uniquify, that can cause a problem with types, as in:
```type toto
integer :: n
end type toto
integer :: n
```
Fix:
We need to support the Type keyword.
'''
l = [" %s" % line.text for _, line in self.residual_text_use_dec.dec]
from util import uniquify
if len(l) != len(uniquify(l)):
raise NotImplementedError
return l
@ -244,26 +244,26 @@ class Fmodule(object):
from parsed_text import move_to_top_list, move_interface
move_to_top_list(result, [Declaration, Implicit, Use])
move_interface(result)
return [line.text for _, line in result]
@irpy.lazy_property
def needed_modules(self):
l = set(x.split(',only').pop(0).split()[1] for x in self.generated_text + self.head + self.residual_text if x.lstrip().startswith("use "))
if self.name in l:
l.remove(self.name)
return l
@irpy.lazy_property
def needed_modules_irp(self):
return [i for i in self.needed_modules if i.endswith("_mod")]
@irpy.lazy_property
def needed_modules_usr(self):
return [i for i in self.needed_modules if not i.endswith("_mod")]


@ -205,15 +205,15 @@ def move_to_top_list(text, it):
Note:
- The permutation needs to be done following the `it` order
- We can have `nested` Subroutine / Function (because of interfaces)
- This function is called way too much. It needs to be efficient
- This function is Impure
- One pass over `text`
NB:
- I am not really proud of the Sentinel value for the deletion,
but I already wasted so much time on more clever but non-working solutions...
'''
assert set(it).issubset([NoDep, Declaration, Implicit, Use, Cont_provider])
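As an illustration of the sentinel pattern the NB describes (a generic sketch, not the irpf90 implementation: move selected items to the front in one pass, mark moved slots with None, then delete the sentinels):

def move_matching_to_top(items, predicate):
    # Collect items that must move, replacing their slots with a None sentinel.
    moved = []
    for i, x in enumerate(items):
        if predicate(x):
            moved.append(x)
            items[i] = None
    # Insert the moved items at the top, then drop the sentinels in reverse order.
    items[0:0] = moved
    for i in reversed(range(len(items))):
        if items[i] is None:
            del items[i]
    return items

# move_matching_to_top(['a', 'use x', 'b', 'use y'], lambda s: s.startswith('use'))
# -> ['use x', 'use y', 'a', 'b']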
@ -231,16 +231,16 @@ def move_to_top_list(text, it):
for i, (l_var, line) in enumerate(text):
t = type(line)
if t in [Begin_provider, Module,Program, Subroutine, Function]:
l_begin.append(i)
elif t in [End_provider, End]:
l_begin.pop()
elif l_begin and t in it:
d_permutation[t].append( (l_begin[-1], [l_var, line]) )
# Put the sentinel, will be deleted after the insertion
text[i] = None
# ~ # ~ # ~
# O r d e r t h e m
@ -264,8 +264,8 @@ def move_to_top_list(text, it):
# Now do the Delete part of the move. Fortunately we put a sentinel to know which lines to delete
for i in reversed(xrange(len(text))):
if text[i] is None:
del text[i]
def move_interface(parsed_text,s_type=(Use,Implicit,Declaration,Subroutine,Function,Module)):
@ -273,24 +273,24 @@ def move_interface(parsed_text,s_type=(Use,Implicit,Declaration,Subroutine,Funct
'''Move everything contained in an 'interface' block below the first instance of s_type that precedes it
Note:
- This function is impure
'''
# Get the bounds of the interface
i_begin = [ i for i, (_, line) in enumerate(parsed_text) if isinstance(line,Interface) ]
i_end = [ i+1 for i, (_, line) in enumerate(parsed_text) if isinstance(line,End_interface) ]
# Get the begin of the insert
i_insert = []
for begin in i_begin:
i_insert.append(next(i+1 for i in range(begin,-1,-1) if isinstance(parsed_text[i][1], s_type)))
# Do the insert and the delete in one passe
for insert, begin, end in zip(i_insert,i_begin,i_end):
parsed_text[insert:insert] = parsed_text[begin:end]
padding = end-begin
parsed_text[begin+padding:end+padding] = []
######################################################################
def build_sub_needs(parsed_text, d_subroutine):
@ -298,7 +298,7 @@ def build_sub_needs(parsed_text, d_subroutine):
'''Set the needs and provides arguments of the Routines present in parsed_text
Note:
This function is impure
'''
l_buffer = []
@ -347,15 +347,15 @@ def move_variables(parsed_text):
revtext = list(text)
revtext.reverse()
skip_interface = False
try:
for vars, line in revtext:
if type(line) in [Interface, End_interface]:
skip_interface = not skip_interface
if skip_interface:
append(([], line))
continue
if type(line) in [End_provider, End]:
varlist = []
@ -399,10 +399,10 @@ def move_variables(parsed_text):
varlist += vars
append(([], line))
except:
from util import logger
logger.error("Unable to parse file %s", line)
import sys
sys.exit(1)
result.reverse()


@ -117,7 +117,7 @@ def get_canonized_text(text_lower):
def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc):
# ( int,str,str,str,str,bool) -> Irpf90_t
'''Find the type of a text line'''
line = line.rstrip()
@ -129,11 +129,11 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc):
# Handle archaic do loop of f77
firstword = l_word[0]
if firstword.isdigit():
l_word = l_word[1:]
firstword = l_word[0]
if firstword == "contains":
return [Contains(i, line, filename)], False
if firstword == "end_doc":
return [End_doc(i, line, filename)], False
@ -151,8 +151,8 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc):
#label do-loop (outer: do i=1,sze)
reg_do_lab = ur":\s+do\s+"
if re.search(reg_do_lab,line_lower):
return [Do(i,line,filename)], is_doc
lower_line = line_lower.strip()[1:]
@ -170,7 +170,7 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc):
result = [Simple_line(i, line, filename)]
logger.info("%s:"
"irpf90 may not work with preprocessor directives. You can use"
"irpf90 may not work with preprocessor directives. You can use"
"Irp_if ... Irp_else ... Irp_endif"
"instead of"
"#ifdef ... #else ... #endif"%line)
@ -189,7 +189,7 @@ def get_type(i, filename, line, line_lower, line_lower_canonized, is_doc):
# Detect errors
if firstword == "dowhile":
logger.error("%s 'do while' should be in 2 words." % Do(i, line, filename))
sys.exit(1)
return [Simple_line(i, line, filename)], is_doc
@ -202,8 +202,8 @@ def save_and_execute(irpdir, scriptname, code, interpreter):
''' Save the script in irpdir/scriptname and Execute it
Note:
The scripts are executed in the original directory of the .irp.f (aka '..')
and this directory is added to PYTHONPATH.
'''
irpdir_scriptname = os.path.abspath(os.path.join(irpdir, scriptname))
@ -213,11 +213,11 @@ def save_and_execute(irpdir, scriptname, code, interpreter):
# Execute shell
import util
try:
text = util.check_output('PYTHONPATH=$PYTHONPATH:. %s %s' % (interpreter, irpdir_scriptname), shell=True, bufsize=-1, cwd=os.path.join(irpdir,'..'))
except:
util.logger.error("Something wrong append with embeded '%s' script: %s"% (interpreter, irpdir_scriptname))
import sys
sys.exit(1)
util.logger.error("Something wrong append with embeded '%s' script: %s"% (interpreter, irpdir_scriptname))
import sys
sys.exit(1)
# Create the Line
p = Preprocess_text(scriptname)
@ -242,21 +242,21 @@ def execute_shell(text):
import sys
def fail(l, a, b):
logger.error("%s In Begin_Shell, %s '%s'" % (l,a, b))
sys.exit(1)
for begin,end in zip(l_begin,l_end):
header = text[begin]
header_text = header.text
for bracket in ['[', ']']:
n = header_text.count(bracket)
assert n <= 1, fail(header_text, "Too many", bracket)
assert n >= 1, fail(header_text, "Missing", bracket)
else:
interpreter = header_text[header_text.find('[')+1: header_text.find(']')].strip()
script = ['%s\n' % l.text for l in text[begin+1:end] ]
scriptname="%s_shell_%d" % (header.filename, header.i)
l_output.append(save_and_execute(irpdir, scriptname, script,interpreter))
@ -271,9 +271,9 @@ def execute_shell(text):
l_end_include = [i+1 for i in l_end]
padding = 0
for begin,end, out in zip(l_begin,l_end_include,l_output):
text_new[begin+padding:end+padding] = out
padding += len(out) - (end-begin)
return text_new
@ -407,31 +407,31 @@ def remove_comments(text, form):
'''Remove all comments
Note:
This function is impure
'''
result = []
def remove_after_bang(str_):
# str -> str
i_bang = str_.find('!')
if i_bang == -1:
return str_
else:
sentinel, inside = None, False
for i,c in enumerate(str_):
if c == '"' or c == "'":
if not inside:
inside = True
sentinel = c
elif sentinel == c:
inside = False
elif c == '!' and not inside:
return str_[:i].strip()
return str_
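A few illustrative calls, assuming the helper above were exposed at module level (the expected results follow from the quote-aware scan):

# remove_after_bang("x = 1 ! a comment")       -> "x = 1"
# remove_after_bang("print *, 'keep ! this'")  -> "print *, 'keep ! this'"  (the '!' is inside a string)
# remove_after_bang("y = 2")                   -> "y = 2"                   (no '!', returned unchanged)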
if form == Free_form:
for line in text:
@ -442,10 +442,10 @@ def remove_comments(text, form):
else:
newline = line.text.lstrip()
if (newline != "" and newline[0] != "!#"):
text = remove_after_bang(line.text)
if text:
line.text = text
result.append(line)
return result
else:
@ -511,7 +511,7 @@ def irp_simple_statements(text):
'''Processes simple statements'''
def process_irp_rw(line, rw, t):
'''Read Write'''
assert type(line) == t
buffer = line.text.split()
if len(buffer) == 2:
@ -652,7 +652,7 @@ def irp_simple_statements(text):
def process_function(line):
assert type(line) == Function
subname = line.subname
length = len(subname)
i = line.i
f = line.filename
@ -740,15 +740,15 @@ def process_old_style_do(text):
DO 1 i=1,10'''
def change_matching_enddo(begin, number):
for i,line in enumerate(text[begin+1:]):
if isinstance(line,(Continue,Enddo)) and line.text.split()[0] == number:
text[begin+1+i] = Enddo(line.i, " enddo", line.filename)
return
from util import logger
logger.error(text[begin], "(%s) Old-style do loops should end with 'continue' or 'end do'" % text[begin])
from util import sys
sys.exit(1)
result = []
for i in range(len(text)):
@ -769,8 +769,8 @@ def process_old_style_do(text):
######################################################################
def change_single_line_ifs(text):
# List[Line] -> List[Line]
'''Changes: `if (test) result`
into
`if (test) then
result
endif`'''
@ -785,9 +785,9 @@ def change_single_line_ifs(text):
else:
buffer = line.text
begin = buffer.find('(')
if begin == -1:
logger.error("No '(' in if statement: %s" % line)
sys.exit(1)
level = 0
instring = False
@ -805,7 +805,7 @@ def change_single_line_ifs(text):
break
if level != 0:
logger.error("If statement not valid: %s (%s)" % (line, line.filename))
sys.exit(1)
test = buffer[:end]
code = buffer[end:]
@ -832,34 +832,34 @@ def check_begin_end(raw_text):
'''
d_block = {Enddo: [Do],
Endif: [If],
End_provider: [Begin_provider],
End_doc: [Begin_doc],
End: [Program, Subroutine, Function],
End_module: [Module],
End_interface: [Interface]}
from collections import defaultdict
d_type = defaultdict(list)
for line in raw_text:
d_type[type(line)].append(line)
for t_end, l_begin in d_block.iteritems():
n_end = len(d_type[t_end])
n_begin = sum(len(d_type[t_begin]) for t_begin in l_begin)
if n_end != n_begin:
if n_end > n_begin:
logger.error("You have more close statement than open statement (%s) (%s)",line.filename,t_end)
else:
logger.error('You have more end statement than open statenemt for (%s) (%s)' % (line.filename, t_end))
logger.error("You have more close statement than open statement (%s) (%s)",line.filename,t_end)
else:
logger.error('You have more end statement than open statenemt for (%s) (%s)' % (line.filename, t_end))
for i in zip([l for i in l_begin for l in d_type[i]], d_type[t_end]):
logger.debug(i)
sys.exit(1)
######################################################################
def remove_ifdefs(text):
@ -911,14 +911,14 @@ class Preprocess_text(object):
def text(self):
with open(self.filename, 'r') as f:
str_ = f.read()
#Dirty thing. We will replace 'end program' by 'end subroutine'
#because afterward the program will be replaced by a subroutine...
import re
transform = re.compile(re.escape('end program'), re.IGNORECASE)
return transform.sub('end subroutine', str_)
@irpy.lazy_property_mutable
def text_align(self):


@ -82,15 +82,15 @@ class Routine(object):
############################################################
@irpy.lazy_property
def touches_my_self(self):
return set(x for line in self.text for x in line.text.split()[1:] if isinstance(line,(Touch, SoftTouch)))
@irpy.lazy_property_mutable
def touches_ancestor(self):
raise AttributeError
@irpy.lazy_property
def touches(self):
return list(self.touches_my_self.union(self.touches_ancestor))
############################################################
@irpy.lazy_property


@ -60,13 +60,13 @@ def parmap(f, it, parallel=False):
'''Parallel version of the std map function
The parallel flag toggles the // execution
Note:
- We try to use the multiprocessing map if possible, else we use our own
- The order of the sequence is conserved
- Will use all the processors possible
- We return a List
- The traceback is lost if an error occurs, but an Exception is raised.
'''
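A minimal usage sketch, assuming this module is util.py and parmap is importable from it:

from util import parmap

def square(x):
    return x * x

# Order is preserved; with parallel=True the work is spread over the available processors.
print parmap(square, range(6), parallel=True)   # [0, 1, 4, 9, 16, 25]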
if not parallel:
@ -82,12 +82,12 @@ def parmap(f, it, parallel=False):
# https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled
#from cPickle import PicklingError
#try:
# p = multiprocessing.Pool(nproc)
# l_res = p.map(f, it,nproc)
#except PicklingError:
# pass
#else:
# return l_res
# ~!~!~!
# Parallelisation By Us
@ -103,7 +103,7 @@ def parmap(f, it, parallel=False):
# (List[any]) -> (List[any])
'''Same as 'f' but for a chunk'''
return map(f,chunk)
q_in = multiprocessing.JoinableQueue()
q_out = multiprocessing.Queue()
@ -116,21 +116,21 @@ def parmap(f, it, parallel=False):
'''Read a task from q_in, execute it, and store the result in q_out
Note:
- We use 'F' and not 'f'.
- The for loop will break when the stop_condition occurs
- We get and put an idx to allow ordering the results afterward
- We store any exception, to raise it afterward
'''
for i, x in iter(q_in.get, stop_condition):
try:
result = F(x)
except BaseException as e:
t = e
else:
t = (i, result)
q_out.put(t)
q_in.task_done()
@ -143,7 +143,7 @@ def parmap(f, it, parallel=False):
# Add the job to the queue (Note we add an idx, this will all)
for i, x in enumerate(it_chunk):
q_in.put((i, x))
# Now add the stop contidion and join
# (Because q_in.get is blocking we don't need to join the queue before)
@ -159,13 +159,13 @@ def parmap(f, it, parallel=False):
# Check if an error has occurred
try:
from itertools import ifilter
e = next(ifilter(lambda t: isinstance(t,BaseException), l_res))
except StopIteration:
# Now we need first to order the results, and secondly to flatten them
return [item for _, chunk in sorted(l_res) for item in chunk]
else:
raise e
# ~#~#~#~#~#
# I O _ R E L A T E D
@ -177,16 +177,16 @@ def cached_file(filename, text):
'''Check if the file located at filename contains the same data as text
Return:
True if the data is the same, False otherwise
'''
def digest(data):
# (str) -> str
'''compute a unique data id'''
return hashlib.md5(data).hexdigest()
try:
text_ref = open(filename, 'rb').read()
except IOError:
return False
else:
@ -198,14 +198,14 @@ def lazy_write_file(filename, text, conservative=False,touch=False):
'''Write data lazily in filename location.
Note:
If conservative is set, we don't overwrite.
'''
if not os.path.exists(filename) or not cached_file(filename, text) and not conservative:
with open(filename, 'w') as f:
f.write(text)
elif touch:
os.utime(filename,None)
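A short usage sketch (the file path is illustrative): because of the cached_file check above, the file is only rewritten when its content actually changes, which keeps timestamps stable for make/ninja.

from util import lazy_write_file

lazy_write_file('IRPF90_temp/example.irp.F90', 'program example\nend program\n')
# A second call with identical text does not rewrite the file (and leaves its
# mtime alone unless touch=True is passed).
lazy_write_file('IRPF90_temp/example.irp.F90', 'program example\nend program\n')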
def listdir(directory, abspath=False):
#(str, bool) -> List[str]
@ -246,9 +246,9 @@ def uniquify(l,sort=False):
'''Uniquify an immutable iterable. Doesn't preserve the order'''
r = list(set(l))
if not sort:
return r
else:
return sorted(r)
def OrderedUniqueList(l):
# (Iter, bool) -> List[Any]
@ -260,7 +260,7 @@ def flatten(l_2d):
'''Construct a copy of the 2d list collapsed into one dimension.
Note:
- We collapse in a C-style fashion (row_major).
'''
return [item for l_1d in l_2d for item in l_1d]
@ -274,9 +274,9 @@ def build_dim(l_dim, colons=False):
'''Construct a valid Fortran90 array dimension code from a dimension list
Example:
[4,8] -> (4,8) if not colons
[4,8] -> (:,:) if colons
'''
if not l_dim:
return ""