diff --git a/robogen/.gitignore b/robogen/.gitignore
new file mode 100644
index 00000000..c18dd8d8
--- /dev/null
+++ b/robogen/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/robogen/README.md b/robogen/README.md
new file mode 100644
index 00000000..2f0b3d85
--- /dev/null
+++ b/robogen/README.md
@@ -0,0 +1,76 @@
+robogen — modular code generation tool
+======================================
+
+*robogen* is a command-line tool which allows the generation of code from
+templates. It is based around a powerful plugin architecture, and most aspects
+of the tool can be extended through it: grammars, artifact descriptions,
+generation templates and file populators are reachable from plugins.
+
+This tool aims to provide a replacement for _RoboCompDSL_ for component
+generation, and a straightforward development framework for future code
+generation needs.
+
+Installation
+------------
+
+The tool is distributed as a Python package, so `pip` can take care of the
+installation. The following commands will install both the command-line tool and
+the Python modules for plugins to import, and then check whether the
+installation succeeded by running the tool:
+
+```sh
+pip install .
+robogen --help
+```
+
+Some plugins are bundled in the `extras/plugins` directory. These can be
+installed to the default plugin directory by running:
+
+```sh
+mkdir -p /opt/robocomp/robogen/plugins
+cp -r extras/plugins/* /opt/robocomp/robogen/plugins
+```
+
+Usage
+-----
+
+The tool can be invoked as follows:
+
+```
+robogen [OPTIONS] INPUT_FILE OUTPUT_PATH
+```
+
+The `INPUT_FILE` leads to the input artifact description to be processed.
+`OUTPUT_PATH` points to the directory where files will be generated. `OPTIONS`
+is an optional list of the following command-line flags.
+
+
+
+ --plugin-dir path
+  -P path |
+ Add path to the list of paths where robogen will look for
+ plugins at initialization. |
+
+
+ --import-dir path
+  -I path |
+ Add path to the list of paths where robogen will look for
+ files when an include sentence is processed. |
+
+
+
+Alternatively, `RG_PLUGIN_DIR` and `RG_IMPORT_DIR` can be set to provide a
+plugin and import path respectively.
+
+Status
+------
+
+This tool is currently a functional prototype. It delivers all the features
+advertised, but there are still some rough edges.
+
+Extras
+------
+
+Under the *extras* directory of this repository, development material and
+examples can be found. The contents of this directory may change at any time
+and without prior notice, and are included only for testing and completeness
+purposes.
+
diff --git a/robogen/extras/plugins/agent/__init__.py b/robogen/extras/plugins/agent/__init__.py
new file mode 100644
index 00000000..06571aa4
--- /dev/null
+++ b/robogen/extras/plugins/agent/__init__.py
@@ -0,0 +1,12 @@
+__name__ = 'agent'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+from .parser import ADSLParser
+from .artifact import AgentArtifact
+
+class Plugin:
+ @classmethod
+ def install(klass, installer):
+ installer.install_parser('adsl', ADSLParser, {'adsl'})
+ installer.install_artifact('agent', AgentArtifact, markers={'a'}, extensions={'adsl'})
diff --git a/robogen/extras/plugins/agent/artifact.py b/robogen/extras/plugins/agent/artifact.py
new file mode 100644
index 00000000..d7816e32
--- /dev/null
+++ b/robogen/extras/plugins/agent/artifact.py
@@ -0,0 +1,86 @@
+from robogen.artifact import ImportManager
+from schema import Schema, Optional
+import robogen
+
+ROBOCOMP_COMM_ITEM = Schema({
+ 'name': str,
+ Optional('type', default='ice'): str,
+})
+
+class AgentArtifact(robogen.Artifact):
+ extensions = {
+ 'root': {},
+ 'communications': {},
+ }
+
+ @classmethod
+ def schema(klass):
+ return Schema({
+ 'name': str,
+ 'language': str,
+ Optional('imports', default=[]): [str],
+ Optional('apis', default=[]): [str],
+ Optional('qtdebug', default=False): bool,
+ Optional('ignore_attrs', default=[]): [str],
+ Optional('communications', default=[]): {
+ Optional('robocomp'): {
+ Optional('requires', default=[]): [ROBOCOMP_COMM_ITEM],
+ Optional('implements', default=[]): [ROBOCOMP_COMM_ITEM],
+ Optional('publishes', default=[]): [ROBOCOMP_COMM_ITEM],
+ Optional('subscribes_to', default=[]): [ROBOCOMP_COMM_ITEM],
+ },
+
+ # Install alternative communication types
+ **klass.extensions['communications'],
+ },
+ Optional('subscriptions', default=[]): {
+ # NOTE: some syntax sugar would make this section much more
+ # readable for the agent designer (see ADSL).
+ Optional('nodes', default=[]): [str],
+ Optional('edges', default=[]): [str],
+ },
+
+ # Install root extensions
+ **klass.extensions['root'],
+ })
+
+ @classmethod
+ def extend_schema(klass, anchor, key, frag):
+ klass.extensions[anchor][key] = frag
+
+ def __init__(self, raw, importer):
+ super().__init__(raw, importer)
+
+ # Grab tree generated by the superclass constructor and delete it from
+ # the artifact.
+ tree = self.tree
+ # TODO: uncomment line below
+ # del self.tree
+
+ # Dumb copy, but better be explicit. Cleverness is not future-proof.
+ self.name = tree['name']
+ self.language = tree['language']
+ self.imports = importer.import_files(tree['imports'])
+ self.qtdebug = tree['qtdebug']
+ self.ignored_attrs = tree['ignore_attrs']
+ self.communications = tree['communications'] # TODO
+ # TODO: hack but I don't have time for PyParsing shenanigans
+ self.sub_nodes = tree['subscriptions']['nodes'] if tree['subscriptions'] else {}
+ self.sub_edges = tree['subscriptions']['edges'] if tree['subscriptions'] else {}
+
+ @property
+ def ice_interfaces(self):
+ robocomp_comms = self.communications['robocomp']
+
+ candidates = [
+ *robocomp_comms['requires'],
+ *robocomp_comms['implements'],
+ *robocomp_comms['publishes'],
+ *robocomp_comms['subscribes_to'],
+ ]
+
+ return {
+ candidate['name']
+ for candidate in candidates
+ if candidate['type'] == 'ice'
+ }
diff --git a/robogen/extras/plugins/agent/parser.py b/robogen/extras/plugins/agent/parser.py
new file mode 100644
index 00000000..18e9d164
--- /dev/null
+++ b/robogen/extras/plugins/agent/parser.py
@@ -0,0 +1,147 @@
+from functools import reduce
+from pyparsing import (
+ CaselessKeyword, Group, Optional, QuotedString, Suppress, Word, ZeroOrMore,
+ alphanums, alphas, cppStyleComment, pythonStyleComment, delimitedList
+)
+
+import robogen
+
+#
+# Symbols
+#
+
+SC = Suppress(Word(';'))
+OBRACE = Suppress(Word('{'))
+CBRACE = Suppress(Word('}'))
+OBRACKET = Suppress(Word('['))
+CBRACKET = Suppress(Word(']'))
+OPARENS = Suppress(Word('('))
+CPARENS = Suppress(Word(')'))
+AT = Word('@')
+PIPE = Word('|')
+
+#
+# Keywords
+#
+
+IMPORT = CaselessKeyword('import')
+AGENT = CaselessKeyword('Agent')
+LANGUAGE = CaselessKeyword('Language')
+QTDEBUG = CaselessKeyword('Qtdebug')
+APIS = CaselessKeyword('Apis')
+IGNORE_ATTRS = CaselessKeyword('IgnoreAttrs')
+SUBSCRIPTIONS = CaselessKeyword('Subscriptions')
+COMMUNICATIONS = CaselessKeyword('Communications')
+ROBOCOMP = CaselessKeyword('robocomp')
+IMPLEMENTS = CaselessKeyword('implements')
+REQUIRES = CaselessKeyword('requires')
+PUBLISHES = CaselessKeyword('publishes')
+SUBSCRIBES_TO = CaselessKeyword('subscribesTo')
+
+#
+# Convenient definitions
+#
+
+ident = Word(alphas + '_', alphanums + '_')
+
+#
+# Robocomp communication elements
+#
+
+comm_robocomp_item = Group(
+ ident('name')
+ - Optional(ident('type'))
+)
+
+comm_robocomp_kind = lambda kw: \
+ Suppress(kw) - delimitedList(comm_robocomp_item) - SC
+
+comm_robocomp = Group(
+ Suppress(ROBOCOMP)
+ - OBRACE
+ - (
+ Optional(comm_robocomp_kind(IMPLEMENTS)('implements'))
+ & Optional(comm_robocomp_kind(REQUIRES)('requires'))
+ & Optional(comm_robocomp_kind(PUBLISHES)('publishes'))
+ & Optional(comm_robocomp_kind(SUBSCRIBES_TO)('subscribes_to'))
+ )
+ - CBRACE
+ - SC
+)
+
+comm_items = lambda ext: \
+ Group(reduce(lambda acc, next: acc & next, ext['communications'], Optional(comm_robocomp('robocomp'))))
+
+#
+# Agent elements
+#
+
+def turn_subs_into_dict(subs):
+ CATEGORIES = {
+ '@': 'nodes',
+ '|': 'edges',
+ }
+
+ result = {
+ 'nodes': [],
+ 'edges': [],
+ }
+
+ for symbol, sub in subs:
+ category = CATEGORIES[symbol]
+ result[category].append(sub)
+
+ return result
+
+node = Group(AT - ident)
+edge = Group(PIPE - ident)
+subs = delimitedList(node | edge).setParseAction(turn_subs_into_dict)
+
+inport = Suppress(IMPORT) - QuotedString('"') - SC
+language = Suppress(LANGUAGE) - ident - SC
+qtdebug = Suppress(QTDEBUG) - SC
+apis = Suppress(APIS) - delimitedList(ident) - SC
+ignore_attrs = Suppress(IGNORE_ATTRS) - delimitedList(ident) - SC
+subscriptions = Suppress(SUBSCRIPTIONS) - subs - SC
+communications = lambda ext: Suppress(COMMUNICATIONS) - OBRACE - comm_items(ext) - CBRACE - SC
+
+# Things come inside arrays and I don't know why? Ugly hack ahead, beware.
+hack = lambda x: x[0]
+
+contents = lambda ext: (
+ language('language').setParseAction(hack)
+ & Optional(qtdebug.setParseAction(lambda: True)('qtdebug'))
+ & Optional(apis('apis'))
+ & Optional(ignore_attrs('ignore_attrs'))
+ & Optional(subscriptions.setParseAction(hack)('subscriptions'))
+ # I don't know why but it needs to be grouped. PyParsing is weird...
+ & Optional(Group(communications(ext).setParseAction(hack))('communications'))
+)
+
+#
+# Top-level parser
+#
+
+ADSL = lambda ext: (
+ ZeroOrMore(inport)('imports')
+ - Suppress(AGENT)
+ - ident('name')
+ - OBRACE
+ # Unpack root extensions
+ - reduce(lambda acc, next: acc & next , ext['root'], contents(ext))
+ - CBRACE
+ - SC
+).ignore(cppStyleComment | pythonStyleComment)
+
+class ADSLParser(robogen.Parser):
+ extensions = {
+ 'root': set(),
+ 'communications': set(),
+ }
+
+ @classmethod
+ def extend_syntax(klass, anchor, syntax):
+ klass.extensions[anchor].add(syntax)
+
+ def parse(self, src):
+ return ADSL(ADSLParser.extensions).parseString(src).asDict()
diff --git a/robogen/extras/plugins/agent_generator_python/__init__.py b/robogen/extras/plugins/agent_generator_python/__init__.py
new file mode 100644
index 00000000..b9977081
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/__init__.py
@@ -0,0 +1,46 @@
+__name__ = 'agent_generator_python'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+DEPENDENCIES = {
+ 'agent': '==0.1.0',
+}
+
+import os
+import robogen
+
+class PythonAgentGenerator(robogen.Generator):
+ PATH = f'{os.path.dirname(__file__)}'
+
+ TREE = {
+ 'CMakeLists.txt',
+ 'DoxyFile',
+ 'README.md',
+ # etc
+ 'etc/config',
+ # src
+ 'src/CMakeLists.txt',
+ 'src/genericworker.py',
+ 'src/interfaces.py',
+ 'src/main.py',
+ 'src/specificworker.py',
+ }
+
+ KEEP = {
+ 'README.md',
+ # etc
+ 'etc/config',
+ # src
+ 'src/specificworker.py',
+ }
+
+ def tree(self, artifact):
+ tree = self.__class__.TREE.copy()
+ tree[f'src/main.py'].output = f'src/{artifact.name}.py'
+ return tree
+
+class Plugin:
+ @classmethod
+ def install(klass, installer):
+ PythonAgentGenerator.install_default_file_tree()
+ installer.install_generator('agent', 'python', PythonAgentGenerator)
diff --git a/robogen/extras/plugins/agent_generator_python/files/CMakeLists.txt b/robogen/extras/plugins/agent_generator_python/files/CMakeLists.txt
new file mode 100644
index 00000000..832e46ac
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/CMakeLists.txt
@@ -0,0 +1,6 @@
+cmake_minimum_required(VERSION 3.10)
+PROJECT( ${component_name} )
+
+SET(RC_COMPONENT_PATH $${CMAKE_CURRENT_SOURCE_DIR} CACHE STRING "component base path")
+SET(RC_COMPONENT_DEVEL_PATH $${CMAKE_CURRENT_SOURCE_DIR} CACHE STRING "component binary path")
+ADD_SUBDIRECTORY( src )
diff --git a/robogen/extras/plugins/agent_generator_python/files/DoxyFile b/robogen/extras/plugins/agent_generator_python/files/DoxyFile
new file mode 100644
index 00000000..e12f566f
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/DoxyFile
@@ -0,0 +1,1417 @@
+# Doxyfile 1.6.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+
+DOXYFILE_ENCODING = UTF-8
+
+# ROBOCOMP
+PROJECT_NAME = ${component_name}
+
+PROJECT_NUMBER =
+
+
+OUTPUT_DIRECTORY = doc
+CREATE_SUBDIRS = NO
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$$name" is automatically
+# replaced with the name of the entity): "The $$name class" "The $$name widget"
+# "The $$name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful is your file systems
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# ROBOCOMP
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+OPTIMIZE_FOR_FORTRAN = NO
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it parses.
+# With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this tag.
+# The format is ext=language, where ext is a file extension, and language is one of
+# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP,
+# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat
+# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen to replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penality.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will rougly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# ROBOCOMP
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
+# doxygen. The layout file controls the global structure of the generated output files
+# in an output format independent way. To create the layout file that represents
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name
+# of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# ROBOCOMP
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $$file, $$line, and $$text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $$version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$$file:$$line: $$text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.cpp *.h *.c *.py
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+# ROBOCOMP
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = A* B* C* D* E* F* G* H* I* J* K* L* M* N* O* P* Q* R* S* T* U* V* W* X* Y* Z*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+# ROBOCOMP
+HTML_HEADER =
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = YES
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
+# are set, an additional index file will be generated that can be used as input for
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
+# HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER =
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
+# For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the custom filter to add. For more information please see
+# Qt Help Project / Custom Filters.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
+# filter section matches.
+# Qt Help Project / Filter Attributes.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP)
+# there is already a search function so this one should typically
+# be disabled.
+
+SEARCHENGINE = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that doxygen if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/robogen/extras/plugins/agent_generator_python/files/README.md b/robogen/extras/plugins/agent_generator_python/files/README.md
new file mode 100644
index 00000000..ba82161d
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/README.md
@@ -0,0 +1,29 @@
+# ${component_name}
+Intro to component here
+
+
+## Configuration parameters
+As any other component, *${component_name}* needs a configuration file to start. In
+```
+etc/config
+```
+you can find an example of a configuration file. We can find there the following lines:
+```
+EXAMPLE HERE
+```
+
+## Starting the component
+To avoid changing the *config* file in the repository, we can copy it to the component's home directory, so changes will remain untouched by future git pulls:
+
+```
+cd <${component_name}'s path>
+```
+```
+cp etc/config config
+```
+
+After editing the new config file we can run the component:
+
+```
+bin/${component_name} config
+```
diff --git a/robogen/extras/plugins/agent_generator_python/files/etc/config b/robogen/extras/plugins/agent_generator_python/files/etc/config
new file mode 100644
index 00000000..651e9198
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/etc/config
@@ -0,0 +1,10 @@
+${implements_endpoints}
+${subscribes_to_endpoints}
+${requires_proxies}
+
+${storm_topic_manager}
+
+Ice.Warn.Connections=0
+Ice.Trace.Network=0
+Ice.Trace.Protocol=0
+Ice.MessageSizeMax=20004800
diff --git a/robogen/extras/plugins/agent_generator_python/files/src/CMakeLists.txt b/robogen/extras/plugins/agent_generator_python/files/src/CMakeLists.txt
new file mode 100644
index 00000000..7fe180b5
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/src/CMakeLists.txt
@@ -0,0 +1,6 @@
+cmake_minimum_required(VERSION 2.6)
+PROJECT( ${component_name} )
+
+INCLUDE( /opt/robocomp/cmake/robocomp.cmake )
+
+ROBOCOMP_IDSL_TO_ICE( CommonBehavior ${ice_interfaces} )
diff --git a/robogen/extras/plugins/agent_generator_python/files/src/genericworker.py b/robogen/extras/plugins/agent_generator_python/files/src/genericworker.py
new file mode 100644
index 00000000..596d2979
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/src/genericworker.py
@@ -0,0 +1,41 @@
+${header}
+
+import sys, Ice, os
+from PySide2 import QtCore
+
+ROBOCOMP = ''
+try:
+ ROBOCOMP = os.environ['ROBOCOMP']
+except KeyError:
+ print('$$ROBOCOMP environment variable not set, using the default value /opt/robocomp')
+ ROBOCOMP = '/opt/robocomp'
+
+Ice.loadSlice("-I ./src/ --all ./src/CommonBehavior.ice")
+import RoboCompCommonBehavior
+
class GenericWorker(QtCore.QObject):
    """Generated base class for the component's worker; SpecificWorker extends it."""

    # Emitted by killYourSelf() to ask the application to terminate this worker.
    kill = QtCore.Signal()

    def __init__(self, mprx):
        super(GenericWorker, self).__init__()

        # Template placeholders: the generator expands these into one proxy
        # attribute assignment per required / published interface (see the
        # genericworker.py populator).
        ${requires_proxies}
        ${publishes_proxies}

        self.mutex = QtCore.QMutex(QtCore.QMutex.Recursive)
        self.Period = 30  # compute period in ms; changed via setPeriod()
        self.timer = QtCore.QTimer(self)

    @QtCore.Slot()
    def killYourSelf(self):
        # NOTE(review): rDebug is not defined or imported anywhere in this
        # template — invoking this slot would raise NameError; confirm where
        # rDebug is expected to come from.
        rDebug("Killing myself")
        self.kill.emit()

    # \brief Change compute period
    # @param per Period in ms
    @QtCore.Slot(int)
    def setPeriod(self, p):
        print("Period changed", p)
        self.Period = p
        self.timer.start(self.Period)
diff --git a/robogen/extras/plugins/agent_generator_python/files/src/interfaces.py b/robogen/extras/plugins/agent_generator_python/files/src/interfaces.py
new file mode 100644
index 00000000..e0585f86
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/src/interfaces.py
@@ -0,0 +1,164 @@
+${header}
+
+import time
+import Ice
+import IceStorm
+from rich.console import Console, Text
+console = Console()
+
+
+${load_slice_and_create_imports}
+
+${create_list_classes}
+
+${implements_and_subscribes_to_imports}
+
+
class Publishes:
    """Creates and stores IceStorm publisher proxies for the component."""

    def __init__(self, ice_connector, topic_manager):
        self.ice_connector = ice_connector
        # Map from topic name to publisher proxy; merged into the global
        # proxies map through get_proxies_map().
        self.mprx={}
        self.topic_manager = topic_manager
        # Expanded by the generator: one create_topic() call per published interface.
        ${publish_proxy_creation}


    def create_topic(self, topic_name, ice_proxy):
        # Retrieve (or create) the IceStorm topic, then return a oneway
        # publisher proxy cast to the interface type.
        topic = False
        try:
            topic = self.topic_manager.retrieve(topic_name)
        except:
            # Ignore any retrieve failure here; the loop below retries / creates.
            pass
        while not topic:
            try:
                topic = self.topic_manager.retrieve(topic_name)
            except IceStorm.NoSuchTopic:
                try:
                    topic = self.topic_manager.create(topic_name)
                except:
                    # Lost the creation race: another client created the topic
                    # first, so the next retrieve() should succeed.
                    print(f'Another client created the {topic_name} topic? ...')
        pub = topic.getPublisher().ice_oneway()
        proxy = ice_proxy.uncheckedCast(pub)
        self.mprx[topic_name] = proxy
        return proxy

    def get_proxies_map(self):
        return self.mprx
+
+
class Requires:
    """Creates and stores proxies to the remote interfaces this component requires."""

    def __init__(self, ice_connector):
        self.ice_connector = ice_connector
        # Map from config property name (e.g. 'LaserProxy') to proxy.
        self.mprx={}
        # Expanded by the generator: one create_proxy() call per required interface.
        ${require_proxy_creation}

    def get_proxies_map(self):
        return self.mprx

    def create_proxy(self, property_name, ice_proxy):
        """Read the proxy string from the Ice config and cast it; returns (ok, proxy)."""
        try:
            proxy_string = self.ice_connector.getProperties().getProperty(property_name)
            try:
                base_prx = self.ice_connector.stringToProxy(proxy_string)
                # uncheckedCast performs no remote round-trip; connection errors
                # will surface on the first actual call instead.
                proxy = ice_proxy.uncheckedCast(base_prx)
                self.mprx[property_name] = proxy
                return True, proxy
            except Ice.Exception:
                # NOTE(review): '(CameraSimple)' looks like a stale copy-paste
                # in this message — the failing interface is property_name.
                print('Cannot connect to the remote object (CameraSimple)', proxy_string)
                # traceback.print_exc()
                return False, None
        except Ice.Exception as e:
            console.print_exception(e)
            console.log(f'Cannot get {property_name} property.')
            return False, None
+
+
class Subscribes:
    """Creates object adapters and subscribes them to IceStorm topics."""

    def __init__(self, ice_connector, topic_manager, default_handler):
        self.ice_connector = ice_connector
        self.topic_manager = topic_manager
        # Expanded by the generator: one
        # create_adapter("<Name>Topic", <name>I.<Name>I(default_handler))
        # call per subscribed interface.
        ${subscribes_to_adapters_creation}

    def create_adapter(self, property_name, interface_handler):
        adapter = self.ice_connector.createObjectAdapter(property_name)
        handler = interface_handler
        proxy = adapter.addWithUUID(handler).ice_oneway()
        # Adapter property names follow the '<topic name>Topic' convention.
        topic_name = property_name.replace('Topic','')
        subscribe_done = False
        while not subscribe_done:
            try:
                topic = self.topic_manager.retrieve(topic_name)
                subscribe_done = True
            except Ice.Exception as e:
                console.log("Error. Topic does not exist (creating)", style="blue")
                time.sleep(1)
                try:
                    topic = self.topic_manager.create(topic_name)
                    subscribe_done = True
                except:
                    # NOTE(review): despite saying "Exiting", this branch does
                    # not exit — the outer loop retries after another
                    # retrieve/sleep round, potentially forever.
                    console.log(f"Error. Topic {Text(topic_name, style='red')} could not be created. Exiting")
                    status = 0  # NOTE(review): local variable, never read
        qos = {}
        topic.subscribeAndGetPublisher(qos, proxy)
        adapter.activate()
        return adapter
+
+
class Implements:
    """Creates object adapters serving the interfaces this component implements."""

    def __init__(self, ice_connector, default_handler):
        self.ice_connector = ice_connector
        # Expanded by the generator: one create_adapter() call per implemented
        # interface, with default_handler wrapped in the generated servant.
        ${implements_adapters_creation}

    def create_adapter(self, property_name, interface_handler):
        adapter = self.ice_connector.createObjectAdapter(property_name)
        adapter.add(interface_handler, self.ice_connector.stringToIdentity(property_name.lower()))
        adapter.activate()
        # NOTE(review): unlike Subscribes.create_adapter, the adapter is not
        # returned here — confirm callers do not need the reference.
+
+
class InterfaceManager:
    """Owns the Ice communicator and the per-direction proxy/adapter wrappers."""

    def __init__(self, ice_config_file):
        # TODO: Make ice connector singleton
        self.ice_config_file = ice_config_file
        self.ice_connector = Ice.initialize(self.ice_config_file)
        # Filled in by the generator with True/False (see the populator's
        # needs_rcnode()); the IceStorm topic manager is only contacted when True.
        needs_rcnode = ${needs_rcnode}
        self.topic_manager = self.init_topic_manager() if needs_rcnode else None

        # status stays 0 in this template; main.py checks it before building
        # the worker.
        self.status = 0
        self.parameters = {}
        # NOTE(review): this iterates the Ice properties object directly —
        # confirm it yields property names as intended.
        for i in self.ice_connector.getProperties():
            self.parameters[str(i)] = str(self.ice_connector.getProperties().getProperty(i))
        self.requires = Requires(self.ice_connector)
        self.publishes = Publishes(self.ice_connector, self.topic_manager)
        # Created later in set_default_hanlder(), once the worker exists.
        self.implements = None
        self.subscribes = None



    def init_topic_manager(self):
        # Topic Manager
        proxy = self.ice_connector.getProperties().getProperty("TopicManager.Proxy")
        obj = self.ice_connector.stringToProxy(proxy)
        try:
            return IceStorm.TopicManagerPrx.checkedCast(obj)
        except Ice.ConnectionRefusedException as e:
            # rcnode is unreachable: abort the whole process.
            console.log(Text('Cannot connect to rcnode! This must be running to use pub/sub.', 'red'))
            exit(-1)

    def set_default_hanlder(self, handler):
        # NOTE(review): 'hanlder' is a typo, but the generated main.py calls
        # the method by this exact name — renaming requires updating both
        # templates together.
        self.implements = Implements(self.ice_connector, handler)
        self.subscribes = Subscribes(self.ice_connector, self.topic_manager, handler)

    def get_proxies_map(self):
        """Merged name→proxy map of required and published interfaces."""
        result = {}
        result.update(self.requires.get_proxies_map())
        result.update(self.publishes.get_proxies_map())
        return result

    def destroy(self):
        """Tear down the Ice communicator (and its connections/adapters)."""
        if self.ice_connector:
            self.ice_connector.destroy()
+
+
+
+
diff --git a/robogen/extras/plugins/agent_generator_python/files/src/main.py b/robogen/extras/plugins/agent_generator_python/files/src/main.py
new file mode 100644
index 00000000..ddd83229
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/src/main.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+${header}
+
+# \mainpage RoboComp::${component_name}
+#
+# \section intro_sec Introduction
+#
+# Some information about the component...
+#
+# \section interface_sec Interface
+#
+# Description of the interface provided...
+#
+# \section install_sec Installation
+#
+# \subsection install1_ssec Software dependencies
+# Software dependencies....
+#
+# \subsection install2_ssec Compile and install
+# How to compile/install the component...
+#
+# \section guide_sec User guide
+#
+# \subsection config_ssec Configuration file
+#
+#
+# The configuration file...
+#
+#
+# \subsection execution_ssec Execution
+#
+# Just: "$${PATH_TO_BINARY}/${component_name} --Ice.Config=$${PATH_TO_CONFIG_FILE}"
+#
+# \subsection running_ssec Once running
+#
+#
+#
+
+import argparse
+# Ctrl+c handling
+import signal
+
+from rich.console import Console
+console = Console()
+
+import interfaces
+from specificworker import *
+
+#SIGNALS handler
def sigint_handler(*args):
    # Ask the Qt event loop started in __main__ to exit cleanly on Ctrl+C.
    QtCore.QCoreApplication.quit()
+
+
if __name__ == '__main__':
    # QtCore and sys are presumably re-exported by 'from specificworker import *'
    # (which star-imports genericworker) — confirm if those imports change.
    # No GUI is used, so a QCoreApplication suffices.
    app = QtCore.QCoreApplication(sys.argv)
    parser = argparse.ArgumentParser()
    # Optional positional config path; defaults to the in-tree etc/config.
    parser.add_argument('iceconfigfile', nargs='?', type=str, default='etc/config')
    parser.add_argument('--startup-check', action='store_true')

    args = parser.parse_args()
    interface_manager = interfaces.InterfaceManager(args.iceconfigfile)

    if interface_manager.status == 0:
        worker = SpecificWorker(interface_manager.get_proxies_map(), args.startup_check)
        worker.setParams(interface_manager.parameters)
    else:
        print("Error getting required connections, check config file")
        sys.exit(-1)

    # NOTE(review): 'hanlder' matches the (misspelled) method name defined in
    # the interfaces.py template.
    interface_manager.set_default_hanlder(worker)
    signal.signal(signal.SIGINT, sigint_handler)
    app.exec_()
    interface_manager.destroy()
diff --git a/robogen/extras/plugins/agent_generator_python/files/src/specificworker.py b/robogen/extras/plugins/agent_generator_python/files/src/specificworker.py
new file mode 100644
index 00000000..f97667ab
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/files/src/specificworker.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+${header}
+
+from PySide2.QtCore import QLoggingCategory, QTimer
+from PySide2.QtWidgets import QApplication
+from rich.console import Console
+from genericworker import *
+import interfaces as ifaces
+
+sys.path.append('/opt/robocomp/lib')
+console = Console(highlight=False)
+
+${dsr_import}
+
+# If RoboComp was compiled with Python bindings you can use InnerModel in Python
+# import librobocomp_qmat
+# import librobocomp_osgviewer
+# import librobocomp_innermodel
+
+
class SpecificWorker(GenericWorker):
    """User-editable worker skeleton; the generator wires DSR and the compute timer."""

    def __init__(self, proxy_map, startup_check=False):
        super(SpecificWorker, self).__init__(proxy_map)
        self.Period = 2000  # compute period in ms
        # Placeholders expanded by the generator (Qt debug categories, DSR
        # graph setup and ignored attributes).
        ${qtdebug}
        ${dsr_init}
        ${dsr_ignored_attrs}
        if startup_check:
            # Exercise the generated Ice types, then schedule application quit
            # (see startup_check below).
            self.startup_check()
        else:
            ${timeout_compute_connect}

    def __del__(self):
        """Destructor"""

    def setParams(self, params):
        # try:
        #	self.innermodel = InnerModel(params["InnerModelPath"])
        # except:
        #	traceback.print_exc()
        #	print("Error reading config params")
        return True

    ${compute_creation}

    def startup_check(self):
        ${startup_check_ice}
        # Quit the application shortly after the checks have run.
        QTimer.singleShot(200, QApplication.instance().quit)

    ${subscribes_to_methods}

    ${implements_methods}

    ${interface_specific_comment}

    ${dsr_slots}
diff --git a/robogen/extras/plugins/agent_generator_python/populators/CMakeLists.txt.py b/robogen/extras/plugins/agent_generator_python/populators/CMakeLists.txt.py
new file mode 100644
index 00000000..5c4af373
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/CMakeLists.txt.py
@@ -0,0 +1,7 @@
+import robogen
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the CMakeLists.txt template."""

    def populate(self):
        # The template's only placeholder is the component's name.
        values = dict(component_name=self.artifact.name)
        return values
diff --git a/robogen/extras/plugins/agent_generator_python/populators/DoxyFile.py b/robogen/extras/plugins/agent_generator_python/populators/DoxyFile.py
new file mode 100644
index 00000000..5c4af373
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/DoxyFile.py
@@ -0,0 +1,7 @@
+import robogen
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the DoxyFile template."""

    def populate(self):
        # The template's only placeholder is the component's name.
        values = dict(component_name=self.artifact.name)
        return values
diff --git a/robogen/extras/plugins/agent_generator_python/populators/README.md.py b/robogen/extras/plugins/agent_generator_python/populators/README.md.py
new file mode 100644
index 00000000..5c4af373
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/README.md.py
@@ -0,0 +1,7 @@
+import robogen
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the README.md template."""

    def populate(self):
        # The template's only placeholder is the component's name.
        values = dict(component_name=self.artifact.name)
        return values
diff --git a/robogen/extras/plugins/agent_generator_python/populators/etc/config.py b/robogen/extras/plugins/agent_generator_python/populators/etc/config.py
new file mode 100644
index 00000000..6fdb36c9
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/etc/config.py
@@ -0,0 +1,66 @@
+import random
+import re
+import robogen
+from robogen.utils import number_duplicates
+
+try:
+ from rcportchecker import RCPortChecker
+ RCPORTCHECKER_IMPORTED = True
+except:
+ RCPORTCHECKER_IMPORTED = False
+
+STORM_TOPIC_MANAGER = '''\
+# This property is used by the clients to connect to IceStorm.
+TopicManager.Proxy=IceStorm/TopicManager:default -p 9999
+'''
+
+DEFAULT_PORTS = {
+ 'DifferentialRobot': 10004,
+ 'Laser': 10003,
+}
+
def get_existing_port(name):
    """Return the lowest port already deployed for interface *name*, or 0 if none."""
    ports = RCPortChecker().search_interface_ports_by_name(name)
    if not ports:
        return 0
    return min(ports)
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the etc/config template."""

    def populate(self):
        return {
            # TODO: are these needed? How to get those from the agent definition?
            'implements_endpoints': '',
            'subscribes_to_endpoints': '',
            'requires_proxies': self.requires_proxies(),
            'storm_topic_manager': self.storm_topic_manager(),
        }

    def requires_proxies(self):
        """Build one '<Name><i>Proxy = ...' config line per required Ice interface."""
        comm = self.artifact.communications['robocomp']
        lines = []

        for req, index in number_duplicates(comm['requires'], lambda req: req['name']):
            if req['type'] != 'ice':
                continue

            name = req['name']

            # Prefer a well-known default port, then a port already used by a
            # deployed interface of the same name, and finally a random one.
            if name in DEFAULT_PORTS:
                port = DEFAULT_PORTS[name]
            elif RCPORTCHECKER_IMPORTED:
                port = get_existing_port(name)
            else:
                port = random.randint(10001, 19000)

            lines.append(f'{name}{index}Proxy = {name.lower()}:tcp -h localhost -p {port}\n')

        if not lines:
            return ''

        return '# Proxies for required interfaces\n' + ''.join(lines)

    def storm_topic_manager(self):
        """Return the IceStorm TopicManager snippet when pub/sub is used, else ''."""
        comm = self.artifact.communications['robocomp']
        uses_storm = comm['publishes'] or comm['subscribes_to']
        return STORM_TOPIC_MANAGER if uses_storm else ''
diff --git a/robogen/extras/plugins/agent_generator_python/populators/src/CMakeLists.txt.py b/robogen/extras/plugins/agent_generator_python/populators/src/CMakeLists.txt.py
new file mode 100644
index 00000000..2e62d324
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/src/CMakeLists.txt.py
@@ -0,0 +1,16 @@
+import os
+import robogen
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the src/CMakeLists.txt template."""

    def populate(self):
        # TODO: this will produce invalid code if the interface is not
        # implemented as CDSL due to robocomp's CMake macro.
        # Interface base names, stripped of directory and (up to two) extensions.
        ice_interfaces = {
            os.path.basename(interface).rsplit('.', maxsplit=2)[0]
            for interface in {*self.artifact.imports.recursive_imports.keys(), *self.artifact.ice_interfaces}
        }

        return {
            'component_name': self.artifact.name,
            # Sort so regeneration is deterministic: joining a set directly
            # would emit the interfaces in arbitrary, run-dependent order.
            'ice_interfaces': ' '.join(sorted(ice_interfaces)),
        }
diff --git a/robogen/extras/plugins/agent_generator_python/populators/src/genericworker.py.py b/robogen/extras/plugins/agent_generator_python/populators/src/genericworker.py.py
new file mode 100644
index 00000000..8ad9a407
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/src/genericworker.py.py
@@ -0,0 +1,33 @@
+import robogen
+from robogen.utils import number_duplicates
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of files/src/genericworker.py."""

    def populate(self):
        return {
            'requires_proxies': self.requires_proxies(),
            'publishes_proxies': self.publishes_proxies(),
        }

    def requires_proxies(self):
        """One proxy attribute assignment per required interface."""
        robocomp_comm = self.artifact.communications['robocomp']
        result = ''

        for req, i in number_duplicates(robocomp_comm['requires'], lambda req: req['name']):
            name = req['name']

            # BUG FIX: the proxies map is keyed '<Name><i>Proxy' — see the
            # etc/config populator ('{name}{i}Proxy = ...') and the REQUIRE
            # template in the interfaces.py populator — not '<Name>Proxy<i>'.
            if req['type'] == 'ice': result += f'self.{name.lower()}_proxy = mprx["{name}{i}Proxy"]\n'
            elif req['type'] == 'ros': result += f'self.{name.lower()}_proxy = ServiceClient{name}()\n'

        return result

    def publishes_proxies(self):
        """One publisher attribute assignment per published interface."""
        robocomp_comm = self.artifact.communications['robocomp']
        result = ''

        for pub, i in number_duplicates(robocomp_comm['publishes'], lambda req: req['name']):
            name = pub['name']

            # NOTE(review): the generated interfaces.py stores publishers under
            # the bare topic name (no index) — duplicate published interfaces
            # may not resolve; confirm number_duplicates yields '' for uniques.
            if pub['type'] == 'ice': result += f'self.{name.lower()}_proxy = mprx["{name}{i}"]\n'
            elif pub['type'] == 'ros': result += f'self.{name.lower()}_proxy = Publisher{name}()\n'

        return result
diff --git a/robogen/extras/plugins/agent_generator_python/populators/src/interfaces.py.py b/robogen/extras/plugins/agent_generator_python/populators/src/interfaces.py.py
new file mode 100644
index 00000000..3a22eede
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/src/interfaces.py.py
@@ -0,0 +1,160 @@
+from string import Template
+from robogen.utils import full_type_string, number_duplicates
+import os
+import robogen
+
+# TODO: Check if this can be reduced to an abstract class and some inheriting from that.
# Type-checked list subclass injected into the generated Ice module for each
# slice 'sequence' type (see create_list_classes below).
LIST_CLASSES = '''\
class ${list_type}(list):
    def __init__(self, iterable=list()):
        super(${list_type}, self).__init__(iterable)

    def append(self, item):
        assert isinstance(item, ${item_type})
        super(${list_type}, self).append(item)

    def extend(self, iterable):
        for item in iterable:
            assert isinstance(item, ${item_type})
        super(${list_type}, self).extend(iterable)

    def insert(self, index, item):
        assert isinstance(item, ${item_type})
        super(${list_type}, self).insert(index, item)

setattr(${module_name}, "${list_type}", ${list_type})
'''

# Loads a slice file and makes its generated module importable.
# BUG FIX: the placeholder was '$(unknown)', which string.Template rejects with
# ValueError when substitute(filename=...) is called in
# load_slice_and_create_imports(); it must be '${filename}'.
SLICE_LOAD = '''\
Ice.loadSlice("-I ./src/ --all ./src/${filename}.ice")
'''

SUBSCRIBES_TO = '''\
self.${name} = self.create_adapter("${name}Topic", ${name_lower}I.${name}I(default_handler))
'''

REQUIRE = '''\
self.${name}${i} = self.create_proxy("${name}${i}Proxy", ${module_name}.${name}Prx)
'''

PUBLISHES = '''\
self.${name_lower} = self.create_topic("${name}", ${module_name}.${name}Prx)
'''

IMPLEMENTS = '''\
self.${name_lower} = self.create_adapter("${name}", ${name_lower}I.${name}I(default_handler))
'''
+
class Populator(robogen.Populator):
    """Fills the placeholders of the files/src/interfaces.py template."""

    def populate(self):
        """Return the mapping from placeholder name to generated code snippet."""
        return {
            'load_slice_and_create_imports': self.load_slice_and_create_imports(),
            'create_list_classes': self.create_list_classes(),
            'implements_and_subscribes_to_imports': self.implements_and_subscribes_to_imports(),
            'publish_proxy_creation': self.publish_proxy_creation(),
            'require_proxy_creation': self.require_proxy_creation(),
            'subscribes_to_adapters_creation': self.subscribes_to_adapters_creation(),
            'implements_adapters_creation': self.implements_adapters_creation(),
            'needs_rcnode': self.needs_rcnode(),
        }

    def load_slice_and_create_imports(self):
        """One Ice.loadSlice() call plus an import per recursively imported slice file."""
        result = ''

        for path, module in self.artifact.imports.recursive_imports.items():
            # Strip the directory and the (up to two) extensions from the path.
            filename = os.path.basename(path).rsplit('.', maxsplit=2)[0]
            result += Template(SLICE_LOAD).substitute(filename=filename)
            result += f'import {module.name}\n'
            pass  # NOTE(review): dead statement, kept verbatim

        return result

    def create_list_classes(self):
        """Type-checked list subclasses for every slice 'sequence' type (except bytes)."""
        result = ''

        for path in self.artifact.imports.recursive_imports:
            name = os.path.basename(path).rsplit('.', maxsplit=2)[0]
            module = self.artifact.imports.module_providing_interface(name)

            if module is None:
                continue

            # NOTE(review): 'name' is rebound by this inner loop, shadowing the
            # interface name above — harmless as written, but easy to misread.
            for name, sequence in module.sequences.items():
                type = full_type_string(sequence['type'], module.name)

                # Byte sequences map to the builtin 'bytes'; no subclass needed.
                if type == 'bytes':
                    continue

                result += Template(LIST_CLASSES).substitute({
                    'list_type': name,
                    'item_type': type,
                    'module_name': module.name,
                })

        return result

    def implements_and_subscribes_to_imports(self):
        """Imports of the generated '<name>I' servant modules."""
        robocomp_comm = self.artifact.communications['robocomp']

        # NOTE(review): entries are accessed as strings here (interface.lower())
        # and via attribute (interface.type), while other methods index them as
        # dicts (interface['name']) — confirm the entry type supports all three.
        return '\n'.join((
            f'import {interface.lower()}I'
            for interface in robocomp_comm['implements'] + robocomp_comm['subscribes_to']
            if interface.type == 'ice'
        ))

    def publish_proxy_creation(self):
        """create_topic() calls for the generated Publishes.__init__."""
        robocomp_comm = self.artifact.communications['robocomp']
        result = ''

        for req, i in number_duplicates(robocomp_comm['publishes'], lambda req: req['name']):
            name = req['name']

            module = self.artifact.imports.module_providing_interface(name)
            # NOTE(review): unlike REQUIRE, the PUBLISHES template does not use
            # the duplicate index 'i' — duplicated published interfaces would
            # share a topic name; confirm intended.
            result += Template(PUBLISHES).substitute({
                'name': name,
                'name_lower': name.lower(),
                'module_name': module.name,
            })

        return result

    def require_proxy_creation(self):
        """create_proxy() calls for the generated Requires.__init__."""
        robocomp_comm = self.artifact.communications['robocomp']
        result = ''

        for req, i in number_duplicates(robocomp_comm['requires'], lambda req: req['name']):
            name = req['name']

            module = self.artifact.imports.module_providing_interface(name)
            result += Template(REQUIRE).substitute({
                'name': name,
                'i': i,
                'name_lower': name.lower(),
                'module_name': module.name,
            })

        return result

    def subscribes_to_adapters_creation(self):
        """create_adapter() calls for the generated Subscribes.__init__."""
        return '\n'.join((
            Template(SUBSCRIBES_TO).substitute({
                'name': interface['name'],
                'name_lower': interface['name'].lower()
            })
            for interface in self.artifact.communications['robocomp']['subscribes_to']
            if interface.type == 'ice'
        ))

    def implements_adapters_creation(self):
        """create_adapter() calls for the generated Implements.__init__."""
        return '\n'.join((
            Template(IMPLEMENTS).substitute({
                'name': interface['name'],
                'name_lower': interface['name'].lower()
            })
            for interface in self.artifact.communications['robocomp']['implements']
            if interface.type == 'ice'
        ))

    def needs_rcnode(self):
        # NOTE(review): this checks subscribes_to/implements, but IceStorm
        # (rcnode) is what pub/sub needs — a publish-only component would get
        # topic_manager=None in the generated InterfaceManager; confirm.
        robocomp_comm = self.artifact.communications['robocomp']
        return bool(robocomp_comm['subscribes_to'] or robocomp_comm['implements'])
diff --git a/robogen/extras/plugins/agent_generator_python/populators/src/main.py.py b/robogen/extras/plugins/agent_generator_python/populators/src/main.py.py
new file mode 100644
index 00000000..8058e027
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/src/main.py.py
@@ -0,0 +1,7 @@
+import robogen
+
class Populator(robogen.Populator):
    """Supplies the values for the placeholders of the src/main.py template."""

    def populate(self):
        # The template's only placeholder (besides the shared header) is the name.
        values = dict(component_name=self.artifact.name)
        return values
diff --git a/robogen/extras/plugins/agent_generator_python/populators/src/specificworker.py.py b/robogen/extras/plugins/agent_generator_python/populators/src/specificworker.py.py
new file mode 100644
index 00000000..ae1450a2
--- /dev/null
+++ b/robogen/extras/plugins/agent_generator_python/populators/src/specificworker.py.py
@@ -0,0 +1,245 @@
+from string import Template
+from robogen.utils import full_type_string, number_duplicates
+import robogen
+
# Template fragments for src/specificworker.py. They are either inserted
# verbatim or rendered through string.Template by the Populator below;
# placeholders use the ${name} syntax.

# Callback stubs connected to the DSR graph signals (see DSR_INIT).
DSR_SLOTS = '''\

# =============== DSR SLOTS ================
# =============================================

def update_node_att(self, id: int, attribute_names: [str]):
    console.print(f"UPDATE NODE ATT: {id} {attribute_names}", style='green')

def update_node(self, id: int, type: str):
    console.print(f"UPDATE NODE: {id} {type}", style='green')

def delete_node(self, id: int):
    console.print(f"DELETE NODE:: {id} ", style='green')

def update_edge(self, fr: int, to: int, type: str):

    console.print(f"UPDATE EDGE: {fr} to {type}", type, style='green')

def update_edge_att(self, fr: int, to: int, type: str, attribute_names: [str]):
    console.print(f"UPDATE EDGE ATT: {fr} to {type} {attribute_names}", style='green')

def delete_edge(self, fr: int, to: int, type: str):
    console.print(f"DELETE EDGE: {fr} to {type} {type}", style='green')
'''


# Import line required by the DSR boilerplate.
DSR_IMPORT = '''\
from pydsr import *

'''

# Constructor boilerplate: create the graph and connect the signal slots.
DSR_INIT = '''\

# YOU MUST SET AN UNIQUE ID FOR THIS AGENT IN YOUR DEPLOYMENT. "_CHANGE_THIS_ID_" for a valid unique integer
self.agent_id = "_CHANGE_THIS_ID_"
self.g = DSRGraph(0, "pythonAgent", self.agent_id)

try:
    signals.connect(self.g, signals.UPDATE_NODE_ATTR, self.update_node_att)
    signals.connect(self.g, signals.UPDATE_NODE, self.update_node)
    signals.connect(self.g, signals.DELETE_NODE, self.delete_node)
    signals.connect(self.g, signals.UPDATE_EDGE, self.update_edge)
    signals.connect(self.g, signals.UPDATE_EDGE_ATTR, self.update_edge_att)
    signals.connect(self.g, signals.DELETE_EDGE, self.delete_edge)
    console.print("signals connected")
except RuntimeError as e:
    print(e)

'''

# Default compute() skeleton inserted into the worker.
COMPUTE_METHOD_STR = '''\
@QtCore.Slot()
def compute(self):
    print('SpecificWorker.compute...')
    # computeCODE
    # try:
    #   self.differentialrobot_proxy.setSpeedBase(100, 0)
    # except Ice.Exception as e:
    #   traceback.print_exc()
    #   print(e)

    # The API of python-innermodel is not exactly the same as the C++ version
    # self.innermodel.updateTransformValues('head_rot_tilt_pose', 0, 0, 0, 1.3, 0, 0)
    # z = librobocomp_qmat.QVec(3,0)
    # r = self.innermodel.transform('rgbd', z, 'laser')
    # r.printvector('d')
    # print(r[0], r[1], r[2])

    return True
'''

# Wires the periodic timer to compute().
TIMEOUT_COMPUTE_CONNECT = '''\
self.timer.timeout.connect(self.compute)
self.timer.start(self.Period)
'''

# Rendered once per proxied module: lists the callable proxy methods.
PROXY_METHODS_COMMENT = '''\
######################
# From the ${module_name} you can ${action} this methods:
${methods}

'''

# Rendered once per interface type for the startup check.
INTERFACE_TYPES_TEST = '''\
print(f"Testing ${type} from ifaces.${module_name}")
test = ifaces.${type}()
'''

# Rendered once per module: lists the usable struct types.
INTERFACE_TYPES_COMMENT = '''\
######################
# From the ${module_name} you can use this types:
${structs}
'''

# Skeleton for an implemented/subscribed interface method.
METHOD = '''\
#
# ${method_str1} ${method_name} method from ${interface_name} interface
#
def ${interface_name}_${method_name}(self${param_str_a}):
    ${return_creation}
    #
    # write your CODE here
    #
    ${return_str}
'''
+
class Populator(robogen.Populator):
    """Populator for src/specificworker.py of Python agents.

    Produces the substitution map used to render the specific worker:
    DSR boilerplate, the compute loop, the Ice startup check and
    per-interface helper comments.
    """

    def populate(self):
        """Return the template substitution map for specificworker.py."""
        return {
            'component_name': self.artifact.name,
            'qtdebug': self.qtdebug(),
            'dsr_import': DSR_IMPORT,
            'dsr_slots': DSR_SLOTS,
            'dsr_init': DSR_INIT,
            'dsr_ignored_attrs': self.dsr_ignored_attrs(),
            'timeout_compute_connect': TIMEOUT_COMPUTE_CONNECT,
            'compute_creation': COMPUTE_METHOD_STR,
            'startup_check_ice': self.startup_check_ice(),
            'subscribes_to_methods': self.subscribes_to_methods(),
            'implements_methods': self.implements_methods(),
            'interface_specific_comment': self.interface_specific_comment(),
        }

    def qtdebug(self):
        """Render the Qt logging filter line from the artifact's qtdebug flag."""
        return f'QLoggingCategory.setFilterRules("*.debug={str(self.artifact.qtdebug).lower()}\\n");'

    def methods(self, interfaces, subscribe=False):
        # TODO: The original method is very, intrincate. Will do this later.
        return '# TODO'

    def dsr_ignored_attrs(self):
        """Render the block listing DSR attributes to ignore, one line each."""
        ignored_attrs = self.artifact.ignored_attrs

        if not ignored_attrs:
            return ''

        # BUG FIX: the header previously lacked a trailing newline, so it was
        # glued to the first attribute line.
        result = '# Ignore attributes from G\n'

        for attr in ignored_attrs:
            result += f"# TODO: the Python API does not support ignoring attributes! Ignore `{attr}'\n"

        return result

    def subscribes_to_methods(self):
        """Render the SubscribesTo methods section, or '' when there are none."""
        subscribes_to = self.artifact.communications['robocomp']['subscribes_to']

        if not subscribes_to:
            return ''

        return '\n'.join((
            # BUG FIX: a missing comma after the first banner string caused
            # implicit string concatenation, fusing both banners onto one line.
            '# =============== Methods for Component SubscribesTo ================',
            '# ===================================================================',
            '',
            self.methods(subscribes_to, subscribe=True),
            '# ===================================================================',
            '# ===================================================================',
            '',
        ))

    def implements_methods(self):
        """Render the Implements methods section, or '' when there are none."""
        implements = self.artifact.communications['robocomp']['implements']

        if not implements:
            return ''

        return '\n'.join((
            '# =============== Methods for Component Implements ==================',
            '# ===================================================================',
            '',
            self.methods(implements),
            '# ===================================================================',
            '# ===================================================================',
            '',
        ))

    def startup_check_ice(self):
        """Render one INTERFACE_TYPES_TEST per struct of every Ice interface module."""
        result = ''
        robocomp_comm = self.artifact.communications['robocomp']

        interfaces = (
            robocomp_comm['requires']
            + robocomp_comm['publishes']
            + robocomp_comm['implements']
            + robocomp_comm['subscribes_to']
        )

        for interface in interfaces:
            if interface['type'] != 'ice':
                continue

            module = self.artifact.imports.module_providing_interface(interface['name'])

            for name in module.structs:
                result += Template(INTERFACE_TYPES_TEST).substitute({
                    'module_name': module.name,
                    'type': full_type_string(name, module.name),
                })

        return result

    def interface_specific_comment(self):
        """Render helper comments describing the proxy methods and struct
        types available from every Ice interface module."""
        robocomp_comm = self.artifact.communications['robocomp']
        result = ''

        # A tuple (not a set) keeps the generated output deterministic.
        for comm_type in ('requires', 'publishes', 'implements', 'subscribes_to'):
            for interface, i in number_duplicates(robocomp_comm[comm_type], lambda iface: iface['name']):
                if interface['type'] != 'ice':
                    continue

                module = self.artifact.imports.module_providing_interface(interface['name'])

                # Only requires/publishes interfaces have a proxy object.
                if comm_type in ('publishes', 'requires'):
                    proxy_reference = f'self.{interface["name"].lower()}{i}_proxy.'
                    action = 'publish calling' if comm_type == 'publishes' else 'call'

                    # BUG FIX: this block previously sat outside the branch
                    # above, reading an undefined (or stale) proxy_reference
                    # for implements/subscribes_to interfaces.
                    proxy_method_calls = '\n'.join(
                        f'# {proxy_reference}{method}(...)'
                        for method in module.interfaces[interface['name']]
                    )

                    if proxy_method_calls:
                        result += Template(PROXY_METHODS_COMMENT).substitute({
                            'module_name': module.name,
                            'methods': proxy_method_calls,
                            'action': action,
                        })

                struct_str = '\n'.join(
                    f'# {full_type_string(struct, module.name)}\n'
                    for struct in module.structs
                )

                if struct_str:
                    result += Template(INTERFACE_TYPES_COMMENT).substitute({
                        'module_name': module.name,
                        'structs': struct_str,
                    })

        return result
diff --git a/robogen/extras/plugins/agent_python_licensing/__init__.py b/robogen/extras/plugins/agent_python_licensing/__init__.py
new file mode 100644
index 00000000..15b06fde
--- /dev/null
+++ b/robogen/extras/plugins/agent_python_licensing/__init__.py
@@ -0,0 +1,59 @@
+__name__ = 'agent_python_licensing'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+DEPENDENCIES = {
+ 'agent_generator_python': '==0.1.0',
+}
+
+from datetime import date
+from schema import Schema, And, Or, Use
+from string import Template
+from pyparsing import CaselessKeyword, Suppress, Word, alphanums, alphas
+import schema
+import pyparsing
+import robogen
+
+from .licenses import LICENSES
+
# Grammar fragment for the adsl 'license <identifier>;' statement.
ADSL = pyparsing.Optional((
    # Keyword
    Suppress(CaselessKeyword('license'))
    # Identifier
    - Word(alphas + '_', alphanums + '_')
    # Semicolon
    - Suppress(Word(';'))
).setParseAction(lambda x: x[0])('license'))

# Specified license must be supported
SCHEMA = Schema(And(Use(str.lower), Or(*LICENSES.keys())))

# IDIOM FIX: was a lambda assigned to a name (PEP 8 E731) whose parameter
# shadowed the `license` builtin.
def header_template(license_text):
    """Return *license_text* followed by the '$header' template placeholder."""
    return f'''\
{license_text}
$header'''
+
class LicensePopulator(robogen.Populator):
    """Populator that renders the configured license as a Python comment header."""

    @staticmethod
    def generate_python_header(license):
        """Prefix every line of *license* with '# ' so it becomes a comment block."""
        commented = ('# ' + line for line in license.splitlines())
        return '\n'.join(commented)

    def populate(self):
        """Substitute the copyright year and expose the commented header."""
        text = LICENSES[self.artifact.license]
        text = Template(text).substitute({'copyright_year': date.today().year})
        commented_header = self.generate_python_header(text)
        return {'header': header_template(commented_header)}
+
def ctor_set_license(artifact, importer):
    """Copy the parsed 'license' entry from the artifact tree onto the artifact.

    The importer argument is unused; it is part of the ctor-extension signature.
    """
    artifact.license = artifact.tree['license']
+
class Plugin:
    '''Adds a license header to source files of Python agents.'''

    @classmethod
    def install(klass, installer):
        # Run the license populator over every generated .py file of Python agents.
        installer.install_populator('agent', 'python', lambda file: file.endswith('.py'), LicensePopulator)
        # Copy the parsed license choice onto the artifact at construction time.
        installer.install_artifact_ctor_extension('agent', ctor_set_license)
        # 'license' is optional in the artifact description and defaults to MIT.
        installer.install_schema_extension('agent', 'root', schema.Optional('license', default='mit'), SCHEMA)
        # Teach the adsl grammar the 'license <name>;' statement.
        installer.install_syntax_extension('adsl', 'root', ADSL)
diff --git a/robogen/extras/plugins/agent_python_licensing/licenses.py b/robogen/extras/plugins/agent_python_licensing/licenses.py
new file mode 100644
index 00000000..e7180fad
--- /dev/null
+++ b/robogen/extras/plugins/agent_python_licensing/licenses.py
@@ -0,0 +1,42 @@
# License texts for generated file headers. Each is a string.Template body;
# ${copyright_year} is substituted at generation time.

MIT = '''\
MIT License

Copyright (c) ${copyright_year} [fullname]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''

GPLV3 = '''\
Copyright (c) ${copyright_year} [fullname].

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.'''
# BUG FIX: the URL in the standard GPLv3 notice above had been stripped,
# leaving "see ." — restored per the GNU boilerplate.

# Mapping from license identifier (lowercase) to header text.
LICENSES = {
    'mit': MIT,
    'gplv3': GPLV3,
}
\ No newline at end of file
diff --git a/robogen/extras/plugins/interface/__init__.py b/robogen/extras/plugins/interface/__init__.py
new file mode 100644
index 00000000..24353ce5
--- /dev/null
+++ b/robogen/extras/plugins/interface/__init__.py
@@ -0,0 +1,12 @@
+__name__ = 'interface'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+from .parser import IDSLParser
+from .artifact import InterfaceArtifact
+
class Plugin:
    '''Registers the interface artifact type and its .idsl parser.'''

    @classmethod
    def install(klass, installer):
        # The idsl parser handles files with the 'idsl' extension.
        installer.install_parser('idsl', IDSLParser, {'idsl'})
        # Interfaces are marked with 'i' (e.g. Name.i.yaml) or use .idsl.
        installer.install_artifact('interface', InterfaceArtifact, markers={'i'}, extensions={'idsl'})
diff --git a/robogen/extras/plugins/interface/artifact.py b/robogen/extras/plugins/interface/artifact.py
new file mode 100644
index 00000000..18c20044
--- /dev/null
+++ b/robogen/extras/plugins/interface/artifact.py
@@ -0,0 +1,68 @@
+from schema import Schema, Optional
+import robogen
+
class InterfaceArtifact(robogen.Artifact):
    """Artifact describing an interface module.

    SCHEMA validates the dict tree produced by the IDSL parser (or by a
    generic YAML/TOML parser for *.i.* descriptions); the constructor then
    copies the validated tree onto plain attributes.
    """

    SCHEMA = Schema({
        'name': str,
        Optional('imports', default=[]): [str],
        Optional('sequences', default={}): {
            Optional(str): {
                'type': str,
            },
        },
        Optional('dictionaries', default={}): {
            Optional(str): {
                'from': str,
                'to': str,
            },
        },
        Optional('enums', default={}): {
            Optional(str): [str],
        },
        Optional('exceptions', default={}): {
            Optional(str): [{
                'name': str,
                'type': str,
            }],
        },
        Optional('interfaces', default={}): {
            Optional(str): {
                # Methods keyed by name; 'name' itself is removed by the parser.
                Optional(str): {
                    Optional('return_type', default='void'): str,
                    Optional('is_idempotent', default=False): bool,
                    Optional('params', default=[]): [{
                        'name': str,
                        'type': str,
                        Optional('is_output', default=False): bool,
                    }],
                    Optional('throws', default=[]): [str]
                }
            },
        },
        Optional('structs', default={}): {
            Optional(str): [{
                'name': str,
                'type': str,
                Optional('default_value'): str,
            }],
        },
    })

    def __init__(self, raw, importer):
        """Validate *raw* (via the superclass) and expose it as attributes."""
        super().__init__(raw, importer)

        # Grab tree generated by the superclass constructor and delete it from
        # the artifact.
        tree = self.tree
        # TODO: uncomment line below
        # del self.tree

        # Dumb copy, but better be explicit. Cleverness is not future-proof.
        self.name = tree['name']
        # Imported files are resolved into artifacts by the importer.
        self.imports = importer.import_files(tree['imports'])
        self.sequences = tree['sequences']
        self.dictionaries = tree['dictionaries']
        self.enums = tree['enums']
        self.exceptions = tree['exceptions']
        self.interfaces = tree['interfaces']
        self.structs = tree['structs']
diff --git a/robogen/extras/plugins/interface/parser.py b/robogen/extras/plugins/interface/parser.py
new file mode 100644
index 00000000..2e7d8f8e
--- /dev/null
+++ b/robogen/extras/plugins/interface/parser.py
@@ -0,0 +1,195 @@
+from pyparsing import (
+ CaselessKeyword, CharsNotIn, Group, Optional, QuotedString, Suppress, Word,
+ ZeroOrMore, alphanums, alphas, cppStyleComment, delimitedList,
+ pythonStyleComment
+)
+
#
# Keywords
#

# Case-insensitive IDSL keywords ('Module' and 'module' are both accepted).
DICTIONARY = CaselessKeyword('dictionary')
ENUM = CaselessKeyword('enum')
EXCEPTION = CaselessKeyword('exception')
IDEMPOTENT = CaselessKeyword('idempotent')
IMPORT = CaselessKeyword('import')
INTERFACE = CaselessKeyword('interface')
MODULE = CaselessKeyword('module')
OUT = CaselessKeyword('out')
SEQUENCE = CaselessKeyword('sequence')
STRUCT = CaselessKeyword('struct')
THROWS = CaselessKeyword('throws')

#
# Symbols
#

# NOTE(review): Word(';') matches a run of one or more of the character, so
# e.g. ';;' is also accepted — confirm this looseness is intended (a Literal
# would accept exactly one). Same applies to the other symbols below.
COMMA = Suppress(Word(','))
SC = Suppress(Word(';'))
OBRACE = Suppress(Word('{'))
CBRACE = Suppress(Word('}'))
OPARENS = Suppress(Word('('))
CPARENS = Suppress(Word(')'))
LT = Suppress(Word('<'))
GT = Suppress(Word('>'))
EQ = Suppress(Word('='))

# Plain identifier and a '::'-qualified type name (joined back to one string).
ident = Word(alphas + '_', alphanums + '_')
type_ident = delimitedList(ident, delim='::').setParseAction('::'.join)

# import "<path>";
inport = (
    Suppress(IMPORT)
    - QuotedString('"')
    - SC
)

# A struct/exception member: <type> <name> [= <default>];
member = Group(
    type_ident('type')
    - ident('name')
    - Optional(EQ - CharsNotIn(';')('default_value'))
    - SC
)

# struct <name> { members... };
struct = Group(
    STRUCT.setParseAction(lambda: 'structs')('category')
    - ident('name')
    - Group(
        OBRACE
        - ZeroOrMore(member)
        - CBRACE
    )('contents')
    - SC
)

# dictionary <from, to> <name>;
dictionary = Group(
    DICTIONARY.setParseAction(lambda: 'dictionaries')('category')
    - Group(
        LT
        - type_ident('from')
        - COMMA
        - type_ident('to')
        - GT
    )('contents')
    - ident('name')
    - SC
)

# sequence <type> <name>;
sequence = Group(
    SEQUENCE.setParseAction(lambda: 'sequences')('category')
    - Group(
        LT
        - ident('type')
        - GT
    )('contents')
    - ident('name')
    - SC
)

# enum <name> { a, b, ... };
enum = Group(
    ENUM.setParseAction(lambda: 'enums')('category')
    - ident('name')
    - Group(
        OBRACE
        - delimitedList(ident)
        - CBRACE
    )('contents')
    - SC
)

# exception <name> { members... };
exception = Group(
    EXCEPTION.setParseAction(lambda: 'exceptions')('category')
    - ident('name')
    - OBRACE
    - Group(ZeroOrMore(member))('contents')
    - CBRACE
    - SC
)

# throws <Type>, <Type>, ...
throws = (
    Suppress(THROWS)
    - delimitedList(type_ident)
)

# TODO: booleans not working?

# A method parameter: [out] <type> <name>
parameter = Group(
    Optional(OUT.setParseAction(lambda: True))('is_output')
    + type_ident('type')
    + ident('name')
)

# [idempotent] <return_type> <name>(params...) [throws ...];
remote_method = Group(
    Optional(IDEMPOTENT.setParseAction(lambda: True))('is_idempotent')
    + type_ident('return_type')
    + ident('name')
    + OPARENS
    + Optional(delimitedList(parameter))('params')
    + CPARENS
    + Optional(throws)('throws')
    + SC
)

# interface <name> { methods... };
interface = Group(
    INTERFACE.setParseAction(lambda: 'interfaces')('category')
    - ident('name')
    - OBRACE
    - Group(ZeroOrMore(remote_method))('contents')
    - CBRACE
    - SC
)

# Any number of top-level declarations, in any order.
contents = Group(ZeroOrMore(
    struct
    | enum
    | exception
    | dictionary
    | sequence
    | interface
))

#
# Top-level parser
#

# imports... module <name> { declarations... };
IDSL = (
    ZeroOrMore(inport)('imports')
    + Suppress(MODULE)
    - ident('name')
    - OBRACE
    - contents('contents')
    - CBRACE
    - SC
).ignore(cppStyleComment | pythonStyleComment)
+
class IDSLParser:
    """Parse IDSL source into a plain dictionary tree."""

    def parse(self, src):
        """Parse *src* and return a dict with one sub-dict per declaration
        category plus 'name' and 'imports'."""
        tree = IDSL.parseString(src).asDict()

        # Start every category off empty, then file each parsed declaration
        # under its category keyed by name.
        for category in ('structs', 'enums', 'exceptions',
                         'dictionaries', 'sequences', 'interfaces'):
            tree[category] = {}

        for declaration in tree.pop('contents'):
            tree[declaration['category']][declaration['name']] = declaration['contents']

        # Key each interface's methods by method name; the name is removed
        # from the method record itself.
        for iface, methods in tree['interfaces'].items():
            tree['interfaces'][iface] = {
                method.pop('name'): method for method in methods
            }

        return tree
diff --git a/robogen/extras/plugins/toml.py b/robogen/extras/plugins/toml.py
new file mode 100644
index 00000000..aa4b6543
--- /dev/null
+++ b/robogen/extras/plugins/toml.py
@@ -0,0 +1,15 @@
+__name__ = 'toml'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+import toml
+import robogen
+
class TOMLParser(robogen.Parser):
    '''Parser that deserializes TOML source text into a dict tree.'''

    def parse(self, src):
        # toml.loads returns a plain dict suitable for artifact schemas.
        return toml.loads(src)
+
class Plugin:
    '''Registers a TOML parser for artifact descriptions.'''

    @classmethod
    def install(klass, installer):
        # CONSISTENCY FIX: the other plugins pass the supported extensions as
        # a set ({'idsl'}, {'cdsl'}); a bare string was passed here — presumably
        # a bug, since a string iterates character by character. Confirm against
        # install_parser's signature.
        installer.install_parser('toml', TOMLParser, {'toml'})
diff --git a/robogen/extras/plugins/yaml.py b/robogen/extras/plugins/yaml.py
new file mode 100644
index 00000000..492a1fbf
--- /dev/null
+++ b/robogen/extras/plugins/yaml.py
@@ -0,0 +1,15 @@
+__name__ = 'yaml'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+import yaml
+import robogen
+
class YAMLParser(robogen.Parser):
    '''Parser that deserializes YAML source text into a dict tree.'''

    def parse(self, src):
        # safe_load avoids constructing arbitrary Python objects from input.
        return yaml.safe_load(src)
+
class Plugin:
    '''Registers a YAML parser for artifact descriptions.'''

    @classmethod
    def install(klass, installer):
        # CONSISTENCY FIX: the other plugins pass the supported extensions as
        # a set ({'idsl'}, {'cdsl'}); a bare string was passed here — presumably
        # a bug, since a string iterates character by character. Confirm against
        # install_parser's signature.
        installer.install_parser('yaml', YAMLParser, {'yaml'})
diff --git a/robogen/extras/samples/Bar.i.yaml b/robogen/extras/samples/Bar.i.yaml
new file mode 100644
index 00000000..281eaf18
--- /dev/null
+++ b/robogen/extras/samples/Bar.i.yaml
@@ -0,0 +1 @@
+name: Bar
\ No newline at end of file
diff --git a/robogen/extras/samples/Baz.i.yaml b/robogen/extras/samples/Baz.i.yaml
new file mode 100644
index 00000000..6a4f142a
--- /dev/null
+++ b/robogen/extras/samples/Baz.i.yaml
@@ -0,0 +1 @@
+name: Baz
\ No newline at end of file
diff --git a/robogen/extras/samples/Foo.idsl b/robogen/extras/samples/Foo.idsl
new file mode 100644
index 00000000..f7ed2ba7
--- /dev/null
+++ b/robogen/extras/samples/Foo.idsl
@@ -0,0 +1,4 @@
+import "Baz.i.yaml";
+
+Module Foo {
+};
\ No newline at end of file
diff --git a/robogen/extras/samples/MyFirstAgent.adsl b/robogen/extras/samples/MyFirstAgent.adsl
new file mode 100644
index 00000000..d33fcf9f
--- /dev/null
+++ b/robogen/extras/samples/MyFirstAgent.adsl
@@ -0,0 +1,15 @@
+import "DifferentialRobot.idsl";
+import "Laser.idsl";
+
+Agent MyFirstAgent {
+ Language python;
+ Subscriptions
+ @foo,
+ |bar;
+ Communications {
+ robocomp {
+ Requires DifferentialRobot, Laser;
+ };
+ };
+};
+
diff --git a/robogen/extras/samples/xxx.a.yaml b/robogen/extras/samples/xxx.a.yaml
new file mode 100644
index 00000000..5a7e3e80
--- /dev/null
+++ b/robogen/extras/samples/xxx.a.yaml
@@ -0,0 +1,26 @@
+name: path_planner_astar
+language: python
+qtdebug: true
+
+imports:
+ - Foo.idsl
+ - ../samples/Bar.i.yaml
+
+ignore_attrs:
+ - cam_rgb
+ - cam_depth
+
+apis:
+ - inner_eigen
+ - cam
+ - rt
+
+subscriptions:
+
+ nodes:
+ - node_x
+
+ edges:
+ - edge_y
+
+communications: {}
diff --git a/robogen/extras/samples/xxx.adsl b/robogen/extras/samples/xxx.adsl
new file mode 100644
index 00000000..40cf746f
--- /dev/null
+++ b/robogen/extras/samples/xxx.adsl
@@ -0,0 +1,23 @@
+import "Foo.idsl";
+import "../samples/Bar.i.yaml";
+
+Agent path_planner_astar {
+ Language python;
+ Qtdebug;
+ Apis
+ inner_eigen,
+ cam,
+ rt;
+ IgnoreAttrs
+ cam_rgb,
+ cam_depth;
+ Subscriptions
+ @node_x,
+ |edge_y;
+ Communications {
+ robocomp {
+ Implements Foo;
+ SubscribesTo Bar;
+ };
+ };
+};
diff --git a/robogen/extras/vault/component.py b/robogen/extras/vault/component.py
new file mode 100644
index 00000000..e69de29b
diff --git a/robogen/extras/vault/component_artifact/__init__.py b/robogen/extras/vault/component_artifact/__init__.py
new file mode 100644
index 00000000..d1cea268
--- /dev/null
+++ b/robogen/extras/vault/component_artifact/__init__.py
@@ -0,0 +1,12 @@
+__name__ = 'component_artifact'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+from .parser import CDSLParser
+from .artifact import ComponentArtifact
+
class Plugin:
    '''Registers the component artifact type and its .cdsl parser.'''

    @classmethod
    def install(klass, installer):
        # The cdsl parser handles files with the 'cdsl' extension.
        installer.install_parser('cdsl', CDSLParser, {'cdsl'})
        # Components are marked with 'c' (e.g. name.c.yaml) or use .cdsl.
        installer.install_artifact('component', ComponentArtifact, markers={'c'}, extensions={'cdsl'})
diff --git a/robogen/extras/vault/component_artifact/artifact.py b/robogen/extras/vault/component_artifact/artifact.py
new file mode 100644
index 00000000..35e2a693
--- /dev/null
+++ b/robogen/extras/vault/component_artifact/artifact.py
@@ -0,0 +1,58 @@
+from schema import Schema, And, Or, Optional, Use
+import robogen
+import os
+
# Schema for one interface reference in a communications list; the transport
# defaults to Ice and is normalized to lowercase.
COMM_ITEM = Schema({
    'name': str,
    Optional('type', default='ice'): And(Use(str.lower), Or('ros', 'ice')),
})
+
class ComponentArtifact(robogen.Artifact):
    """Artifact describing a RoboComp component.

    The class-level `options` set and `extensions` dict are extension points
    filled in by plugins before the schema is built.
    """

    # Option identifiers registered by plugins (e.g. 'dsr').
    options = set()
    # Plugin-provided schema extensions keyed by anchor point.
    extensions = {
        'root': {}
    }

    @classmethod
    def schema(klass):
        """Build the validation schema, including plugin-provided extensions."""
        return Schema({
            'name': str,
            'language': str,
            Optional('imports', default=[]): [str],
            Optional('options', default=[]): [And(Use(str.lower), Or(*klass.options))],
            Optional('implements', default=[]): [COMM_ITEM],
            Optional('requires', default=[]): [COMM_ITEM],
            Optional('publishes', default=[]): [COMM_ITEM],
            Optional('subscribes_to', default=[]): [COMM_ITEM],

            # Unpack extensions
            **{keyword: schema for keyword, schema in klass.extensions['root'].items()}
        })

    @classmethod
    def extend_schema(klass, anchor, keyword, schema):
        """Register a plugin schema extension under the given anchor."""
        klass.extensions[anchor][keyword] = schema

    def __init__(self, raw):
        """Validate *raw* (via the superclass) and expose it as attributes."""
        super().__init__(raw)

        self.name = self.tree['name']
        self.language = self.tree['language']
        # BUG FIX: `option.lower` (missing call parentheses) stored bound
        # methods instead of lowercase strings.
        self.options = [option.lower() for option in self.tree['options']]
        self.imports = [*map(os.path.basename, sorted(self.tree['imports']))]
        self.recursive_imports = []

        self.interfaces = {'ros': [], 'ice': []}
        self.implements = []
        self.requires = []
        self.publishes = []
        self.subscribes_to = []
        self.using_ros = False

        # A tuple (not a set) keeps self.interfaces ordering deterministic
        # across runs.
        for comm_type in ('implements', 'requires', 'publishes', 'subscribes_to'):
            if comm_type not in self.tree:
                continue

            for interface in sorted(self.tree[comm_type], key=lambda comm: comm['name']):
                getattr(self, comm_type).append(interface['name'])
                self.interfaces[interface['type']].append(interface)
diff --git a/robogen/extras/vault/component_artifact/parser.py b/robogen/extras/vault/component_artifact/parser.py
new file mode 100644
index 00000000..242a2d4b
--- /dev/null
+++ b/robogen/extras/vault/component_artifact/parser.py
@@ -0,0 +1,99 @@
+from functools import reduce
+from pyparsing import (
+ CaselessKeyword, Group, Optional, QuotedString, Suppress, Word, ZeroOrMore,
+ alphanums, alphas, cppStyleComment, delimitedList, pythonStyleComment
+)
+
+import robogen
+
+#
+# Symbols
+#
+
# NOTE(review): Word(';') matches a run of one or more of the character —
# confirm this looseness over Literal(';') is intended.
SC = Suppress(Word(';'))
OBRACE = Suppress(Word('{'))
CBRACE = Suppress(Word('}'))
OPARENS = Suppress(Word('('))
CPARENS = Suppress(Word(')'))

#
# Keywords
#

COMMUNICATIONS = CaselessKeyword('communications')
COMPONENT = CaselessKeyword('component')
IMPLEMENTS = CaselessKeyword('implements')
IMPORT = CaselessKeyword('import')
LANGUAGE = CaselessKeyword('language')
OPTIONS = CaselessKeyword('options')
PUBLISHES = CaselessKeyword('publishes')
REQUIRES = CaselessKeyword('requires')
SUBSCRIBES_TO = CaselessKeyword('subscribesTo')

identifier = Word(alphas + '_', alphanums + '_')
# import "<path>";
inport = Suppress(IMPORT) - QuotedString('"') - SC
# Parenthesized transport annotation, e.g. (ice) or (ros).
comm_type = OPARENS - identifier - CPARENS

# One interface reference: Name or Name(transport).
comm = Group(
    identifier('name')
    + Optional(comm_type('type'))
)

implements = Suppress(IMPLEMENTS) - delimitedList(comm) - SC
requires = Suppress(REQUIRES) - delimitedList(comm) - SC
subscribes_to = Suppress(SUBSCRIBES_TO) - delimitedList(comm) - SC
publishes = Suppress(PUBLISHES) - delimitedList(comm) - SC
+
# The four communication sections may appear in any order, each at most once.
communication_list = (
    Optional(implements('implements'))
    & Optional(requires('requires'))
    # BUG FIX: the result key was misspelled 'suscribes_to'; downstream
    # consumers (ComponentArtifact's schema) expect 'subscribes_to'.
    & Optional(subscribes_to('subscribes_to'))
    & Optional(publishes('publishes'))
)
+
# communications { ... };
communications = Group(
    Suppress(COMMUNICATIONS)
    - OBRACE
    - communication_list
    - CBRACE
    - SC
)

# language <identifier>;
language = Suppress(LANGUAGE) - identifier - SC

# options <identifier>, <identifier>, ... ;
options = Suppress(OPTIONS) - delimitedList(identifier) - SC

# TODO: things come in arrays and I don't know why? Ugly hack ahead, beware.

contents = (
    communications.setParseAction(lambda x: x[0])
    & language('language').setParseAction(lambda x: x[0])
    & Optional(options('options'))
)

#
# Top-level parser
#

# NOTE(review): create_item is not defined in this module, so the reduce
# below will raise NameError as soon as `ext` is non-empty — confirm where
# create_item should come from. Also, `next` shadows the builtin.
CDSL = lambda ext: (
    ZeroOrMore(inport)('imports')
    - Suppress(COMPONENT)
    - identifier('name')
    - OBRACE
    # Unpack extensions
    - reduce(lambda acc, next: acc & create_item(*next) , ext.items(), contents)
    - CBRACE
    - SC
).ignore(cppStyleComment | pythonStyleComment)
+
class CDSLParser(robogen.Parser):
    """Parser for .cdsl component descriptions, extensible by plugins."""

    # Grammar extensions registered by plugins, keyed by anchor point.
    extensions = {
        'root': set(),
    }

    @classmethod
    def extend_syntax(klass, anchor, syntax):
        # Register a plugin grammar fragment under the given anchor.
        klass.extensions[anchor].add(syntax)

    def parse(self, src):
        # Build the grammar with the currently registered extensions,
        # then parse the source into a plain dict.
        return CDSL(CDSLParser.extensions).parseString(src).asDict()
diff --git a/robogen/extras/vault/dsr.py b/robogen/extras/vault/dsr.py
new file mode 100644
index 00000000..eb4e16f0
--- /dev/null
+++ b/robogen/extras/vault/dsr.py
@@ -0,0 +1,8 @@
+__name__ = 'dsr'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
class Plugin:
    '''Registers the 'dsr' component option.'''

    @classmethod
    def install(klass, installer):
        # Make the 'dsr' option available to component descriptions.
        installer.install_option('dsr')
diff --git a/robogen/extras/vault/gui.py b/robogen/extras/vault/gui.py
new file mode 100644
index 00000000..c429dd29
--- /dev/null
+++ b/robogen/extras/vault/gui.py
@@ -0,0 +1,30 @@
+__name__ = 'gui'
+__version__ = '0.1.0'
+__author__ = 'José Miguel Sánchez García'
+
+from schema import Schema, And, Or, Use
+from pyparsing import CaselessKeyword, Suppress, Word, alphanums, alphas
+
GUI = CaselessKeyword('gui')

identifier = Word(alphas + '_', alphanums + '_')

# Grammar fragment: gui <type> ( <widget> )
CDSL = (
    Suppress(GUI)
    - identifier('type')
    - Suppress(Word('('))
    - identifier('widget')
    - Suppress(Word(')'))
)

# Only Qt GUIs with one of the three root widget classes are accepted.
SCHEMA = Schema({
    'type': And(Use(str.lower), 'qt'),
    'widget': Or('QWidget', 'QMainWindow', 'QDialog'),
})
+
class Plugin:
    '''Automates the generation of GUI.'''

    @classmethod
    def install(klass, manager):
        # Register the 'gui' input element with its schema and cdsl grammar.
        manager.install_input_element('gui', SCHEMA, cdsl=CDSL)
diff --git a/robogen/extras/vault/idsl.py b/robogen/extras/vault/idsl.py
new file mode 100644
index 00000000..a832c9ca
--- /dev/null
+++ b/robogen/extras/vault/idsl.py
@@ -0,0 +1,198 @@
+from pyparsing import (
+ CaselessKeyword, CharsNotIn, Group, Optional, QuotedString, Suppress, Word,
+ ZeroOrMore, alphanums, alphas, cppStyleComment, delimitedList,
+ pythonStyleComment
+)
+
+#
+# Keywords
+#
+
# Case-insensitive IDSL keywords.
DICTIONARY = CaselessKeyword('dictionary')
ENUM = CaselessKeyword('enum')
# BUG FIX: this keyword previously matched 'enum' (copy-paste error), making
# the exception rule collide with the enum rule; the interface plugin's twin
# of this grammar correctly matches 'exception'.
EXCEPTION = CaselessKeyword('exception')
IDEMPOTENT = CaselessKeyword('idempotent')
IMPORT = CaselessKeyword('import')
INTERFACE = CaselessKeyword('interface')
MODULE = CaselessKeyword('module')
OUT = CaselessKeyword('out')
SEQUENCE = CaselessKeyword('sequence')
STRUCT = CaselessKeyword('struct')
THROWS = CaselessKeyword('throws')
+
+#
+# Symbols
+#
+
# NOTE(review): Word(';') matches a run of one or more of the character —
# confirm this looseness over Literal(';') is intended.
COMMA = Suppress(Word(','))
SC = Suppress(Word(';'))
OBRACE = Suppress(Word('{'))
CBRACE = Suppress(Word('}'))
OPARENS = Suppress(Word('('))
CPARENS = Suppress(Word(')'))
LT = Suppress(Word('<'))
GT = Suppress(Word('>'))
EQ = Suppress(Word('='))

# Plain identifier and a '::'-qualified type name (joined back to one string).
identifier = Word(alphas + '_', alphanums + '_')
type_identifier = delimitedList(identifier, delim='::').setParseAction('::'.join)

# import "<path>";
inport = (
    Suppress(IMPORT)
    - QuotedString('"')
    - SC
)
+
# A struct/exception member: <type> <name> [= <default>];
member = Group(
    type_identifier('type')
    + identifier('name')
    + Optional(EQ - CharsNotIn(';')('default_value'))
    + SC
)

# struct <name> { members... };
struct = Group(
    STRUCT.setParseAction(lambda: 'structs')('category')
    - identifier('name')
    - Group(
        OBRACE
        - ZeroOrMore(member)
        - CBRACE
    )('contents')
    - SC
)

# dictionary <from, to> <name>;
dictionary = Group(
    DICTIONARY.setParseAction(lambda: 'dictionaries')('category')
    - Group(
        LT
        - type_identifier('from')
        - COMMA
        - type_identifier('to')
        - GT
    )('contents')
    - identifier('name')
    - SC
)

# sequence <type> <name>;
sequence = Group(
    SEQUENCE.setParseAction(lambda: 'sequences')('category')
    - Group(
        LT
        - identifier('type')
        - GT
    )('contents')
    - identifier('name')
    - SC
)

# enum <name> { a, b, ... };
enum = Group(
    ENUM.setParseAction(lambda: 'enums')('category')
    - identifier('name')
    - Group(
        OBRACE
        - delimitedList(identifier)
        - CBRACE
    )('contents')
    - SC
)

# exception <name> { members... };
exception = Group(
    EXCEPTION.setParseAction(lambda: 'exceptions')('category')
    - identifier('name')
    - Group(
        OBRACE
        - ZeroOrMore(member)
        - CBRACE
    )('contents')
    - SC
)

# throws <Name>, <Name>, ...
throws = (
    Suppress(THROWS)
    - delimitedList(identifier)
)

# TODO: booleans not working?

# A method parameter: [out] <type> <name>
parameter = Group(
    Optional(OUT.setParseAction(lambda: True))('is_output')
    + type_identifier('type')
    + identifier('name')
)

# [idempotent] <return_type> <name>(params...) [throws ...];
remote_method = Group(
    Optional(IDEMPOTENT.setParseAction(lambda: True))('is_idempotent')
    + type_identifier('return_type')
    + identifier('name')
    + OPARENS
    + Optional(delimitedList(parameter))('params')
    + CPARENS
    + Optional(throws)('throws')
    + SC
)

# interface <name> { methods... };
interface = Group(
    INTERFACE.setParseAction(lambda: 'interfaces')('category')
    - identifier('name')
    - Group(
        OBRACE
        + ZeroOrMore(remote_method)
        + CBRACE
    )('contents')
    - SC
)

# Any number of top-level declarations, in any order.
contents = Group(ZeroOrMore(
    struct
    | enum
    | exception
    | dictionary
    | sequence
    | interface
))

#
# Top-level parser
#

# imports... module <name> { declarations... };
IDSL = (
    ZeroOrMore(inport)('imports')
    + Suppress(MODULE)
    - identifier('name')
    - OBRACE
    - contents('contents')
    - CBRACE
    - SC
).ignore(cppStyleComment | pythonStyleComment)
+
class IDSLParser:
    """Parse IDSL source into a plain dictionary tree."""

    def parse(self, src):
        """Parse *src* and return a dict with one sub-dict per declaration
        category plus 'name' and 'imports'."""
        parsed = IDSL.parseString(src).asDict()

        # Seed every category empty, then file each parsed declaration under
        # its category keyed by name.
        for bucket in ('structs', 'enums', 'exceptions',
                       'dictionaries', 'sequences', 'interfaces'):
            parsed[bucket] = {}

        for entry in parsed.pop('contents'):
            parsed[entry['category']][entry['name']] = entry['contents']

        # Re-key each interface's methods by method name, dropping the name
        # from the method record itself.
        for iface_name, method_list in parsed['interfaces'].items():
            parsed['interfaces'][iface_name] = {
                record.pop('name'): record for record in method_list
            }

        return parsed
diff --git a/robogen/extras/vault/module.py b/robogen/extras/vault/module.py
new file mode 100644
index 00000000..467ffa1b
--- /dev/null
+++ b/robogen/extras/vault/module.py
@@ -0,0 +1,52 @@
+from schema import Schema, Optional
+from .artifact import Artifact
+
class Module(Artifact):
    '''IDSL module artifact: the validated contents of one `.idsl` file.'''

    # Declarative shape of the parsed module tree.  The keys mirror the
    # category buckets produced by the IDSL parser; every category is
    # optional and defaults to empty so downstream code can iterate freely.
    SCHEMA = Schema({
        'name': str,
        Optional('imports', default=[]): [str],
        Optional('sequences', default={}): {
            Optional(str): {
                'type': str,
            },
        },
        Optional('dictionaries', default={}): {
            Optional(str): {
                'from': str,
                'to': str,
            },
        },
        Optional('enums', default={}): {
            Optional(str): [str],
        },
        Optional('exceptions', default={}): {
            Optional(str): [{
                'name': str,
                'type': str,
            }],
        },
        Optional('interfaces', default={}): {
            Optional(str): {
                Optional(str): {
                    Optional('return_type', default='void'): str,
                    Optional('is_idempotent', default=False): bool,
                    Optional('params', default=[]): [{
                        'name': str,
                        'type': str,
                        Optional('is_output', default=False): bool,
                    }],
                }
            },
        },
        Optional('structs', default={}): {
            Optional(str): [{
                'name': str,
                'type': str,
                Optional('default_value'): str,
            }],
        },
    })

    def __init__(self, raw):
        # NOTE(review): assumes the base Artifact accepts an already
        # validated tree here -- confirm it does not validate a second time.
        validated = Module.validate(raw)
        super().__init__(validated)
diff --git a/robogen/extras/vault/smdsl.py b/robogen/extras/vault/smdsl.py
new file mode 100644
index 00000000..efe835e2
--- /dev/null
+++ b/robogen/extras/vault/smdsl.py
@@ -0,0 +1,87 @@
+from pyparsing import (
+ CaselessKeyword, CaselessLiteral, Group, Optional, ZeroOrMore, Suppress,
+ Word, alphanums, alphas, delimitedList, cppStyleComment,
+ pythonStyleComment
+)
+
#
# Keywords
#

TRANSITIONS = CaselessKeyword('transitions')
INITIAL_STATE = CaselessKeyword('initial_state')
END_STATE = CaselessKeyword('end_state')
STATES = CaselessKeyword('states')
PARALLEL = CaselessKeyword('parallel')

#
# Symbols
#

# NOTE(review): Word(';') matches a *run* of semicolons, not exactly one;
# Literal(';') would be stricter.  Kept as-is to match the rest of the file.
SC = Suppress(Word(';'))
OBRACE = Suppress(Word('{'))
CBRACE = Suppress(Word('}'))
# Transitions accept either the ASCII arrow '=>' or the Unicode arrow.
TO = Suppress(CaselessLiteral('=>') | CaselessLiteral('⇒'))

# C-style identifier: letter/underscore, then letters/digits/underscores.
identifier = Word(alphas + '_', alphanums + '_')

# `states A, B, C;` -- declares the machine's states.
states = (
    Suppress(STATES)
    - delimitedList(identifier)
    - SC
)

# `From => To1, To2;` -- one transition, possibly to several targets.
transition = Group(
    identifier('from')
    + TO
    - delimitedList(identifier)('to')
    - SC
)

# `transitions { ... };` block holding all transition rules.
transitions = (
    Suppress(TRANSITIONS)
    - OBRACE
    - ZeroOrMore(transition)
    - CBRACE
    - SC
)

initial_state = Suppress(INITIAL_STATE) - identifier - SC
final_state = Suppress(END_STATE) - identifier - SC

# TODO: things come in arrays and I don't know why? Ugly hack ahead, beware.

# The `&` (Each) operator lets the four sections appear in any order.
contents = (
    states('states')
    & initial_state('initial_state').setParseAction(lambda x: x[0])
    & final_state('final_state').setParseAction(lambda x: x[0])
    & transitions('transitions')
)

parent = Suppress(Word(':')) + identifier

# `:Parent [parallel] { ... };` -- nested machine under a parent state.
substate = (
    parent('parent')
    - Optional(PARALLEL('parallel').setParseAction(lambda: True))
    - OBRACE
    - contents
    - CBRACE
    - SC
)

#
# Top-level parser
#

# Whole SMDSL file: `Name { ... };` followed by any number of substates.
# pyparsing's `-` works like `+` but disables backtracking, improving
# error reporting on malformed input.
SMDSL = (
    identifier('name')
    - OBRACE
    - contents
    - CBRACE
    - SC
    - Group(ZeroOrMore(substate))('substates')
).ignore(cppStyleComment | pythonStyleComment)
+
class SMDSLParser:
    '''Parses SMDSL state-machine descriptions into a plain dictionary.'''

    def parse(self, src):
        '''Run the SMDSL grammar over `src` and return the result tree.'''
        parsed = SMDSL.parseString(src)
        return parsed.asDict()
diff --git a/robogen/extras/vault/state_machine_artifact.py b/robogen/extras/vault/state_machine_artifact.py
new file mode 100644
index 00000000..7ff10033
--- /dev/null
+++ b/robogen/extras/vault/state_machine_artifact.py
@@ -0,0 +1,32 @@
+from schema import Schema, Optional
+from .artifact import Artifact
+
+# TODO: avoid redundancy?
+
class StateMachine(Artifact):
    '''State-machine artifact: the validated contents of one `.smdsl` file.'''

    # TODO: substates
    # Declarative shape of the parsed state machine.  Substates repeat the
    # top-level layout, nested one level under a `parent` state.
    SCHEMA = Schema({
        'name': str,

        'states': [str],
        'initial_state': str,
        'final_state': str,
        'transitions': [{
            'from': str,
            'to': [str],
        }],
        Optional('substates', default=[]): [{
            'parent': str,
            'states': [str],
            'initial_state': str,
            'final_state': str,
            'transitions': [{
                'from': str,
                'to': [str],
            }],
        }],
    })

    def __init__(self, raw):
        # NOTE(review): assumes the base Artifact accepts an already
        # validated tree -- confirm it does not validate a second time.
        validated = StateMachine.validate(raw)
        super().__init__(validated)
diff --git a/robogen/extras/vault/state_machine_plugin.py b/robogen/extras/vault/state_machine_plugin.py
new file mode 100644
index 00000000..9e62ae3d
--- /dev/null
+++ b/robogen/extras/vault/state_machine_plugin.py
@@ -0,0 +1,27 @@
# Plugin metadata read by the plugin loader.  Overriding __name__ makes the
# loaded module register under a stable plugin name regardless of the file
# path it was loaded from (the loader indexes candidates by module.__name__).
__name__ = 'state_machine'
__version__ = '0.1.0'
__author__ = 'José Miguel Sánchez García'

from pyparsing import CaselessKeyword, Optional as ParserOptional, QuotedString, Suppress
from schema import Schema, Optional as SchemaOptional

# CDSL keywords recognized by this plugin.
STATE_MACHINE = CaselessKeyword('statemachine')
VISUAL = CaselessKeyword('visual')

# Grammar fragment: `statemachine "path/to/file.smdsl" [visual]`.
CDSL = (
    Suppress(STATE_MACHINE)
    - QuotedString('"')('path')
    - ParserOptional(VISUAL('is_visual').setParseAction(lambda: True))
)

# Shape of the element once parsed and merged into the component schema.
SCHEMA = Schema({
    'path': str,
    SchemaOptional('is_visual', default=False): bool,
})
+
class Plugin:
    '''Automates the generation of state machines.'''

    @classmethod
    def install(klass, manager):
        # Register the `statemachine` CDSL input element with its schema.
        # NOTE(review): PluginInstaller in this tree does not define
        # install_input_element -- confirm the manager passed in provides it.
        manager.install_input_element('state_machine', SCHEMA, cdsl=CDSL)
diff --git a/robogen/requirements.txt b/robogen/requirements.txt
new file mode 100644
index 00000000..b1256134
--- /dev/null
+++ b/robogen/requirements.txt
@@ -0,0 +1,6 @@
+pyparsing~=2.4.7
+PyYAML~=6.0
+schema~=0.7.5
+semver~=2.13.0
+toml~=0.10.2
+typer~=0.3.2
diff --git a/robogen/setup.py b/robogen/setup.py
new file mode 100644
index 00000000..50199756
--- /dev/null
+++ b/robogen/setup.py
@@ -0,0 +1,15 @@
+from setuptools import setup, find_packages
+
# Package definition for the robogen command-line tool.  Sources follow the
# src-layout (importable package under `src/robogen`), and installation
# exposes a `robogen` console script backed by the typer app in robogen.main.
setup(
    name='robogen',
    version='0.1.0',
    include_package_data=True,
    package_dir={'': 'src'},
    packages=find_packages(where='src'),
    url='https://github.com/jmi2k/robogen',
    author='José Miguel Sánchez García',
    author_email='soy.jmi2k@gmail.com',
    description='Code generation tool for RoboComp',
    entry_points={'console_scripts': ['robogen = robogen.main:app']},
)
+
diff --git a/robogen/src/robogen/__init__.py b/robogen/src/robogen/__init__.py
new file mode 100644
index 00000000..aae37037
--- /dev/null
+++ b/robogen/src/robogen/__init__.py
@@ -0,0 +1,4 @@
+from .artifact import Artifact
+from .generator import Generator
+from .parser import Parser
+from .populator import Populator
diff --git a/robogen/src/robogen/artifact.py b/robogen/src/robogen/artifact.py
new file mode 100644
index 00000000..972bce94
--- /dev/null
+++ b/robogen/src/robogen/artifact.py
@@ -0,0 +1,150 @@
+from .parser import ParserManager
+from sys import exit, stderr
+import os
+import traceback
+
class Artifact:
    '''Entity which conforms to certain rules and can generate some output.'''

    def __init__(self, raw, importer):
        # Validate the raw tree against the subclass-declared SCHEMA.
        self.tree = self.__class__.validate(raw)

        # Run any plugin-installed constructor extensions for this class.
        if hasattr(self.__class__, 'ctor_extensions'):
            for function in self.__class__.ctor_extensions:
                function(self, importer)

    def __repr__(self):
        return str(self.__dict__)

    @classmethod
    def extend_schema(klass, anchor, *args, **kwargs):
        # Schema extension is artifact-specific; concrete artifacts override.
        raise NotImplementedError

    @classmethod
    def extend_ctor(klass, function):
        '''Prepend `function` to this class's constructor extension chain.'''
        # Give each class its *own* list.  The original used hasattr(), so a
        # subclass would find the attribute inherited from its parent and
        # insert into the parent's (shared) list instead of its own.
        if 'ctor_extensions' not in klass.__dict__:
            klass.ctor_extensions = list(getattr(klass, 'ctor_extensions', []))
        klass.ctor_extensions.insert(0, function)

    @classmethod
    def schema(klass):
        return klass.SCHEMA

    @classmethod
    def validate(klass, raw):
        return klass.schema().validate(raw)
+
class ArtifactManager:
    '''Middleware which takes care of finding and adding artifacts.'''

    artifacts = {}   # registered name -> artifact class
    markers = {}     # marker extension (e.g. for `name.marker` files)
    extensions = {}  # artifact extension (for `name.ext.parser` files)

    @staticmethod
    def analyze_extension(filename):
        '''Extract parser and artifact from file name extension.'''

        parts = filename.rsplit('.', maxsplit=2)
        parser = None
        artifact = None

        if len(parts) == 2:    # filename.x: x names both parser and artifact
            parser = ParserManager.find_parser_by_extension(parts[-1])
            artifact = ArtifactManager.find_artifact_by_marker(parts[-1])
        elif len(parts) == 3:  # filename.x.y: y names the parser, x the artifact
            parser = ParserManager.find_parser_by_extension(parts[-1])
            artifact = ArtifactManager.find_artifact_by_extension(parts[-2])

        return artifact, parser

    @classmethod
    def load_artifact(klass, input_file, importer=None):
        '''Parse `input_file` and wrap the result in its artifact class.

        Prints a diagnostic and exits the process on parse failure.
        '''
        with open(input_file) as f:
            src = f.read()

        filename = os.path.basename(input_file)
        artifact, parser = ArtifactManager.analyze_extension(filename)

        if not importer:
            importer = ImportManager()

        try:
            return artifact(parser().parse(src), importer)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            traceback.print_exc()
            print(f"🛑 Error parsing `{input_file}'!", file=stderr)
            exit(-1)

    @classmethod
    def find_artifact_by_name(klass, name):
        return klass.artifacts.get(name)

    @classmethod
    def add_artifact(klass, name, artifact, markers={}, extensions={}):
        klass.artifacts[name] = artifact
        klass.markers.update({marker: artifact for marker in markers})
        # Extensions now live in their own table; the original folded them
        # into `markers`, leaving the `extensions` attribute dead code.
        klass.extensions.update({extension: artifact for extension in extensions})

    @classmethod
    def find_artifact_by_marker(klass, marker):
        return klass.markers.get(marker)

    @classmethod
    def find_artifact_by_extension(klass, extension):
        return klass.extensions.get(extension)
+
class ImportManager:
    '''Resolves and caches artifact imports, tracking the import tree.'''

    # Directories searched for imported files, most recently added first.
    import_paths = []

    @classmethod
    def add_path(klass, path):
        '''Prepend `path` so later additions take precedence in lookups.'''
        klass.import_paths.insert(0, path)

    def __init__(self, parent=None):
        self.parent = parent
        # Every manager in one import tree shares the root's cache.
        self.cache = parent.cache if parent else {}
        self.artifacts = {}    # import name -> artifact (direct imports only)
        self.branches = set()  # child managers created for nested imports

    def __iter__(self):
        return iter(self.artifacts)

    def __getitem__(self, key):
        # Fixed: this was named __get__ (the descriptor-protocol hook), so
        # `manager[key]` raised TypeError; subscripting now works.
        return self.artifacts[key]

    @property
    def recursive_imports(self):
        '''All artifacts reachable from here, direct or through branches.'''
        subimports = {key: artifact for branch in self.branches for key, artifact in branch.recursive_imports.items()}
        return {**self.artifacts, **subimports}

    def import_files(self, imports):
        '''Resolve and load every name in `imports`; returns self.

        Exits the process when an import cannot be located in any path.
        '''
        for imp in imports:
            try:
                absolute_paths = [os.path.join(path, imp) for path in self.import_paths]
                found_path = next(path for path in absolute_paths if os.path.exists(path))
                canonical_path = os.path.realpath(found_path)
            except StopIteration:
                print(f"🛑 `{imp}' could not be imported!", file=stderr)
                exit(-1)

            # Try to retrieve artifact from the cache
            artifact = self.cache.get(canonical_path)

            # Artifact was not cached, load from file and cache it
            if not artifact:
                branch = ImportManager(self)
                artifact = ArtifactManager.load_artifact(canonical_path, branch)
                self.branches.add(branch)
                self.cache[canonical_path] = artifact

            self.artifacts[imp] = artifact

        return self

    def module_providing_interface(self, interface_name):
        '''First imported module (searched recursively) with the interface.

        Raises StopIteration when no imported module provides it.
        '''
        return next(
            artifact
            for artifact in self.recursive_imports.values()
            if interface_name in artifact.interfaces
        )
diff --git a/robogen/src/robogen/generator.py b/robogen/src/robogen/generator.py
new file mode 100644
index 00000000..c55a8d4e
--- /dev/null
+++ b/robogen/src/robogen/generator.py
@@ -0,0 +1,221 @@
+import importlib
+import os
+import re
+import shlex
+import subprocess
+from string import Template
+from sys import exit, stderr
+
+from .artifact import ArtifactManager
+
class FileEntry:
    '''Associates a generated file's output path with its populator chain.'''

    def __init__(self, output, populators):
        # Populators run in list order; see insert_populator.
        self.output = output
        self.populators = populators

    def insert_populator(self, populator):
        '''Register `populator` to run before all currently known ones.'''
        self.populators.insert(0, populator)
+
class Generator:
    '''Renders a tree of template files into generated output files.

    Concrete generators define (as class attributes):
      TREE -- mapping of template file name to FileEntry
      PATH -- root directory holding `files/` templates and `populators/`
      KEEP -- file names that must never be overwritten in place
    '''

    @classmethod
    def install_extra_populator(klass, matcher, populator):
        '''Prepend `populator` to every entry whose name `matcher` accepts.'''
        for file, entry in klass.TREE.items():
            if matcher(file):
                entry.insert_populator(populator)

    @classmethod
    def install_default_file_tree(klass):
        '''Turn the flat TREE listing into FileEntry objects, loading each
        file's default populator module when one exists.'''
        # `import importlib` alone does not guarantee the `importlib.util`
        # submodule is bound; import it explicitly.
        import importlib.util

        tree = {}

        for file in klass.TREE:
            source = f'{klass.PATH}/populators/{file}.py'

            # Dummy plug for files without a populator.  This must still be
            # a FileEntry: the original stored a bare list here, which made
            # generate_files crash on `entry.output`.
            if not os.path.exists(source):
                tree[file] = FileEntry(file, [])
                continue

            spec = importlib.util.spec_from_file_location('', source)
            module = importlib.util.module_from_spec(spec)

            spec.loader.exec_module(module)
            tree[file] = FileEntry(file, [module.Populator])

        klass.TREE = tree

    def tree(self, artifact):
        '''Hook: the file tree to generate; may depend on the artifact.'''
        return self.TREE

    def generate_files(self, artifact, output_path):
        '''Render every file in the tree; returns {old: new} for kept files.'''

        # Simple dummy mapping which returns '' for any lookup attempt.
        # This lets the final pass clear all leftover template hooks.
        # (Hoisted out of the loop: it only needs to be defined once.)
        class Dummy:
            def __getitem__(self, key):
                return ''

            def get(self, key, default=None):
                return ''

        diff_files = {}

        for file, entry in self.tree(artifact).items():
            source = f'{self.PATH}/files/{file}'
            target = f'{output_path}/{entry.output}'

            os.makedirs(os.path.dirname(target), exist_ok=True)

            # Avoid overwriting file if marked in KEEP
            if os.path.exists(target) and file in self.KEEP:
                old_file = target
                target = f'{target}.new'
                diff_files[old_file] = target

            # Read input template
            with open(source, 'r') as f:
                result = f.read()

            # Run all populators through it.  '$$' is pre-escaped so literal
            # dollars survive each substitution round.
            for populator in entry.populators:
                template = IndentedTemplate(result.replace('$$', '$$$$'))
                fields = populator(file, artifact).populate()
                result = template.substitute(fields)

            # Cleanup unused hooks
            result = IndentedTemplate(result).substitute(Dummy())

            # Write generated code
            with open(target, 'w') as f:
                f.write(result)

            print(f"\t{file} → {target}", file=stderr)

        return diff_files
+
class GeneratorManager:
    '''Finds generators and drives end-to-end artifact generation.'''

    generators = {}   # (artifact class, language) -> generator class
    diff_tool = None  # external diff command line, shell-style string

    @classmethod
    def set_diff_tool(klass, diff_tool):
        klass.diff_tool = diff_tool

    @classmethod
    def find_generator(klass, artifact, language):
        return klass.generators.get((artifact, language))

    @classmethod
    def add_generator(klass, artifact, language, generator):
        # Languages are matched case-insensitively; names resolve through
        # the artifact registry.
        key = (ArtifactManager.find_artifact_by_name(artifact), language.lower())
        klass.generators[key] = generator

    def generate_files(self, input_file, output_path):
        '''Load the artifact in `input_file` and generate into `output_path`.'''
        artifact = ArtifactManager.load_artifact(input_file)

        name = getattr(artifact, 'name', None)
        language = getattr(artifact, 'language', None)
        generator = self.find_generator(artifact.__class__, language)

        if not generator:
            if language:
                print(f"🛑 No generator for artifact type and language `{language}' found", file=stderr)
            else:
                print(f'🛑 No generator for artifact type found', file=stderr)
            exit(-1)

        print(f"Generating artifact `{name}' for language `{language}' into `{output_path}'", file=stderr)
        diff_files = generator().generate_files(artifact, output_path)

        if diff_files:
            print(f'\nFound {len(diff_files)} modified files:', file=stderr)
            for modified in diff_files:
                print(f'\t{modified}', file=stderr)
            print(f"\nOld files kept intact, updated files saved with suffix `.new'", file=stderr)

        if not self.diff_tool:
            return

        # Launch the configured diff tool over every kept/new file pair.
        print(file=stderr)
        command = shlex.split(self.diff_tool)
        for old_file, new_file in diff_files.items():
            subprocess.call([*command, old_file, new_file])
+
class IndentedTemplate(Template):
    '''string.Template variant that re-indents multi-line substitutions.

    When a hook sits alone on a line (only whitespace before the `$`),
    every line of the substituted value is prefixed with that leading
    whitespace, so generated blocks keep the template's indentation.
    '''

    # TODO: copied straight from RoboCompDSL, maybe cleanup and tidy up the code?
    delimiter = '$'
    # Group names restored: the checked-in source had the `(?P<name>` parts
    # stripped (the same mangling that removed HTML tags elsewhere in this
    # change), which made the regex invalid.  `previous` captures the text
    # that precedes the hook on its line, for re-indentation.
    pattern = r'''
    (?P<previous>[^$\n]*)\$(?:
      (?P<escaped>\$) |                 # Escape sequence of two delimiters
      (?P<named>[_a-z][_a-z0-9]*) |     # delimiter and a Python identifier
      {(?P<braced>[_a-z][_a-z0-9]*)} |  # delimiter and a braced identifier
      (?P<invalid>)                     # Other ill-formed delimiter exprs
    )
    '''

    def __init__(self, template, trimlines=True):
        super(IndentedTemplate, self).__init__(template)
        self.trimlines = trimlines

    def substitute(*args, **kws):
        '''Substitute fields from a mapping and/or keywords, re-indenting.'''
        # Local import: the original referenced ChainMap without importing
        # it, raising NameError whenever a mapping and keywords were mixed.
        from collections import ChainMap

        if not args:
            raise TypeError("descriptor 'substitute' of 'Template' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = ChainMap(kws, args[0])
        else:
            mapping = args[0]

        def reindent(previous, string):
            # Only re-indent when the hook had nothing but whitespace in
            # front of it; otherwise just splice the value in place.
            if previous.strip() == '':
                out_lines = []
                lines = string.splitlines()
                if len(lines) > 0:
                    if self.trimlines:
                        if lines and lines[0].strip() == '':
                            del lines[0]
                        if lines and lines[-1].strip() == '':
                            del lines[-1]
                    for line in lines:
                        if line.strip() != '':
                            out_lines.append(previous + line)
                        else:
                            out_lines.append(line)
                return '\n'.join(out_lines)
            else:
                return previous + string

        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                # Unknown fields are left behind as '${name}' so a later
                # pass can fill -- or clear -- them.
                converted = reindent(mo.group('previous'), str(mapping.get(named, f'${{{named}}}')))
                if converted != '':
                    return converted
                else:
                    return ""
            if mo.group('escaped') is not None:
                return mo.group('previous') + self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)

        substituted = self.pattern.sub(convert, self.template)
        # The only way to remove extra lines that template leaves.
        # NOTE(review): as written this deletes *every* newline-terminated
        # line, which cannot be right for multi-line templates; it looks
        # like an angle-bracketed token was stripped from this regex too.
        # Confirm against RoboCompDSL's original template engine.
        return re.sub('.*\n', '', substituted)

    def identifiers(self):
        '''Return the braced identifiers present in the template, in order.'''
        identifiers = []
        results = self.pattern.findall(self.template)
        for result in results:
            # result[3] is the `braced` group in the pattern's group order.
            if result[3] != '' and result[3] not in identifiers:
                identifiers.append(result[3])
        return identifiers
diff --git a/robogen/src/robogen/main.py b/robogen/src/robogen/main.py
new file mode 100644
index 00000000..95caa816
--- /dev/null
+++ b/robogen/src/robogen/main.py
@@ -0,0 +1,60 @@
+#!/bin/python3
+
+import os
+import typer
+
+from typing import List
+
+from robogen.artifact import ImportManager
+from robogen.generator import GeneratorManager
+from robogen.plugin import PluginManager
+
app = typer.Typer(help='''\
This application creates components from a component description file or .ice
files from interface description files.

usage: {name} [OPTIONS] COMPONENT PATH

Arguments:
  COMPONENT Input file to drive the generation
  PATH Destination for the generated files

Options:
  --plugin-dirs Set list of directories to look for plugins
  --import-dirs Set list of directories to look for import files
  --diff Diff `.new' files against their old counterparts''')

@app.command()
def generate(
    input_file: str = typer.Argument(..., help='Input file to drive the generation'),
    output_path: str = typer.Argument(..., help='Destination for the generated files'),
    plugin_dirs: List[str] = typer.Option([], '--plugin-dir', '-P', help='Add directory to look for plugins'),
    import_dirs: List[str] = typer.Option([], '--import-dir', '-I', help='Add directory to look for import files'),
    diff_tool: str = typer.Option(None, '--diff', '-d', help="Diff `.new' files against their old counterparts")
):
    '''Entry point for the application.'''

    # Add plugin paths (system default, environment override, CLI flags).
    # The original registered plugin_dirs a second time before this block;
    # that duplicate loop has been removed (add_path is a set insert, so
    # this is a cleanup, not a behavior change).
    PluginManager.add_path('/opt/robocomp/robogen/plugins')
    if 'RG_PLUGIN_DIR' in os.environ:
        PluginManager.add_path(os.environ['RG_PLUGIN_DIR'])
    for plugin_dir in plugin_dirs:
        PluginManager.add_path(plugin_dir)

    # Add import paths
    ImportManager.add_path('/opt/robocomp/interfaces/IDSLs')
    if 'RG_IMPORT_DIR' in os.environ:
        ImportManager.add_path(os.environ['RG_IMPORT_DIR'])
    for import_dir in import_dirs:
        ImportManager.add_path(import_dir)

    # Run!
    PluginManager.load_plugins()
    GeneratorManager.set_diff_tool(diff_tool)
    GeneratorManager().generate_files(input_file, output_path)

if __name__ == '__main__':
    app()
diff --git a/robogen/src/robogen/parser.py b/robogen/src/robogen/parser.py
new file mode 100644
index 00000000..3c572bf3
--- /dev/null
+++ b/robogen/src/robogen/parser.py
@@ -0,0 +1,43 @@
+import json
+
+from abc import ABC, abstractmethod
+
class Parser(ABC):
    '''Turn an input string into a dictionary-backed syntax tree.'''

    @classmethod
    def extend_syntax(klass, anchor, *args, **kwargs):
        # Syntax extension points are parser-specific; concrete parsers
        # that support them override this hook.
        raise NotImplementedError

    @abstractmethod
    def parse(self, src):
        '''Return the syntax tree for `src` as a plain dictionary.'''

class JSONParser(Parser):
    '''Parser for artifacts written directly as JSON documents.'''

    def parse(self, src):
        return json.loads(src)
+
class ParserManager:
    '''Middleware which takes care of finding and adding parsers.'''

    # Parsers indexed by registered name.
    parsers = {
        # Base JSON syntax
        'json': JSONParser,
    }

    # Parsers indexed by file name extension.
    extensions = {
        # Base JSON syntax
        'json': JSONParser
    }

    @classmethod
    def add_parser(klass, name, parser, extensions={}):
        klass.parsers[name] = parser
        klass.extensions.update({extension: parser for extension in extensions})

    @classmethod
    def find_parser_by_name(klass, name):
        return klass.parsers.get(name)

    @classmethod
    def find_parser_by_extension(klass, extension):
        # Fixed: the original read `klass.parsers` here, so extensions
        # registered through add_parser(extensions=...) were never found
        # (only extensions that happened to equal a parser *name* worked).
        return klass.extensions.get(extension)
diff --git a/robogen/src/robogen/plugin.py b/robogen/src/robogen/plugin.py
new file mode 100644
index 00000000..447dd2ac
--- /dev/null
+++ b/robogen/src/robogen/plugin.py
@@ -0,0 +1,122 @@
+import os
+from importlib.machinery import SourceFileLoader
+import semver
+from sys import stderr
+from traceback import print_exc
+
+from .artifact import ArtifactManager
+from .parser import ParserManager
+from .generator import GeneratorManager
+
class PluginInstaller:
    '''Gateway for plugins to install functionality into robogen.'''

    @classmethod
    def install_artifact(klass, name, artifact, markers={}, extensions={}):
        # Delegate straight to the artifact registry.
        ArtifactManager.add_artifact(name, artifact, markers, extensions)

    @classmethod
    def install_parser(klass, name, parser, extensions={}):
        # Delegate straight to the parser registry.
        ParserManager.add_parser(name, parser, extensions)

    @classmethod
    def install_generator(klass, artifact, language, generator):
        # Delegate straight to the generator registry.
        GeneratorManager.add_generator(artifact, language, generator)

    @classmethod
    def install_populator(klass, artifact, language, matcher, populator):
        # Attach an extra populator to the generator registered for the
        # given artifact/language pair.
        artifact_class = ArtifactManager.find_artifact_by_name(artifact)
        generator = GeneratorManager.find_generator(artifact_class, language)
        generator.install_extra_populator(matcher, populator)

    @classmethod
    def install_schema_extension(klass, artifact, anchor, *args, **kwargs):
        # Forward a schema extension to the named artifact class.
        artifact_class = ArtifactManager.find_artifact_by_name(artifact)
        artifact_class.extend_schema(anchor, *args, **kwargs)

    @classmethod
    def install_syntax_extension(klass, parser, anchor, *args, **kwargs):
        # Forward a grammar extension to the named parser class.
        parser_class = ParserManager.find_parser_by_name(parser)
        parser_class.extend_syntax(anchor, *args, **kwargs)

    @classmethod
    def install_artifact_ctor_extension(klass, artifact, function):
        # Hook `function` into the named artifact's constructor chain.
        artifact_class = ArtifactManager.find_artifact_by_name(artifact)
        artifact_class.extend_ctor(function)
+
class PluginManager:
    '''Middleware which takes care of finding and loading plugins.'''

    plugins = {}   # plugin name -> loaded module
    paths = set()  # directories scanned for plugin files/packages

    @classmethod
    def add_path(klass, path):
        klass.paths.add(path)

    @classmethod
    def is_plugin_installed(klass, name, version=None):
        '''Truthy when `name` is loaded and (optionally) matches `version`,
        a semver expression such as '==1.2.3'.'''
        module = klass.plugins.get(name, None)
        return module and (not version or semver.match(module.__version__, version))

    @classmethod
    def load_plugins(klass):
        '''Discover plugin candidates in every registered path, install them
        (dependencies first) and report what was loaded on stderr.'''
        candidates = {}

        # Look inside each plugin path for candidates
        for path in klass.paths:
            if not os.path.exists(path):
                continue

            for entry in os.listdir(path):
                entry = f'{path}/{entry}'

                # Ignore Python package shenanigans
                if entry.endswith('/__pycache__') or entry.endswith('/__init__.py'):
                    continue

                if os.path.isdir(entry):
                    entry = f'{entry}/__init__.py'

                try:
                    # NOTE(review): load_module is deprecated upstream; kept
                    # because plugins override __name__ during execution and
                    # the registration below relies on that.
                    module = SourceFileLoader(entry, entry).load_module()
                    candidates[module.__name__] = module
                except Exception:
                    # (dropped the unused `as e` binding)
                    print_exc()
                    print(f"⚠️ Error loading plugin at `{entry}'", file=stderr)

        # Load each plugin one by one, taking care of dependencies first
        for module in candidates.values():
            try:
                klass.install_candidate(module, candidates)
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still aborts.
                print_exc()
                print(f"⚠️ Error installing plugin `{module.__name__}'", file=stderr)

        # Print info about loaded plugins
        print(f'Loaded {len(klass.plugins)} plugins', file=stderr)

        for name, module in klass.plugins.items():
            print(f'\t{name} ({module.__version__})', file=stderr)

        print(file=stderr)

    @classmethod
    def install_candidate(klass, module, candidates):
        '''Install `module`, recursively installing its DEPENDENCIES first.'''
        # Ignore already installed plugins
        if klass.is_plugin_installed(module.__name__, f'=={module.__version__}'):
            return

        # Install plugin dependencies first
        deps = getattr(module, 'DEPENDENCIES', {})
        for name, version in deps.items():
            dep = candidates[name]
            assert not version or semver.match(dep.__version__, version)
            klass.install_candidate(dep, candidates)

        module.Plugin.install(PluginInstaller)
        klass.plugins[module.__name__] = module
diff --git a/robogen/src/robogen/populator.py b/robogen/src/robogen/populator.py
new file mode 100644
index 00000000..cff5e510
--- /dev/null
+++ b/robogen/src/robogen/populator.py
@@ -0,0 +1,11 @@
+from abc import ABC, abstractmethod
+
class Populator(ABC):
    '''Computes the substitution fields for one template file.'''

    def __init__(self, file, artifact):
        super().__init__()
        self.file = file          # template file name being populated
        self.artifact = artifact  # artifact driving the generation

    @abstractmethod
    def populate(self):
        '''Return the mapping of template hooks to generated text.'''
diff --git a/robogen/src/robogen/utils.py b/robogen/src/robogen/utils.py
new file mode 100644
index 00000000..2c8be265
--- /dev/null
+++ b/robogen/src/robogen/utils.py
@@ -0,0 +1,16 @@
# Type names passed through unchanged to the generated Python code.
# NOTE(review): 'long', 'double' and 'byte' are not real Python type names;
# 'byte' is special-cased below, the other two are left pass-through to
# preserve the original mappings -- confirm whether they should become
# 'int'/'float'.
PYTHON_TYPES = {'bool', 'float', 'int', 'long', 'str', 'double', 'byte'}

def full_type_string(type, module):
    '''Map an IDSL type name to the Python type string used in generated code.

    Unknown types are assumed to be declared in `module` and are qualified
    with it.
    '''
    # Remapped names must be checked first: in the original, the membership
    # test ran before them, so 'byte' (which is in PYTHON_TYPES) could never
    # reach its 'bytes' mapping.
    if type == 'byte': return 'bytes'
    elif type == 'string': return 'str'
    elif type in PYTHON_TYPES: return type
    else: return f'{module}.{type}'
+
def number_duplicates(list, matcher=lambda x: x):
    '''Yield (element, suffix) pairs, numbering repeated keys from '1' on.

    The first occurrence of a key gets an empty suffix; later occurrences
    get '1', '2', ... in order of appearance.  Keys are derived from each
    element through `matcher`.
    '''
    occurrences = {}

    for item in list:
        key = matcher(item)
        seen_before = occurrences.get(key, 0)
        occurrences[key] = seen_before + 1
        yield item, str(seen_before) if seen_before else ''