logging toggle with command line #79

Open
wants to merge 1 commit into base: master
3 changes: 1 addition & 2 deletions apprentice/__init__.py
@@ -12,8 +12,7 @@

from . import working_memory, agents, learners, planners


def setup_logging(default_path='logging.yaml', default_level=logging.INFO,
def setup_logging(default_path='logging.yaml', default_level=logging.NOTSET,
env_key='LOG_CFG'):
# https://gist.github.com/kingspp/9451566a5555fb022215ca2b7b802f19
path = default_path
Binary file added apprentice/agents/.DS_Store
Binary file not shown.
44 changes: 38 additions & 6 deletions apprentice/agents/ModularAgent.py
@@ -38,14 +38,25 @@
import atexit
import time
import logging
import os

performance_logger = logging.getLogger('al-performance')
agent_logger = logging.getLogger('al-agent')
agent_logger.setLevel("ERROR")
performance_logger.setLevel("ERROR")

debugging_state = os.environ.get('DEBUGGING_STATE')
debugging_agent = os.environ.get('DEBUGGING_AGENT')
Member commented:

While this is a workable patch to the problem of adding command-line controls for debugging, I'd ideally like to see a more general solution. These loggers already have names ('al-performance' and 'al-agent'), so rather than creating new tags that have to be maintained somewhere, can we just use the existing names in some way? Additionally, different components of the broader library use loggers differently (usually following the common convention of a logger named after the class path). Ideally a solution for the whole library would support that use case as well.
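
For discussion, a minimal sketch of that more general direction: it assumes a single hypothetical environment variable, here called AL_LOG (not part of this PR or the library), listing logger-name=level pairs that are applied to whatever loggers already exist, whether 'al-agent', 'al-performance', or module/class-path loggers.

```python
# Hypothetical sketch, not part of this PR: one env var naming existing
# loggers, so no new debugging tags need to be maintained anywhere.
import logging
import os


def configure_logging_from_env(env_key: str = "AL_LOG") -> None:
    """Parse e.g. AL_LOG='al-agent=DEBUG,al-performance=INFO' and apply it."""
    spec = os.environ.get(env_key, "")
    for item in filter(None, (part.strip() for part in spec.split(","))):
        name, _, level = item.partition("=")
        # setLevel accepts level names as strings; default to DEBUG if omitted.
        logging.getLogger(name).setLevel(level.strip().upper() or "DEBUG")
```

Called once at import or startup, something like this would also cover loggers named after class paths without adding per-component flags.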

debugging_performance = os.environ.get('DEBUGGING_PERFORMANCE')

if(not debugging_agent):
agent_logger.propagate = False
if(not debugging_performance):
performance_logger.propagate = False
if(not debugging_state and not debugging_agent and not debugging_performance):
performance_logger.propagate = False
agent_logger.propagate = False

performance_logger.debug("Performance logger working.")
agent_logger.debug("Agent logger also working.")

def add_QMele_to_state(state):
''' A function which adds ?ele- to state keys... this is necessary in order to use
@@ -121,11 +132,15 @@ def variablize_by_where_swap(self,state,rhs, match):
state = state.get_view("flat_ungrounded")
# print(state)
# print(type(state))
performance_logger.error(state)
performance_logger.error(type(state))
mapping = {'arg' + str(i-1) if i > 0 else 'sel':
ele for i, ele in enumerate(match)}
# for i,x in enumerate(state):
# print("attr%i"%i,x)
# print("val%i"%i,state[x])
for i,x in enumerate(state):
# print("attr%i"%i,x)
# print("val%i"%i,state[x])
performance_logger.error("attr%i"%i,x)
performance_logger.error("val%i"%i,state[x])

r_state = rename_flat(state, {mapping[a]: a for a in mapping})
# r_state = state
@@ -187,13 +202,16 @@ def _relative_rename_recursive(state,center,center_name="sel",mapping=None,dist_
mapping = {center:center_name}
dist_map = {center:0}
# print(state)
performance_logger.error(state)
center_obj = state[center]

stack = []
for d in dirs:
ele = center_obj.get(d,None)
# print("ele")
# print(ele)
performance_logger.error("ele")
performance_logger.error(ele)
if(ele is None or ele == "" or
(ele in dist_map and dist_map[ele] <= dist_map[center] + 1) or
ele not in state):
@@ -267,13 +285,14 @@ def variablize_state_metaskill(self,state,rhs, where_match):
exp = Explanation(rhs,mapping)
resp = exp.to_response(state,self)
# pprint(skill_info)
performance_logger.error(skill_info)
key = ("skill-%s"%resp["rhs_id"], *mapping.values())
to_append[key] = resp["inputs"]
to_append[("skill-%s"%resp["rhs_id"],"count")] = to_append.get(("skill-%s"%resp["rhs_id"],"count"),0) + 1
to_append[("all-skills","count")] = to_append.get(("all-skills","count"),0) + 1
# for attr,val in resp["inputs"].items():
# key = (attr,("skill-%s"%resp["rhs_id"], *skill_info['mapping'].values()))
# # print(key, ":", val)
# print(key, ":", val)
# flat_ungrounded[key] = val
# print("--------END THIS---------")

@@ -418,6 +437,7 @@ def applicable_explanations(self, state, rhs_list=None,
else:
pred_state = state
# print("MATCH", rhs,match)
agent_logger.error("MATCH", rhs, match)
if(not skip_when):
p = self.when_learner.predict(rhs, pred_state)

@@ -509,6 +529,8 @@ def explanations_from_skills(self, state, sai, rhs_list,
else:
# print("Trying:", rhs)
# print(self.planner.unify_op.__code__.co_varnames)
agent_logger.error("Trying:", rhs)
agent_logger.error(self.planner.unify_op.__code__.co_varnames)
mappings = self.planner.unify_op(state,rhs.input_rule, sai,
foci_of_attention=foci_of_attention)

@@ -521,6 +543,7 @@
# allow_copy=False)
for mapping in mappings:
# print("MAAAP", mapping)
agent_logger.error("MAAAP", mapping)
if(type(self.planner).__name__ == "FoPlannerModule"):
m = {"?sel": "?ele-" + sai.selection if sai.selection[0] != "?" else sai.selection}
else:
@@ -574,6 +597,7 @@ def fit(self, explanations, state, reward): # -> return None
for exp,_reward in zip(explanations,reward):
mapping = list(exp.mapping.values())
# print(exp, mapping, 'rew:', _reward)
agent_logger.error(exp, mapping, 'rew:', _reward)
self.when_learner.ifit(exp.rhs, state, mapping, _reward)
self.which_learner.ifit(exp.rhs, state, _reward)
self.where_learner.ifit(exp.rhs, mapping, state, _reward)
@@ -582,6 +606,7 @@ def train(self, state:Dict, sai:Sai=None, reward:float=None,
skill_label=None, foci_of_attention=None, rhs_id=None, mapping=None,
ret_train_expl=False, add_skill_info=False,**kwargs): # -> return None
# pprint(state)
agent_logger.error(state)

if(type(self.planner).__name__ == "FoPlannerModule"):
state = add_QMele_to_state(state)
@@ -594,6 +619,7 @@


# print(sai, foci_of_attention)
agent_logger.error(sai, foci_of_attention)
###########ONLY NECESSARY FOR IMPLICIT NEGATIVES#############
_ = [x for x in self.applicable_explanations(state)]
############################################################
@@ -602,11 +628,15 @@ def train(self, state:Dict, sai:Sai=None, reward:float=None,
# or we must infer it from the skills that would have fired
if(rhs_id is not None and mapping is not None):
# print("Reward: ", reward)
agent_logger("Reward: ", reward)
explanations = [Explanation(self.rhs_list[rhs_id], mapping)]
# print("EX: ",str(explanations[0]))
agent_logger("EX: ",str(explanations[0]))
elif(sai is not None):
# pprint(state.get_view("object"))
agent_logger.error(state.get_view("object"))
# print("TO HOW")
agent_logger.error("TO HOW")
t_s = time.time_ns()
explanations = self.explanations_from_skills(state, sai,
self.rhs_list,
@@ -634,6 +664,7 @@ def train(self, state:Dict, sai:Sai=None, reward:float=None,
rhs_by_how = self.rhs_by_how.get(skill_label, {})
for exp in explanations:
# print("FOUND EX:", str(exp))
agent_logger.error("FOUND EX:", str(exp))
Member commented:

The logging library's message functions don't automatically concatenate their arguments the way print does, so they can't be used as a drop-in replacement like this. See: https://docs.python.org/3/library/logging.html#logging.Logger.debug. When I run this code on my end I mostly just see a bunch of exception stack traces.
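
A standalone illustration of the difference, with placeholder values for rhs and match: logging's message functions take a %-style format string whose extra arguments are interpolated only when the record is actually handled, so the print-style calls in this diff would need placeholders.

```python
import logging

agent_logger = logging.getLogger("al-agent")
rhs, match = "rhs-0", ("?sel", "?arg0")  # placeholder values for illustration

# OK: lazy %-style formatting; the message is built only when the record is handled.
agent_logger.error("MATCH %s %s", rhs, match)

# Not OK: print-style comma-separated arguments. "MATCH" % (rhs, match) fails
# during formatting, and logging reports it as a stack trace on stderr, which
# matches the behaviour described above.
agent_logger.error("MATCH", rhs, match)
```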

if(exp.rhs.as_tuple in rhs_by_how):
exp.rhs = rhs_by_how[exp.rhs.as_tuple]
else:
@@ -665,6 +696,7 @@ def check(self, state, selection, action, inputs):
responses = resp['responses']
for resp in responses:
# print(resp['selection'],resp['action'],resp['inputs'], _inputs_equal(resp['inputs'],inputs))
agent_logger.error(resp['selection'],resp['action'],resp['inputs'], _inputs_equal(resp['inputs'],inputs))
Member commented:

I would be careful of logger constructions like this (i.e., ones that make function calls inside the log call), because those calls are evaluated whether or not the logger is active. It would be good to gate these behind a check that the appropriate level is enabled.
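
A self-contained sketch of that gating pattern using Logger.isEnabledFor, with a stand-in function for an expensive argument such as the _inputs_equal(...) call above:

```python
import logging

agent_logger = logging.getLogger("al-agent")


def expensive_summary(state):
    # Stand-in for work such as _inputs_equal(...) or building a large view.
    return sorted(state.items())


state = {"b": 2, "a": 1}
if agent_logger.isEnabledFor(logging.DEBUG):
    # expensive_summary only runs when the logger would actually emit DEBUG.
    agent_logger.debug("state summary: %s", expensive_summary(state))
```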

if(resp['selection'] == selection and
resp['action'] == action and
_inputs_equal(resp['inputs'],inputs)):
2 changes: 1 addition & 1 deletion apprentice/logging.yaml
@@ -17,7 +17,7 @@ formatters:
handlers:
console:
class: logging.StreamHandler
level: DEBUG #This seems to change the level globally, which is very annoying esp. w/ numba
level: NOTSET #This seems to change the level globally, which is very annoying esp. w/ numba
formatter: standard
stream: ext://sys.stdout
