fix lint (#542)
lanpa authored Dec 30, 2019
1 parent 72f5e9e commit e614f38
Showing 6 changed files with 16 additions and 18 deletions.
2 changes: 1 addition & 1 deletion .flake8
@@ -1,4 +1,4 @@
 [flake8]
 max-line-length = 120
-ignore = E305,E402,E721,E741,F401,F403,F405,F821,F841,F999
+ignore = W605,E305,E402,E721,E741,F401,F403,F405,F821,F841,F999
 exclude = tensorboardX/proto
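The newly ignored W605 is pycodestyle's "invalid escape sequence" warning: it fires on backslash escapes that Python does not define, most often regex patterns written as plain strings. A minimal sketch of what it flags (the pattern names are illustrative, not from this repository):

    import re

    # flake8 would report W605 here: '\d' is not a recognized Python escape.
    pattern_flagged = "\d+"

    # Writing the regex as a raw string expresses the same pattern cleanly.
    pattern_clean = r"\d+"

    print(re.findall(pattern_clean, "step 123 of 456"))  # ['123', '456']

Ignoring the warning repo-wide is a policy choice; the usual alternative is converting each offending literal to a raw string.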
2 changes: 1 addition & 1 deletion tensorboardX/embedding.py
@@ -5,6 +5,7 @@
 # see https://github.com/lanpa/tensorboardX/issues/516
 TB_MAX_SPRITE_SIZE = 8192
 
+
 def maybe_upload_file(local_path):
     '''Upload a file to remote cloud storage
     if the path starts with gs:// or s3://
@@ -32,7 +33,6 @@ def maybe_upload_file(local_path):
         blob.upload_from_filename(local_path)
 
 
-
 def make_tsv(metadata, save_path, metadata_header=None):
     if not metadata_header:
         metadata = [str(x) for x in metadata]
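Both hunks in this file are whitespace-only: PEP 8 (flake8 codes E302/E303) wants exactly two blank lines around top-level definitions, so one blank line is added before maybe_upload_file and a surplus one removed before make_tsv. The compliant layout, sketched with the file's own names and bodies elided:

    TB_MAX_SPRITE_SIZE = 8192


    def maybe_upload_file(local_path):
        ...


    def make_tsv(metadata, save_path, metadata_header=None):
        ...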
22 changes: 12 additions & 10 deletions tensorboardX/pytorch_graph.py
@@ -14,6 +14,7 @@
 methods_IO = []
 backward_compat_mode = False
 
+
 class NodeBase(object):
     def __init__(self,
                  debugName=None,
@@ -74,7 +75,7 @@ def __init__(self, node_cpp, valid_methods):
 class NodePyIO(NodePy):
     def __init__(self, node_cpp, input_or_output=None, debugName=''):
         super(NodePyIO, self).__init__(node_cpp, methods_IO)
-        self.tensor_size = [] # tensor_size
+        self.tensor_size = []  # tensor_size
         # Kind attribute string is purely descriptive and will be shown
         # in detailed information for the node in TensorBoard's graph plugin.
         #
@@ -221,9 +222,9 @@ def find_time_for(node_name):
             should_show_warning = True
 
         node_stats.append(
-                NodeExecStats(node_name=v.debugName,
-                              all_start_micros=int(time.time() * 1e7),
-                              all_end_rel_micros=total_time))
+            NodeExecStats(node_name=v.debugName,
+                          all_start_micros=int(time.time() * 1e7),
+                          all_end_rel_micros=total_time))
 
         if v.tensor_size and len(v.tensor_size) > 0: # assume data is float32, only parameter is counted
             node_stats.append(
@@ -256,7 +257,7 @@ def parse(graph, args=None, profile_result=None):
     if not backward_compat_mode:
         try:
             inputnodes[0].debugName()
-        except:
+        except AttributeError:
             backward_compat_mode = True
 
     nodes_py = GraphPy()
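Replacing the bare except with except AttributeError fixes flake8 E722 and narrows the handler to the failure this probe actually expects: presumably older torch builds whose graph inputs lack a debugName() method (the backward_compat_mode flag suggests as much). A bare except would also swallow KeyboardInterrupt and SystemExit. A self-contained sketch of the same feature-detection pattern, with a hypothetical stand-in node:

    class OldNode:
        # hypothetical stand-in for a node without debugName()
        def uniqueName(self):
            return "input.1"

    def needs_backward_compat(node):
        try:
            node.debugName()
        except AttributeError:
            return True
        return False

    print(needs_backward_compat(OldNode()))  # True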
@@ -267,23 +268,22 @@ def parse(graph, args=None, profile_result=None):
             continue
         nodes_py.append(NodePyIO(node, input_or_output='Input', debugName=node.debugName()))
 
-
     for node in graph.nodes():
         # These nodes refers to parameters such as kernel size, stride, etc.
         # The graph will be very tedious if we include all of them. So skip.
         # p.s. Those Constant will be composed by 'prim::listConstruct' and then
         # send to common OPs such as Maxpool, Conv, Linear.
         # We can let user pass verbosity value to dicide how detailed the graph is.
-        if node.kind()=='prim::Constant':
+        if node.kind() == 'prim::Constant':
             continue
 
         # By observation, prim::GetAttr are parameter related. ClassType is used to decorate its scope.
-        if node.kind()=='prim::GetAttr':
+        if node.kind() == 'prim::GetAttr':
             assert node.scopeName() == ''
 
             # Since `populate_namespace_from_OP_to_IO` is already available, we just ignore this.
             # TODO: When it comes to shared parameter, will it still work?
-            if " : ClassType" in node.__repr__():
+            if " : ClassType" in node.__repr__():
                 continue
 
         nodes_py.append(NodePyIO(node, debugName=list(node.outputs())[0].debugName()))
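The == spacing changes in this hunk are flake8 E225 (missing whitespace around operator); behavior is unchanged. For context, the surrounding loop filters TorchScript graph nodes by kind(), skipping prim::Constant parameter nodes. A small sketch of inspecting those kinds on a traced model (the model and input shape are illustrative):

    import torch

    model = torch.nn.Linear(4, 2)
    traced = torch.jit.trace(model, torch.zeros(1, 4))
    for node in traced.graph.nodes():
        if node.kind() == 'prim::Constant':
            continue  # parameter-like constants clutter the graph, as parse() notes
        print(node.kind())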
@@ -295,6 +295,7 @@ def parse(graph, args=None, profile_result=None):
     nodes_py.populate_namespace_from_OP_to_IO()
     return nodes_py.to_proto()
 
+
 def recursive_to_cuda(x):
     """
     Recursively convert tensors in a tuple or list to GPU tensor.
@@ -306,6 +307,7 @@ def recursive_to_cuda(x):
     else:
         return [recursive_to_cuda(_x) for _x in x]
 
+
 def graph(model, args, verbose=False, use_cuda=False, **kwargs):
     """
     This method processes a PyTorch model and produces a `GraphDef` proto
@@ -339,7 +341,7 @@ def graph(model, args, verbose=False, use_cuda=False, **kwargs):
     try:
         if use_cuda:
             model.cuda()
-            args = recursive_to_cuda(args)
+            args = recursive_to_cuda(args)
         with torch.autograd.profiler.profile(record_shapes=True, use_cuda=use_cuda) as prof:
             result = model(*args)
 
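The remaining changes in this file are likewise whitespace-level: blank lines added before top-level defs and a whitespace fix on the recursive_to_cuda call inside graph(). For context, recursive_to_cuda walks nested tuples or lists and moves every tensor to the GPU before profiling; a minimal sketch, assuming the branch elided above checks for torch.Tensor:

    import torch

    def recursive_to_cuda_sketch(x):
        # assumed base case: a bare tensor moves to the GPU directly
        if isinstance(x, torch.Tensor):
            return x.cuda()
        # containers are rebuilt with each element converted recursively
        return [recursive_to_cuda_sketch(_x) for _x in x]

    if torch.cuda.is_available():
        moved = recursive_to_cuda_sketch((torch.zeros(2), [torch.ones(3)]))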
2 changes: 0 additions & 2 deletions tensorboardX/record_writer.py
@@ -122,9 +122,7 @@ def __init__(self, path):
         self.path = path
         self.buffer = io.BytesIO()
-
         from google.cloud import storage
         client = storage.Client()
-
         bucket_name, filepath = self.bucket_and_path()
         bucket = storage.Bucket(client, bucket_name)
         self.blob = storage.Blob(filepath, bucket)
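Beyond the two deleted blank lines, the hunk shows the deferred-import pattern this writer relies on: google.cloud.storage is imported inside __init__ so the dependency is only needed when a gs:// path is actually used. A sketch of the idea (class name hypothetical):

    import io

    class GCSWriterSketch:
        def __init__(self, path):
            self.path = path
            self.buffer = io.BytesIO()
            # imported here rather than at module top level, so the module
            # stays importable without google-cloud-storage installed
            from google.cloud import storage
            self.client = storage.Client()  # requires GCP credentials at runtime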
2 changes: 0 additions & 2 deletions tensorboardX/summary.py
@@ -83,7 +83,6 @@ def hparams(hparam_dict=None, metric_dict=None):
     # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy', description='', dataset_type=DatasetType.DATASET_VALIDATION) # noqa E501
     # exp = Experiment(name='123', description='456', time_created_secs=100.0, hparam_infos=[hp], metric_infos=[mt], user='tw') # noqa E501
 
-
     hps = []
 
     ssi = SessionStartInfo()
@@ -119,7 +118,6 @@ def hparams(hparam_dict=None, metric_dict=None):
                                                  content=content.SerializeToString()))
     exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)])
 
-
     sei = SessionEndInfo(status=Status.STATUS_SUCCESS)
     content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION)
     smd = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME,
4 changes: 2 additions & 2 deletions tensorboardX/writer.py
@@ -334,7 +334,7 @@ def _get_file_writer(self):
             self.all_writers = {self.file_writer.get_logdir(): self.file_writer}
         return self.file_writer
 
-    def add_hparams(self, hparam_dict=None, metric_dict=None, name=None, global_step = None):
+    def add_hparams(self, hparam_dict=None, metric_dict=None, name=None, global_step=None):
         """Add a set of hyperparameters to be compared in tensorboard.
 
         Args:
@@ -367,7 +367,7 @@ def add_hparams(self, hparam_dict=None, metric_dict=None, name=None, global_step

         if not name:
             name = str(time.time())
-        
+
         with SummaryWriter(logdir=os.path.join(self.file_writer.get_logdir(), name)) as w_hp:
             w_hp.file_writer.add_summary(exp)
             w_hp.file_writer.add_summary(ssi)
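The corrected line is the public add_hparams signature (flake8 E251: no spaces around keyword defaults). A short usage sketch, with illustrative values:

    from tensorboardX import SummaryWriter

    with SummaryWriter(logdir='runs/hparam_demo') as w:
        w.add_hparams({'lr': 0.1, 'batch_size': 32},
                      {'accuracy': 0.91})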
