Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
d26bca9
update readme
xionghuichen Nov 11, 2022
d005bd9
support backuping multiple single files
liyc-ai Nov 13, 2022
fc8c626
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 14, 2022
ef0b368
docs(easy_log): add documents
xionghuichen Nov 21, 2022
7663a5b
docs(easy_log): add documents
xionghuichen Nov 21, 2022
e7c58d4
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 21, 2022
0147554
Merge branch 'main' of github.com:polixir/RLAssistant
xionghuichen Nov 21, 2022
72b7d5e
fix(exp_manager): add log_name_formatter
xionghuichen Nov 23, 2022
ab61214
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 23, 2022
c3d00ba
Merge branch 'main' of github.com:polixir/RLAssistant
xionghuichen Nov 23, 2022
ebe2b21
feat(plot): add multiply-metric plotting mode. add demos of plotting …
xionghuichen Nov 24, 2022
ac8e887
feat(plot): add multiply-metric plotting mode. add demos of plotting …
xionghuichen Nov 24, 2022
a20ed50
Update README.md
xionghuichen Nov 24, 2022
85ec328
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 24, 2022
c9a6d5b
Update setup.py
xionghuichen Nov 24, 2022
3214774
Add time tracker
xionghuichen Nov 25, 2022
0657764
rm dpi (set to default)
xionghuichen Nov 25, 2022
14780d9
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 25, 2022
0f4c814
Update setup.py
xionghuichen Nov 24, 2022
13d3811
rm dpi (set to default)
xionghuichen Nov 25, 2022
9eac654
Merge branch 'main' of github.com:xionghuichen/RLAssistant
xionghuichen Nov 25, 2022
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -213,8 +213,8 @@ Result visualization:
We can view the results in tensorboard by: `tensorboard --logdir ${data_root}/log/${task_name}`.
For example, launch tensorboard by `tensorboard --logdir ./example/simplest_code/log/demo_task/2022/03`. We can see results:
![img.png](resource/demo-tb-res.png)
2. Easy_plot toolkit: The intermediate scalar variables are saved in a CSV file in `${data_root}/log/${task_name}/${index_name}/progress.csv`.
We develop high-level APIs to load the CSV files from multiple experiments and group the lines by custom keys. We give an example to use easy_plot toolkit in https://github.com/xionghuichen/RLAssistant/blob/main/example/plot_res.ipynb and more user cases in https://github.com/xionghuichen/RLAssistant/blob/main/test/test_plot.py
2. Easy_plot toolkit:
**We recommend that users maintain their research projects via some jupyter notebooks. You can record your ideas, surmises, related empirical evidence, and benchmark results together in a notebook.** We develop high-level APIs to load the CSV files of the experiments (stored in `${data_root}/log/${task_name}/${index_name}/progress.csv`) and group the curves by custom keys. We give common use cases of the plotter in https://github.com/xionghuichen/RLAssistant/blob/main/test/test_plot.ipynb
The result will be something like this:
![img.png](resource/demo-easy-to-plot-res.png)
3. View data in "results" directory directly: other types of data are stored in `${data_root}/results/${task_name}/${index_name}`
Expand Down
1 change: 1 addition & 0 deletions RLA/easy_log/logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,7 @@ def writekvs(self, kvs):
lines = self.file.readlines()
self.file = open(self.filename, 'w+t')
self.file.seek(0)

for (i, key) in enumerate(self.keys):
if i > 0:
self.file.write(',')
Expand Down
71 changes: 70 additions & 1 deletion RLA/easy_log/time_used_recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,78 @@
from RLA.easy_log import logger
import time

class SingleTimeTracker:
    """
    A reusable context manager that accumulates wall-clock time spent inside
    ``with`` blocks and counts how many times it has been entered.

    Typical usage::

        tracker = SingleTimeTracker('data_loading')
        with tracker:
            load_data()
        # tracker.call_time == 1, tracker.time_cost == elapsed seconds
    """

    def __init__(self, name: str = 'untitled') -> None:
        """
        :param name: human-readable label of this tracker (used as the key
            in TimeTracker's registry and in logged statistics).
        """
        self.name = name
        self.t = 0.0          # perf_counter timestamp recorded at the last __enter__
        self.call_time = 0    # number of times the context has been entered
        self.time_cost = 0.0  # accumulated seconds spent inside the context

    def __enter__(self):
        # Count this invocation and start timing with a monotonic clock.
        self.call_time += 1
        self.t = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Accumulate the elapsed time; returning None does not suppress exceptions.
        self.time_cost += time.perf_counter() - self.t


class TimeTracker:
    """
    A registry of named :class:`SingleTimeTracker` instances plus helpers to
    summarize and log their statistics (total calls, total/average time cost,
    and the share of wall-clock time since this tracker was created).
    """

    def __init__(self):
        self.t0 = time.time()     # construction time; basis for "time cost percentage"
        self.time_dict = dict()   # name -> SingleTimeTracker

    def add(self, name='untitled'):
        """
        Return the tracker registered under ``name``, creating it on first use.

        :param name: key of the SingleTimeTracker in time_dict. Recommended:
            the location in the script, like 'xxx.py Line xxx', which can be
            obtained via
            ``os.path.basename(__file__) + ' line ' + str(sys._getframe().f_lineno)``
        :return: the SingleTimeTracker registered under ``name``.
        """
        if name not in self.time_dict:
            self.time_dict[name] = SingleTimeTracker(name)
        return self.time_dict[name]

    def __call__(self, name: str):
        """Look up an already-registered tracker; raises KeyError if absent."""
        return self.time_dict[name]

    def clear(self):
        """Drop all registered trackers (the reference time ``t0`` is kept)."""
        self.time_dict = dict()

    def statistic_entry(self, name: str):
        """
        Build summary statistics for one registered tracker.

        :param name: key of a previously registered tracker.
        :raises KeyError: if ``name`` has not been registered.
        :return: dict mapping 'total calls/<name>', 'total time cost/<name>',
            'average time cost/<name>', and 'time cost percentage/<name>' to
            their current values.
        """
        if name not in self.time_dict:
            raise KeyError(f"unknown time tracker: {name!r}")
        tracker = self.time_dict[name]
        t_passed = time.time() - self.t0
        # The 1e-6 epsilon guards against division by zero for trackers that
        # were never entered and for a zero elapsed wall-clock time.
        return {
            'total calls/' + name: tracker.call_time,
            'total time cost/' + name: tracker.time_cost,
            'average time cost/' + name: tracker.time_cost / (1e-6 + tracker.call_time),
            'time cost percentage/' + name: tracker.time_cost / (1e-6 + t_passed),
        }

    def get_info(self):
        """Merge the statistic entries of every registered tracker into one dict."""
        info = {}
        for name in self.time_dict:
            info.update(self.statistic_entry(name))
        return info

    def log(self, exclude_lst=None):
        """
        Write all tracker statistics to the logger as a "time dashboard".

        :param exclude_lst: output formats to exclude when recording tabular
            values; defaults to ['csv'].
        """
        # Avoid a mutable default argument; ['csv'] is the effective default.
        exclude_lst = ['csv'] if exclude_lst is None else exclude_lst
        logger.info('---------time dashboard---------')
        for name in self.time_dict:
            for entry_k, entry_v in self.statistic_entry(name).items():
                logger.record_tabular('time_used/' + entry_k, entry_v, exclude=exclude_lst)
                logger.info(f"[{entry_k}]: {entry_v}")
            logger.info('')
        logger.info('---------dashboard end---------')


rc_start_time = {}

def time_record(name):
"""
Expand Down
20 changes: 14 additions & 6 deletions RLA/easy_plot/plot_func_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,12 @@



def default_key_to_legend(parse_dict, split_keys, y_name, use_y_name=True):
    """
    Default formatter turning parsed experiment keys into a curve legend.

    :param parse_dict: mapping from hyper-parameter key to its value.
    :param split_keys: ordered keys to include in the legend.
    :param y_name: name of the plotted metric.
    :param use_y_name: when True (default), append ' eval:<y_name>' to the legend.
    :return: legend string like 'k1=v1.k2=v2 eval:y' (or without the eval suffix).
    """
    task_split_key = '.'.join(f'{k}={parse_dict[k]}' for k in split_keys)
    if use_y_name:
        return task_split_key + ' eval:' + y_name
    else:
        return task_split_key


def plot_func(data_root:str, task_table_name:str, regs:list, split_keys:list, metrics:list,
Expand All @@ -29,6 +32,7 @@ def plot_func(data_root:str, task_table_name:str, regs:list, split_keys:list, me
xlabel: Optional[str] = DEFAULT_X_NAME, ylabel: Optional[str] = None,
scale_dict: Optional[dict] = None, regs2legends: Optional[list] = None,
key_to_legend_fn: Optional[Callable] = default_key_to_legend,
split_by_metrics=True,
save_name: Optional[str] = None, *args, **kwargs):
"""
A high-level matplotlib plotter.
Expand Down Expand Up @@ -68,8 +72,11 @@ def plot_func(data_root:str, task_table_name:str, regs:list, split_keys:list, me
:param key_to_legend_fn: we give a default function to stringify the k-v pairs. you can customize your own function in key_to_legend_fn.
See default_key_to_legend for the detault way and test/test_plot/test_customize_legend_name_mode for details.
:type key_to_legend_fn: Optional[Callable] = default_key_to_legend
:param split_by_metrics: you can plot figure with multiple metrics together.
By default, we will split the curves with the metric and merge them into a group figure.
If you would like to print multiple metrics in single figure, please set the parameter to False.
:type split_by_metrics: Optional[bool]
:param args/kwargs: send other parameters to plot_util.plot_results

:return:
:rtype:
"""
Expand Down Expand Up @@ -112,10 +119,12 @@ def plot_func(data_root:str, task_table_name:str, regs:list, split_keys:list, me
group_fn = lambda r: split_by_reg(taskpath=r, reg_group=reg_group, y_names=y_names)
else:
group_fn = lambda r: picture_split(taskpath=r, split_keys=split_keys, y_names=y_names,
key_to_legend_fn=key_to_legend_fn)
key_to_legend_fn=lambda parse_dict, split_keys, y_name:
key_to_legend_fn(parse_dict, split_keys, y_name, not split_by_metrics))
_, _, lgd, texts, g2lf, score_results = \
plot_util.plot_results(results, xy_fn= lambda r, y_names: csv_to_xy(r, DEFAULT_X_NAME, y_names, final_scale_dict),
group_fn=group_fn, average_group=True, ylabel=ylabel, xlabel=xlabel, regs2legends=regs2legends, *args, **kwargs)
group_fn=group_fn, average_group=True, ylabel=ylabel, xlabel=xlabel, metrics=metrics,
split_by_metrics=split_by_metrics, regs2legends=regs2legends, *args, **kwargs)
print("--- complete process ---")
if save_name is not None:
import os
Expand All @@ -141,7 +150,6 @@ def split_by_reg(taskpath, reg_group, y_names):
assert len(y_names) == 1
return task_split_key, y_names


def split_by_task(taskpath, split_keys, y_names, key_to_legend_fn):
pair_delimiter = '&'
kv_delimiter = '='
Expand Down
Loading