|
def | __init__ (self, hSeg.Human_ColorSG_HeightInRange human_seg, rSeg.robot_inRange_Height robot_seg, tSeg.tabletop_GMM bg_seg, pSeg.Puzzle_Residual puzzle_seg, HeightEstimator heightEstimator, Params params, np.ndarray nonROI_init=None) |
|
def | adapt (self) |
|
def | correct (self) |
|
def | get_layer (self, layer_name, mask_only=False, BEV_rectify=False) |
|
def | get_nonROI (self) |
|
def | get_trackers (self, layer_name, BEV_rectify=False) |
|
def | measure (self, img) |
|
def | predict (self) |
|
def | process (self, img) |
|
def | process_depth (self, depth) |
|
def | vis_layer (self, layer_name, bool mask_only=False, bool BEV_rectify=False, plt.Axes ax=None) |
|
def | vis_puzzles (self, mask_only=False, BEV_rectify=True, fh=None) |
|
def | vis_scene (self, List[bool] mask_only=[False, False, False, False], List[bool] BEV_rectify=[False, False, False, True], fh=None) |
|
|
def | buildFromRosbag (rosbag_file, rTh_high=1.0, rTh_low=0.02, hTracker=None, pTracker=None, rTracker=None, hSeg.Params hParams=hSeg.Params(), rSeg.Params rParams=rSeg.Params(), pSeg.Params_Residual pParams=pSeg.Params_Residual(), tSeg.Params_GMM bgParams=tSeg.Params_GMM(), bool reCalibrate=True, str cache_dir=None, bool ros_pub=False, str empty_table_rgb_topic="empty_table_rgb", str empty_table_dep_topic="empty_table_dep", str glove_rgb_topic="glove_rgb", str human_wave_rgb_topic="human_wave_rgb", str human_wave_dep_topic="human_wave_dep", float depth_scale=None, intrinsic=None, nonROI_region=None, Params() params=Params()) |
|
def | buildFromSourceDir (Callable imgSource, intrinsic, rTh_high=1.0, rTh_low=0.02, hTracker=None, pTracker=None, rTracker=None, hSeg.Params hParams=hSeg.Params(), rSeg.Params rParams=rSeg.Params(), pSeg.Params_Residual pParams=pSeg.Params_Residual(), tSeg.Params_GMM bgParams=tSeg.Params_GMM(), Params() params=Params(), bool reCalibrate=True, str cache_dir=None) |
|
def | buildFromSourcePub (Base cam_runner, rTh_high=1.0, rTh_low=0.02, hTracker=None, pTracker=None, rTracker=None, hSeg.Params hParams=hSeg.Params(), rSeg.Params rParams=rSeg.Params(), pSeg.Params_Residual pParams=pSeg.Params_Residual(), tSeg.Params_GMM bgParams=tSeg.Params_GMM(), bool ros_pub=True, str empty_table_rgb_topic="empty_table_rgb", str empty_table_dep_topic="empty_table_dep", str glove_rgb_topic="glove_rgb", str human_wave_rgb_topic="human_wave_rgb", str human_wave_dep_topic="human_wave_dep", nonROI_region=None, Params() params=Params()) |
|
The scene interpreter will split the scene into four layers:
1. Background (tabletop) layer
2. Human layer
3. Robot arm layer
4. Puzzle piece layer
The first three rely on their own segmenter, and the puzzle piece layer
is assumed to be the residual.
The interpreter will provide the following additional functions:
1. Bird-eye-view rectification
@param[in] human_seg The human segmenter.
@param[in] robot_seg The robot segmenter.
@param[in] bg_seg The background segmenter.
@param[in] params Other parameters
@param[in] nonROI_init A mask of initial nonROI region, which will be always treated as the background
def buildFromSourceDir |
( |
Callable |
imgSource, |
|
|
|
intrinsic, |
|
|
|
rTh_high = 1.0 , |
|
|
|
rTh_low = 0.02 , |
|
|
|
hTracker = None , |
|
|
|
pTracker = None , |
|
|
|
rTracker = None , |
|
|
hSeg.Params |
hParams = hSeg.Params() , |
|
|
rSeg.Params |
rParams = rSeg.Params() , |
|
|
pSeg.Params_Residual |
pParams = pSeg.Params_Residual() , |
|
|
tSeg.Params_GMM |
bgParams = tSeg.Params_GMM() , |
|
|
Params() |
params = Params() , |
|
|
bool |
reCalibrate = True , |
|
|
str |
cache_dir = None |
|
) |
| |
|
static |
The interface for building the sceneInterpreterV1.0 from an image source.
Given an image source which can provide the stream of the rgb and depth data,
this builder will build the scene interpreter in the following process:
1. Ask for an empty tabletop rgb and depth data.
2. Use the depth to build a height estimator
3. Ask for a target color glove rgb image
4. Use the target color glove rgb image and the tabletop rgb image to build the \
human segmenter
5. Build a tabletop segmenter
6. Ask for the human to wave across the working area with the glove. \
The rgb data will be used to calibrate the tabletop segmenter
7. Build the robot segmenter and the puzzle
This function will save the calibration data to various files in the cache_dir,
and load from that directory when reCalibrate is set to False
Args:
imgSource (Callable): A callable for getting the rgb and depth image. \
Could be the camera runner interface or the ROS subscriber
intrinsic (np.ndarray. Shape:(3,3)): The camera intrinsic matrix
rTh_high (float, optional): The upper height threshold for the robot segmenter. Defaults to 1.0.
rTh_low (float, optional): The lower height threshold for the robot segmenter. Defaults to 0.02.
hTracker ([type], optional): human tracker. Defaults to None.
pTracker ([type], optional): puzzle tracker. Defaults to None.
rTracker ([type], optional): robot tracker. Defaults to None.
hParams (hSeg.Params, optional): human segmenter parameters. Defaults to hSeg.Params().
rParams (rSeg.Params, optional): robot segmenter parameters. Defaults to rSeg.Params().
pParams (pSeg.Params_Residual, optional): puzzle segmenter parameters. Defaults to pSeg.Params_Residual().
bgParams (tSeg.Params_GMM, optional): background segmenter parameters. Defaults to tSeg.Params_GMM().
params (Params, optional): the scene interpreter parameters. Defaults to Params().
reCalibrate (bool, optional): Defaults to True. If set to True, will ignore previous calibration results and re-calibrate
cache_dir (String, optional): the directory storing the calibration data. Defaults to None, in which case will need \
manual calibration. Otherwise will directly look for the calibration data. If no desired data found, then will \
still need manual calibration, where the data will be saved in the cache folder.
publish_calib_data
def buildFromSourcePub |
( |
Base |
cam_runner, |
|
|
|
rTh_high = 1.0 , |
|
|
|
rTh_low = 0.02 , |
|
|
|
hTracker = None , |
|
|
|
pTracker = None , |
|
|
|
rTracker = None , |
|
|
hSeg.Params |
hParams = hSeg.Params() , |
|
|
rSeg.Params |
rParams = rSeg.Params() , |
|
|
pSeg.Params_Residual |
pParams = pSeg.Params_Residual() , |
|
|
tSeg.Params_GMM |
bgParams = tSeg.Params_GMM() , |
|
|
bool |
ros_pub = True , |
|
|
str |
empty_table_rgb_topic = "empty_table_rgb" , |
|
|
str |
empty_table_dep_topic = "empty_table_dep" , |
|
|
str |
glove_rgb_topic = "glove_rgb" , |
|
|
str |
human_wave_rgb_topic = "human_wave_rgb" , |
|
|
str |
human_wave_dep_topic = "human_wave_dep" , |
|
|
|
nonROI_region = None , |
|
|
Params() |
params = Params() |
|
) |
| |
|
static |
The interface for building the sceneInterpreterV1.0 from an image source.
Given an image source which can provide the stream of the rgb and depth data,
this builder will build the scene interpreter in the following process:
1. Ask for an empty tabletop rgb and depth data.
2. Use the depth to build a height estimator
3. Ask for a target color glove rgb image
4. Use the target color glove rgb image and the tabletop rgb image to build the \
human segmenter
5. Build a tabletop segmenter
6. Ask for the human to wave across the working area with the glove. \
The rgb data will be used to calibrate the tabletop segmenter
7. Build the robot segmenter and the puzzle
The builder provides the option to publish all the calibration data (and the depth scale) to ROS topics
Args:
cam_runner: The camera runner
intrinsic (np.ndarray. Shape:(3,3)): The camera intrinsic matrix
rTh_high (float, optional): The upper height threshold for the robot segmenter. Defaults to 1.0.
rTh_low (float, optional): The lower height threshold for the robot segmenter. Defaults to 0.02.
hTracker ([type], optional): human tracker. Defaults to None.
pTracker ([type], optional): puzzle tracker. Defaults to None.
rTracker ([type], optional): robot tracker. Defaults to None.
hParams (hSeg.Params, optional): human segmenter parameters. Defaults to hSeg.Params().
rParams (rSeg.Params, optional): robot segmenter parameters. Defaults to rSeg.Params().
pParams (pSeg.Params_Residual, optional): puzzle segmenter parameters. Defaults to pSeg.Params_Residual().
bgParams (tSeg.Params_GMM, optional): background segmenter parameters. Defaults to tSeg.Params_GMM().
ros_pub (bool, optional): If true, will publish the data to the ros. Defaults to True
params (Params, optional): the scene interpreter parameters. Defaults to Params().
def get_layer |
( |
|
self, |
|
|
|
layer_name, |
|
|
|
mask_only = False , |
|
|
|
BEV_rectify = False |
|
) |
| |
Get the content or the binary mask of a layer
@param[in] layer_name The name of the layer mask to get
Choices = ["bg", "human", "robot", "puzzle", "nonROI", "sourceRGB"]
@param[in] mask_only Binary. If true, will get the binary mask
@param[in] BEV_rectify Binary. If true, will rectify the layer
to the bird-eye-view before return
def get_trackers |
( |
|
self, |
|
|
|
layer_name, |
|
|
|
BEV_rectify = False |
|
) |
| |
Get the track pointers for a layer.
If no tracker is applied or no track pointers are detected, then will return None
Args:
layer_name (str): The name of the layer trackers to get. \
Choices = ["human", "robot", "puzzle"]
BEV_rectify (bool, optional): Rectify to the bird-eye-view or not. Defaults to False.
Returns:
tpt [np.ndarray, (2, N)]: The tracker pointers of the layer
def vis_layer |
( |
|
self, |
|
|
|
layer_name, |
|
|
bool |
mask_only = False , |
|
|
bool |
BEV_rectify = False , |
|
|
plt.Axes |
ax = None |
|
) |
| |
Visualize the layer
@param[in] layer_name The name of the layer mask to visualize
Choices = ["bg", "human", "robot", "puzzle"]
@param[in] mask_only Binary. If true, will visualize the binary mask
@param[in] BEV_rectify Binary. If true, will rectify the layer
to the bird-eye-view for visualization
@param[in] ax The axis for visualization
def vis_scene |
( |
|
self, |
|
|
List[bool] |
mask_only = [False, False, False, False] , |
|
|
List[bool] |
BEV_rectify = [False, False, False, True] , |
|
|
|
fh = None |
|
) |
| |
Visualize four layers ["bg", "human", "robot", "puzzle"]
@param[in] mask_only A list of bool corresponding to the 4 layers above.
If true, will only visualize the binary mask
@param[in] BEV_rectify A list of bool corresponding to the 4 layers above.
If true, will visualize the bird-eye-view of the layer
@param[in] fh The figure handle. matplotlib Figure type