further eye catcher plot work

commit eecee47b9e (parent e42e151c17)
5 changed files with 794 additions and 114 deletions

.gitignore (vendored, 4 lines changed)

@@ -9,8 +9,10 @@ cache/
 *.svg
 *.gv
 
+*.png
+
 traces/
 venv310/
 **/build/
 **/install/
-**/log/
+**/log/

Binary file not shown (image removed; previous size: 539 KiB)

(deleted CSV file, 113 lines)

@@ -1,113 +0,0 @@
experiment_type,experiment_dir,chain,mean_latency_ms,std_latency_ms,actual_runs,theoretical_max_runs,completion_percentage,input_delay_ms
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/baroA/alt --> /output/flight/cmd,35.42897435897436,6.192051282051281,4.102564102564102,200,2.051282051282051,100
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/cameraA/raw --> /output/cameraA/mapped,74.52780000000001,6.6764,26.0,100,26.0,200
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/cameraB/raw --> /output/classifier/classification,98.844,31.873800000000003,32.7,133,24.586466165413537,150
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/gpsA/fix --> /output/flight/cmd,30.827368421052633,5.925526315789473,4.105263157894737,200,2.0526315789473686,100
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/imuA/data --> /output/flight/cmd,33.086052631578944,6.046052631578946,4.105263157894737,200,2.0526315789473686,100
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/lidar/scan --> /output/flight/cmd,23.061794871794874,5.396410256410257,4.102564102564102,200,2.051282051282051,100
edf_single_timed_20_boosted_10,edf_single_timed_20_boosted_10,/input/operator/commands --> /output/flight/cmd,29.039743589743587,5.063333333333334,4.102564102564102,200,2.051282051282051,100
edf_single_timed_20,edf_single_timed_20,/input/baroA/alt --> /output/flight/cmd,35.124418604651154,6.056976744186046,3.8372093023255816,200,1.9186046511627908,100
edf_single_timed_20,edf_single_timed_20,/input/cameraA/raw --> /output/cameraA/mapped,74.8638,6.913800000000001,25.96,100,25.96,200
edf_single_timed_20,edf_single_timed_20,/input/cameraB/raw --> /output/classifier/classification,99.209,32.5044,32.6,133,24.51127819548872,150
edf_single_timed_20,edf_single_timed_20,/input/gpsA/fix --> /output/flight/cmd,30.65209302325581,5.489767441860465,3.8372093023255816,200,1.9186046511627908,100
edf_single_timed_20,edf_single_timed_20,/input/imuA/data --> /output/flight/cmd,32.79976744186047,5.736976744186046,3.8372093023255816,200,1.9186046511627908,100
edf_single_timed_20,edf_single_timed_20,/input/lidar/scan --> /output/flight/cmd,23.58,4.84953488372093,3.8372093023255816,200,1.9186046511627908,100
edf_single_timed_20,edf_single_timed_20,/input/operator/commands --> /output/flight/cmd,27.936046511627907,4.471627906976744,3.8372093023255816,200,1.9186046511627908,100
ros_multi_timed_20,ros_multi_timed_20,/input/baroA/alt --> /output/flight/cmd,77.7224,38.486999999999995,1073.12,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/baroB/alt --> /output/telemetry/radio,67.8986,38.959799999999994,3197.82,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/cameraA/raw --> /output/cameraA/mapped,76.226,8.531600000000001,96.86,100,96.86,200
ros_multi_timed_20,ros_multi_timed_20,/input/cameraA/raw --> /output/telemetry/radio,183.4386,68.3676,3184.16,100,105.0,200
ros_multi_timed_20,ros_multi_timed_20,/input/cameraB/raw --> /output/classifier/classification,81.2846,9.4038,128.34,133,96.49624060150376,150
ros_multi_timed_20,ros_multi_timed_20,/input/gpsA/fix --> /output/flight/cmd,77.76100000000001,37.5182,1073.06,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/gpsB/fix --> /output/telemetry/radio,67.8846,36.48500000000001,3200.64,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/imuA/data --> /output/flight/cmd,77.796,38.1552,1073.18,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/imuB/data --> /output/telemetry/radio,67.9932,37.3536,3199.1,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/lidar/scan --> /output/flight/cmd,68.4622,37.3688,1073.76,200,105.0,100
ros_multi_timed_20,ros_multi_timed_20,/input/operator/commands --> /output/flight/cmd,68.36500000000001,35.9894,1073.48,200,105.0,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/baroA/alt --> /output/flight/cmd,59.1838775510204,29.148571428571433,769.0204081632653,200,105.0,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/baroB/alt --> /output/telemetry/radio,35.7862,16.4298,174.42,200,87.21,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/cameraA/raw --> /output/cameraA/mapped,74.4708,6.9518,98.46,100,98.46,200
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/cameraA/raw --> /output/telemetry/radio,161.5576,33.4002,174.2,100,105.0,200
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/cameraB/raw --> /output/classifier/classification,80.88102040816325,9.17,130.55102040816325,133,98.158661961025,150
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/gpsA/fix --> /output/flight/cmd,63.50000000000001,31.609387755102045,769.0,200,105.0,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/gpsB/fix --> /output/telemetry/radio,31.027199999999997,17.3174,174.42,200,87.21,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/imuA/data --> /output/flight/cmd,59.050399999999996,28.727199999999996,753.64,200,105.0,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/imuB/data --> /output/telemetry/radio,33.2768,18.6738,174.42,200,87.21,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/lidar/scan --> /output/flight/cmd,53.76862745098039,29.196470588235297,753.8627450980392,200,105.0,100
edf_multi_timed_20_boosted_500,edf_multi_timed_20_boosted_500,/input/operator/commands --> /output/flight/cmd,58.61078431372549,29.589607843137255,753.8823529411765,200,105.0,100
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/baroA/alt --> /output/flight/cmd,35.738139534883715,5.748372093023256,4.0,200,2.0,100
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/cameraA/raw --> /output/cameraA/mapped,74.54119999999999,6.5826,26.0,100,26.0,200
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/cameraB/raw --> /output/classifier/classification,99.0342,32.3718,32.74,133,24.61654135338346,150
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/gpsA/fix --> /output/flight/cmd,30.709534883720924,5.705581395348838,4.0,200,2.0,100
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/imuA/data --> /output/flight/cmd,33.36255813953488,5.513488372093024,4.0,200,2.0,100
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/lidar/scan --> /output/flight/cmd,23.437441860465114,4.923255813953489,4.0,200,2.0,100
edf_single_timed_20_boosted_50,edf_single_timed_20_boosted_50,/input/operator/commands --> /output/flight/cmd,28.58860465116279,3.7818604651162793,4.0,200,2.0,100
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/baroA/alt --> /output/flight/cmd,35.773250000000004,5.296,3.9,200,1.95,100
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/cameraA/raw --> /output/cameraA/mapped,74.5806,6.636,26.0,100,26.0,200
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/cameraB/raw --> /output/classifier/classification,99.0724,32.465999999999994,32.84,133,24.691729323308273,150
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/gpsA/fix --> /output/flight/cmd,30.87375,5.2219999999999995,3.9,200,1.95,100
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/imuA/data --> /output/flight/cmd,33.338,5.1255,3.9,200,1.95,100
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/lidar/scan --> /output/flight/cmd,23.27475,4.28075,3.9,200,1.95,100
edf_single_timed_20_boosted_500,edf_single_timed_20_boosted_500,/input/operator/commands --> /output/flight/cmd,29.66375,4.34175,3.9,200,1.95,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/baroA/alt --> /output/flight/cmd,59.2382,28.2522,774.5,200,105.0,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/baroB/alt --> /output/telemetry/radio,34.864599999999996,14.1428,171.72,200,85.86,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/cameraA/raw --> /output/cameraA/mapped,74.5012,6.878599999999999,98.72,100,98.72,200
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/cameraA/raw --> /output/telemetry/radio,159.89939999999996,31.602999999999998,171.68,100,105.0,200
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/cameraB/raw --> /output/classifier/classification,80.8814,8.9892,130.94,133,98.45112781954887,150
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/gpsA/fix --> /output/flight/cmd,63.6704,30.875800000000005,774.5,200,105.0,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/gpsB/fix --> /output/telemetry/radio,30.210000000000004,15.1086,171.72,200,85.86,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/imuA/data --> /output/flight/cmd,59.562,28.5752,774.5,200,105.0,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/imuB/data --> /output/telemetry/radio,32.63,16.3702,171.72,200,85.86,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/lidar/scan --> /output/flight/cmd,54.1478,28.7682,774.5,200,105.0,100
edf_multi_timed_20_boosted_10,edf_multi_timed_20_boosted_10,/input/operator/commands --> /output/flight/cmd,58.743599999999994,29.187400000000004,774.5,200,105.0,100
ros_single_timed_20,ros_single_timed_20,/input/baroA/alt --> /output/flight/cmd,483.0948,63.2838,118.36,200,59.18,100
ros_single_timed_20,ros_single_timed_20,/input/baroB/alt --> /output/telemetry/radio,467.39779999999996,59.0954,117.08,200,58.540000000000006,100
ros_single_timed_20,ros_single_timed_20,/input/cameraA/raw --> /output/cameraA/mapped,385.3639999999999,237.4138,94.42,100,94.42,200
ros_single_timed_20,ros_single_timed_20,/input/cameraA/raw --> /output/telemetry/radio,844.8803999999999,321.32239999999996,115.9,100,105.0,200
ros_single_timed_20,ros_single_timed_20,/input/cameraB/raw --> /output/classifier/classification,169.2946,66.2818,112.24,133,84.39097744360902,150
ros_single_timed_20,ros_single_timed_20,/input/gpsA/fix --> /output/flight/cmd,486.62440000000004,64.31439999999999,118.48,200,59.24,100
ros_single_timed_20,ros_single_timed_20,/input/gpsB/fix --> /output/telemetry/radio,470.49940000000004,61.70079999999999,117.46,200,58.72999999999999,100
ros_single_timed_20,ros_single_timed_20,/input/imuA/data --> /output/flight/cmd,484.7596,63.57379999999999,118.38,200,59.19,100
ros_single_timed_20,ros_single_timed_20,/input/imuB/data --> /output/telemetry/radio,468.7872,60.461400000000005,117.16,200,58.58,100
ros_single_timed_20,ros_single_timed_20,/input/lidar/scan --> /output/flight/cmd,311.30480000000006,43.138400000000004,119.2,200,59.599999999999994,100
ros_single_timed_20,ros_single_timed_20,/input/operator/commands --> /output/flight/cmd,309.44019999999995,42.80259999999999,119.06,200,59.53000000000001,100
edf_multi_timed_20,edf_multi_timed_20,/input/baroA/alt --> /output/flight/cmd,59.171400000000006,28.876400000000004,769.32,200,105.0,100
edf_multi_timed_20,edf_multi_timed_20,/input/baroB/alt --> /output/telemetry/radio,35.613,14.744599999999998,173.64,200,86.82,100
edf_multi_timed_20,edf_multi_timed_20,/input/cameraA/raw --> /output/cameraA/mapped,74.48700000000001,7.1496,98.68,100,98.68,200
edf_multi_timed_20,edf_multi_timed_20,/input/cameraA/raw --> /output/telemetry/radio,161.5632,31.481800000000003,173.54,100,105.0,200
edf_multi_timed_20,edf_multi_timed_20,/input/cameraB/raw --> /output/classifier/classification,80.9862,9.209,130.7,133,98.27067669172932,150
edf_multi_timed_20,edf_multi_timed_20,/input/gpsA/fix --> /output/flight/cmd,64.0672,32.28,769.32,200,105.0,100
edf_multi_timed_20,edf_multi_timed_20,/input/gpsB/fix --> /output/telemetry/radio,30.8428,15.6802,173.64,200,86.82,100
edf_multi_timed_20,edf_multi_timed_20,/input/imuA/data --> /output/flight/cmd,60.100199999999994,29.397199999999994,769.32,200,105.0,100
edf_multi_timed_20,edf_multi_timed_20,/input/imuB/data --> /output/telemetry/radio,32.967600000000004,16.905,173.64,200,86.82,100
edf_multi_timed_20,edf_multi_timed_20,/input/lidar/scan --> /output/flight/cmd,53.48519999999999,28.731399999999997,769.28,200,105.0,100
edf_multi_timed_20,edf_multi_timed_20,/input/operator/commands --> /output/flight/cmd,58.02719999999999,29.204199999999997,769.32,200,105.0,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/baroA/alt --> /output/flight/cmd,59.22893617021277,28.212553191489366,771.1702127659574,200,105.0,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/baroB/alt --> /output/telemetry/radio,34.84583333333333,15.932291666666666,174.875,200,87.4375,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/cameraA/raw --> /output/cameraA/mapped,74.66879999999999,8.4206,98.52,100,98.52,200
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/cameraA/raw --> /output/telemetry/radio,160.73208333333335,32.815000000000005,174.85416666666666,100,105.0,200
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/cameraB/raw --> /output/classifier/classification,81.05958333333332,10.574375,130.64583333333334,133,98.22994987468672,150
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/gpsA/fix --> /output/flight/cmd,62.287659574468094,30.74404255319149,771.1702127659574,200,105.0,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/gpsB/fix --> /output/telemetry/radio,30.258750000000003,16.552916666666665,174.875,200,87.4375,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/imuA/data --> /output/flight/cmd,58.75000000000001,28.383829787234042,771.1702127659574,200,105.0,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/imuB/data --> /output/telemetry/radio,32.87978723404255,17.99340425531915,175.2340425531915,200,87.61702127659575,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/lidar/scan --> /output/flight/cmd,53.87255319148936,29.093617021276597,771.1702127659574,200,105.0,100
edf_multi_timed_20_boosted_50,edf_multi_timed_20_boosted_50,/input/operator/commands --> /output/flight/cmd,58.9159574468085,29.80680851063829,771.1702127659574,200,105.0,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/baroA/alt --> /output/flight/cmd,59.09428571428571,28.03642857142857,771.4285714285714,200,105.0,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/baroB/alt --> /output/telemetry/radio,34.47714285714286,13.967142857142857,167.28571428571428,200,83.64285714285714,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/cameraA/raw --> /output/cameraA/mapped,74.82000000000001,8.075714285714286,98.71428571428571,100,98.71428571428571,200
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/cameraA/raw --> /output/telemetry/radio,159.91142857142856,31.444999999999997,167.0,100,105.0,200
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/cameraB/raw --> /output/classifier/classification,81.09785714285715,9.535,130.92857142857142,133,98.44253490870031,150
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/gpsA/fix --> /output/flight/cmd,63.925000000000004,30.982857142857142,771.4285714285714,200,105.0,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/gpsB/fix --> /output/telemetry/radio,30.61857142857142,15.20142857142857,167.28571428571428,200,83.64285714285714,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/imuA/data --> /output/flight/cmd,59.98,28.602857142857147,771.4285714285714,200,105.0,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/imuB/data --> /output/telemetry/radio,32.915000000000006,16.017857142857142,167.28571428571428,200,83.64285714285714,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/lidar/scan --> /output/flight/cmd,53.777142857142856,28.602142857142848,771.4285714285714,200,105.0,100
edf_multi_timed_20_boosted_100,edf_multi_timed_20_boosted_100,/input/operator/commands --> /output/flight/cmd,58.76928571428572,29.2,771.4285714285714,200,105.0,100
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/baroA/alt --> /output/flight/cmd,35.05540540540541,5.996756756756756,4.378378378378378,200,2.189189189189189,100
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/cameraA/raw --> /output/cameraA/mapped,74.62279999999998,6.7632,25.98,100,25.980000000000004,200
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/cameraB/raw --> /output/classifier/classification,99.44580000000002,32.4518,32.7,133,24.586466165413537,150
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/gpsA/fix --> /output/flight/cmd,30.72783783783784,5.941081081081082,4.378378378378378,200,2.189189189189189,100
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/imuA/data --> /output/flight/cmd,32.395135135135135,5.7843243243243245,4.378378378378378,200,2.189189189189189,100
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/lidar/scan --> /output/flight/cmd,23.244864864864862,5.153243243243244,4.378378378378378,200,2.189189189189189,100
edf_single_timed_20_boosted_100,edf_single_timed_20_boosted_100,/input/operator/commands --> /output/flight/cmd,28.249999999999993,4.603783783783783,4.378378378378378,200,2.189189189189189,100

eye_catcher_plot_dis.py (new file, 424 lines)

@@ -0,0 +1,424 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import glob
import argparse
from pathlib import Path

def parse_arguments():
    parser = argparse.ArgumentParser(description='Cross-experiment analysis of chain performance.')
    parser.add_argument('--experiments-dir', '-e', required=True,
                        help='Path to directory containing experiment subdirectories')
    parser.add_argument('--supplementary', '-s', required=True,
                        help='Path to supplementary.csv file with input delays')
    parser.add_argument('--output', '-o', default='cross_experiment_analysis',
                        help='Output filename prefix for the plots (will add chain name and .png)')
    parser.add_argument('--experiment-duration', '-d', type=int, default=20,
                        help='Duration of each experiment in seconds (default: 20)')
    return parser.parse_args()

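# Example invocation (hypothetical paths, for illustration only; the flags are
# the ones defined in parse_arguments above):
#   python eye_catcher_plot_dis.py \
#       --experiments-dir ./experiments \
#       --supplementary ./supplementary.csv \
#       --output cross_experiment_analysis \
#       --experiment-duration 20
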
def load_supplementary_data(supplementary_path):
    """Load the supplementary data with input delays and theoretical perfect times for each chain."""
    supp_df = pd.read_csv(supplementary_path)
    # Create dictionaries for quick lookup
    delay_dict = dict(zip(supp_df['chain'], supp_df['input_delay']))

    # Load theoretical perfect e2e time (assuming the third column exists)
    if len(supp_df.columns) >= 3:
        perfect_time_dict = dict(zip(supp_df['chain'], supp_df.iloc[:, 2]))  # Third column
        return delay_dict, perfect_time_dict
    else:
        print("Warning: No third column found for theoretical perfect times. Using input_delay as fallback.")
        perfect_time_dict = delay_dict.copy()  # Fallback to input_delay
        return delay_dict, perfect_time_dict

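# Assumed layout of supplementary.csv (the 'chain' and 'input_delay' column
# names come from the code above; the third column's name and the example
# values are illustrative only):
#   chain,input_delay,perfect_time
#   /input/baroA/alt --> /output/flight/cmd,100,20.0
#   /input/cameraA/raw --> /output/cameraA/mapped,200,50.0
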
def calculate_theoretical_max_runs(chain, input_delay_ms, experiment_duration_s):
    """Calculate the theoretical maximum number of runs for a chain."""
    runs_per_second = 1000 / input_delay_ms  # Convert ms to runs per second
    max_runs = runs_per_second * experiment_duration_s
    return int(max_runs)

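# Worked example: a chain triggered every 100 ms over a 20 s experiment can
# complete at most (1000 / 100) * 20 = 200 runs, matching the
# theoretical_max_runs column in the deleted results CSV above.
#   calculate_theoretical_max_runs('/input/baroA/alt --> /output/flight/cmd', 100, 20)  # -> 200
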
def parse_experiment_subtypes(exp_name):
    """Parse experiment name to extract sub-types."""
    exp_name_lower = exp_name.lower()

    # Extract scheduler type
    scheduler = 'edf' if 'edf' in exp_name_lower else 'ros' if 'ros' in exp_name_lower else 'unknown'

    # Extract threading type
    threading = 'multi' if 'multi' in exp_name_lower else 'single' if 'single' in exp_name_lower else 'unknown'

    # Extract timing type
    timing = 'direct' if 'direct' in exp_name_lower else 'timed' if 'timed' in exp_name_lower else 'unknown'

    return scheduler, threading, timing

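# Example with an experiment name from the results data:
#   parse_experiment_subtypes('edf_multi_timed_20_boosted_50')
#   # -> ('edf', 'multi', 'timed')
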
def load_experiment_data(experiments_dir, delay_dict, perfect_time_dict, experiment_duration):
    """Load all experiment data and calculate performance metrics."""
    all_data = []

    # Find all subdirectories containing results.csv
    experiment_dirs = [d for d in Path(experiments_dir).iterdir()
                       if d.is_dir() and (d / 'results.csv').exists()]

    print(f"Found {len(experiment_dirs)} experiment directories")

    for exp_dir in experiment_dirs:
        results_path = exp_dir / 'results.csv'

        try:
            df = pd.read_csv(results_path)

            # Extract experiment name (remove timestamp if present)
            if 'experiment_name' in df.columns:
                exp_name = df['experiment_name'].iloc[0]
                exp_name = exp_name.split('-')[0] if '-' in exp_name else exp_name
            else:
                exp_name = exp_dir.name

            # Parse experiment sub-types
            scheduler, threading, timing = parse_experiment_subtypes(exp_name)

            # Group by chain and calculate metrics
            for chain, chain_data in df.groupby('chain'):
                if chain in delay_dict and chain in perfect_time_dict:
                    # Calculate theoretical maximum runs
                    input_delay = delay_dict[chain]
                    perfect_time = perfect_time_dict[chain]
                    theoretical_max = calculate_theoretical_max_runs(
                        chain, input_delay, experiment_duration
                    )

                    # Calculate actual performance metrics
                    actual_runs = chain_data['count'].mean()
                    mean_latency = chain_data['mean'].mean()
                    std_latency = chain_data['std'].mean()

                    # Normalize latency by theoretical perfect time
                    normalized_latency = mean_latency / perfect_time

                    # Calculate percentage of theoretical maximum
                    completion_percentage = (actual_runs / theoretical_max) * 100

                    if completion_percentage > 100:
                        print(f"Warning: Completion percentage for {chain} in {exp_name} exceeds 100%: {completion_percentage:.2f}%")
                        # Cap at 105% to avoid visual clutter in the plot; values
                        # above 100% may indicate an issue with the data or the
                        # calculation.
                        completion_percentage = 105

                    all_data.append({
                        'experiment_type': exp_name,
                        'experiment_dir': exp_dir.name,
                        'scheduler': scheduler,
                        'threading': threading,
                        'timing': timing,
                        'chain': chain,
                        'mean_latency_ms': mean_latency,
                        'normalized_latency': normalized_latency,
                        'std_latency_ms': std_latency,
                        'actual_runs': actual_runs,
                        'theoretical_max_runs': theoretical_max,
                        'completion_percentage': completion_percentage,
                        'input_delay_ms': input_delay,
                        'perfect_time_ms': perfect_time
                    })
                else:
                    missing_info = []
                    if chain not in delay_dict:
                        missing_info.append("input delay")
                    if chain not in perfect_time_dict:
                        missing_info.append("perfect time")
                    print(f"Warning: Chain '{chain}' missing {', '.join(missing_info)} in supplementary data")

        except Exception as e:
            print(f"Error processing {results_path}: {e}")

    return pd.DataFrame(all_data)

def create_visualizations(data_df, output_prefix):
    """Create separate visualization plots for each chain showing all experiment types."""
    plt.style.use('seaborn-v0_8-darkgrid')

    # Get unique chains
    chains = sorted(data_df['chain'].unique())

    print(f"Creating {len(chains)} separate plots for chains: {chains}")

    created_files = []

    for chain in chains:
        # Filter data for this chain
        chain_data = data_df[data_df['chain'] == chain]

        # Get unique experiment types for this chain
        experiment_types = sorted(chain_data['experiment_type'].unique())

        # Create a more sophisticated color mapping based on sub-types
        # Define base colors for each scheduler/timing combination
        scheduler_colors = {'edf-direct': 'blue', 'edf-timed': 'cyan', 'ros-direct': 'red', 'ros-timed': 'orange', 'unknown': 'gray'}

        # Create markers for threading type
        threading_markers = {'multi': 'o', 'single': 's', 'unknown': '^'}

        # Create alpha values for timing type
        timing_alpha = {'direct': 0.9, 'timed': 0.9, 'unknown': 0.4}

        # Set up the figure
        fig, ax = plt.subplots(figsize=(16, 10))

        # Plot data points for each experiment type
        for exp_type in experiment_types:
            exp_data = chain_data[chain_data['experiment_type'] == exp_type]

            # Get the first row to extract sub-types (same for all rows with the same exp_type)
            first_row = exp_data.iloc[0]
            scheduler = first_row['scheduler']
            threading = first_row['threading']
            timing = first_row['timing']

            # Create label with sub-type information
            label = f"{exp_type} ({scheduler.upper()}-{threading.upper()}-{timing.upper()})"

            ax.scatter(
                exp_data['completion_percentage'],
                exp_data['normalized_latency'],
                color=scheduler_colors.get(f"{scheduler}-{timing}", 'gray'),
                marker=threading_markers.get(threading, 'o'),
                alpha=timing_alpha.get(timing, 0.7),
                label=label,
                s=120,
                edgecolors='black',
                linewidth=0.8
            )

        # Set labels and title
        ax.set_xlabel('Completion Rate (% of Theoretical Maximum)', fontsize=14, fontweight='bold')
        ax.set_ylabel('Normalized Latency (Actual / Theoretical Perfect)', fontsize=14, fontweight='bold')
        ax.set_title(f'Performance Analysis: {chain}\nNormalized Latency vs Completion Rate Across Experiments\n' +
                     f'Colors: EDF(Blue)/ROS(Red), Markers: Multi(○)/Single(□), Alpha: Direct(High)/Timed(Low)',
                     fontsize=16, fontweight='bold', pad=20)

        # Add grid for better readability
        ax.grid(True, alpha=0.3)

        # Set axis limits
        ax.set_xlim(0, 107)
        ax.set_ylim(bottom=1)

        # Create legend for experiment types
        legend = ax.legend(title='Experiment Configuration',
                           loc='best',
                           fontsize=9,
                           title_fontsize=11,
                           framealpha=0.9,
                           fancybox=True,
                           shadow=True,
                           bbox_to_anchor=(1.05, 1))

        # Add a second legend for the encoding
        from matplotlib.lines import Line2D
        legend_elements = [
            Line2D([0], [0], marker='o', color='blue', linestyle='None', markersize=10, alpha=0.9, label='EDF-Multi-Direct'),
            Line2D([0], [0], marker='s', color='blue', linestyle='None', markersize=10, alpha=0.9, label='EDF-Single-Direct'),
            Line2D([0], [0], marker='o', color='red', linestyle='None', markersize=10, alpha=0.9, label='ROS-Multi-Direct'),
            Line2D([0], [0], marker='s', color='red', linestyle='None', markersize=10, alpha=0.9, label='ROS-Single-Direct'),
            Line2D([0], [0], marker='o', color='cyan', linestyle='None', markersize=10, alpha=0.9, label='EDF-Multi-Timed'),
            Line2D([0], [0], marker='s', color='cyan', linestyle='None', markersize=10, alpha=0.9, label='EDF-Single-Timed'),
            Line2D([0], [0], marker='o', color='orange', linestyle='None', markersize=10, alpha=0.9, label='ROS-Multi-Timed'),
            Line2D([0], [0], marker='s', color='orange', linestyle='None', markersize=10, alpha=0.9, label='ROS-Single-Timed'),
        ]

        # Keep the experiment-type legend visible: a second ax.legend() call
        # replaces the first, so re-add the first legend as a plain artist
        # before creating the encoding legend.
        ax.add_artist(legend)
        encoding_legend = ax.legend(handles=legend_elements, title='Encoding Guide',
                                    loc='upper left', fontsize=8, title_fontsize=10,
                                    framealpha=0.9, fancybox=True, shadow=True)

        # Adjust layout to accommodate legend
        plt.tight_layout()

        # Save the plot
        safe_chain_name = chain.replace('/', '_').replace(' ', '_')
        output_path = f"{output_prefix}_{safe_chain_name}.png"
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        created_files.append(output_path)

        # Show the plot
        plt.show()

        # Close the figure to free memory
        plt.close()

    return created_files

def create_combined_summary_plot(data_df, output_prefix):
    """Create a combined summary plot showing all chains in subplots."""
    chains = sorted(data_df['chain'].unique())
    n_chains = len(chains)

    # Calculate subplot grid dimensions
    n_cols = min(3, n_chains)  # Max 3 columns
    n_rows = (n_chains + n_cols - 1) // n_cols  # Ceiling division

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(6*n_cols, 5*n_rows))

    # Ensure axes is always a 2D array
    if n_rows == 1 and n_cols == 1:
        axes = np.array([[axes]])
    elif n_rows == 1:
        axes = axes.reshape(1, -1)
    elif n_cols == 1:
        axes = axes.reshape(-1, 1)

    plt.style.use('seaborn-v0_8-darkgrid')

    for i, chain in enumerate(chains):
        row = i // n_cols
        col = i % n_cols
        ax = axes[row, col]

        # Filter data for this chain
        chain_data = data_df[data_df['chain'] == chain]
        experiment_types = sorted(chain_data['experiment_type'].unique())

        # Create color palette for experiment types
        exp_colors = sns.color_palette("husl", len(experiment_types))
        exp_color_map = dict(zip(experiment_types, exp_colors))

        # Plot data points
        for exp_type in experiment_types:
            exp_data = chain_data[chain_data['experiment_type'] == exp_type]
            ax.scatter(
                exp_data['completion_percentage'],
                exp_data['normalized_latency'],
                color=exp_color_map[exp_type],
                s=60,
                alpha=0.7,
                edgecolors='black',
                linewidth=0.5
            )

        ax.set_title(chain, fontsize=12, fontweight='bold')
        ax.set_xlabel('Completion Rate (%)', fontsize=10)
        ax.set_ylabel('Normalized Latency', fontsize=10)
        ax.grid(True, alpha=0.3)

        # Set axis limits for consistency
        ax.set_xlim(0, 107)
        ax.set_ylim(bottom=1)

    # Hide unused subplots
    for i in range(n_chains, n_rows * n_cols):
        row = i // n_cols
        col = i % n_cols
        axes[row, col].set_visible(False)

    plt.suptitle('Performance Analysis Summary - All Chains\n(Normalized Latency vs Completion Rate)',
                 fontsize=16, fontweight='bold', y=0.98)
    plt.tight_layout()

    summary_output = f"{output_prefix}_summary.png"
    plt.savefig(summary_output, dpi=300, bbox_inches='tight')
    plt.show()
    plt.close()

    return summary_output

def print_summary_statistics(data_df):
    """Print summary statistics for the analysis."""
    print("\n" + "="*80)
    print("CROSS-EXPERIMENT ANALYSIS SUMMARY")
    print("="*80)

    print(f"\nTotal experiments analyzed: {data_df['experiment_type'].nunique()}")
    print(f"Total chains analyzed: {data_df['chain'].nunique()}")
    print(f"Total data points: {len(data_df)}")

    print("\nPer Chain Summary:")
    chain_summary = data_df.groupby('chain').agg({
        'completion_percentage': ['mean', 'std', 'min', 'max'],
        'normalized_latency': ['mean', 'std', 'min', 'max'],
        'mean_latency_ms': ['mean', 'std', 'min', 'max'],
        'experiment_type': 'count'
    }).round(2)
    print(chain_summary)

    print("\nPer Experiment Type Summary:")
    exp_summary = data_df.groupby('experiment_type').agg({
        'completion_percentage': ['mean', 'std'],
        'normalized_latency': ['mean', 'std'],
        'mean_latency_ms': ['mean', 'std'],
        'chain': 'count'
    }).round(2)
    print(exp_summary)

    # Find best and worst performing combinations
    print("\nBest Performance (highest completion rate):")
    best_completion = data_df.loc[data_df['completion_percentage'].idxmax()]
    print(f"  {best_completion['chain']} - {best_completion['experiment_type']}")
    print(f"  Completion: {best_completion['completion_percentage']:.1f}%, Normalized Latency: {best_completion['normalized_latency']:.2f}x, Raw Latency: {best_completion['mean_latency_ms']:.1f}ms")

    print("\nWorst Performance (lowest completion rate):")
    worst_completion = data_df.loc[data_df['completion_percentage'].idxmin()]
    print(f"  {worst_completion['chain']} - {worst_completion['experiment_type']}")
    print(f"  Completion: {worst_completion['completion_percentage']:.1f}%, Normalized Latency: {worst_completion['normalized_latency']:.2f}x, Raw Latency: {worst_completion['mean_latency_ms']:.1f}ms")

    print("\nBest Normalized Latency (closest to theoretical perfect):")
    best_latency = data_df.loc[data_df['normalized_latency'].idxmin()]
    print(f"  {best_latency['chain']} - {best_latency['experiment_type']}")
    print(f"  Normalized Latency: {best_latency['normalized_latency']:.2f}x, Completion: {best_latency['completion_percentage']:.1f}%, Raw Latency: {best_latency['mean_latency_ms']:.1f}ms")

    print("\nWorst Normalized Latency (furthest from theoretical perfect):")
    worst_latency = data_df.loc[data_df['normalized_latency'].idxmax()]
    print(f"  {worst_latency['chain']} - {worst_latency['experiment_type']}")
    print(f"  Normalized Latency: {worst_latency['normalized_latency']:.2f}x, Completion: {worst_latency['completion_percentage']:.1f}%, Raw Latency: {worst_latency['mean_latency_ms']:.1f}ms")

def main():
    args = parse_arguments()

    print("Starting cross-experiment analysis...")

    # Load supplementary data
    print(f"Loading supplementary data from: {args.supplementary}")
    delay_dict, perfect_time_dict = load_supplementary_data(args.supplementary)
    print(f"Found delay information for {len(delay_dict)} chains")
    print(f"Found perfect time information for {len(perfect_time_dict)} chains")

    # Load all experiment data
    print(f"Loading experiment data from: {args.experiments_dir}")
    data_df = load_experiment_data(args.experiments_dir, delay_dict, perfect_time_dict, args.experiment_duration)

    if data_df.empty:
        print("No data found! Please check your paths and file formats.")
        return

    print(f"Loaded data for {len(data_df)} experiment-chain combinations")

    # Create individual visualizations for each chain
    print("Creating individual visualizations...")
    created_files = create_visualizations(data_df, args.output)

    # Create combined summary plot
    print("Creating combined summary plot...")
    summary_file = create_combined_summary_plot(data_df, args.output)
    created_files.append(summary_file)

    # Print summary statistics
    print_summary_statistics(data_df)

    # Save detailed data to CSV for further analysis
    csv_output = f"{args.output}_detailed_data.csv"
    data_df.to_csv(csv_output, index=False)
    print(f"\nDetailed data saved to: {csv_output}")

    print("\nCreated visualization files:")
    for file in created_files:
        print(f"  - {file}")

if __name__ == "__main__":
    main()

eye_catcher_plot_inverse.py (new file, 367 lines)

@@ -0,0 +1,367 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import glob
import argparse
from pathlib import Path

def parse_arguments():
    parser = argparse.ArgumentParser(description='Cross-experiment analysis of chain performance.')
    parser.add_argument('--experiments-dir', '-e', required=True,
                        help='Path to directory containing experiment subdirectories')
    parser.add_argument('--supplementary', '-s', required=True,
                        help='Path to supplementary.csv file with input delays')
    parser.add_argument('--output', '-o', default='cross_experiment_analysis',
                        help='Output filename prefix for the plots (will add chain name and .png)')
    parser.add_argument('--experiment-duration', '-d', type=int, default=20,
                        help='Duration of each experiment in seconds (default: 20)')
    return parser.parse_args()

def load_supplementary_data(supplementary_path):
    """Load the supplementary data with input delays and theoretical perfect times for each chain."""
    supp_df = pd.read_csv(supplementary_path)
    # Create dictionaries for quick lookup
    delay_dict = dict(zip(supp_df['chain'], supp_df['input_delay']))

    # Load theoretical perfect e2e time (assuming the third column exists)
    if len(supp_df.columns) >= 3:
        perfect_time_dict = dict(zip(supp_df['chain'], supp_df.iloc[:, 2]))  # Third column
        return delay_dict, perfect_time_dict
    else:
        print("Warning: No third column found for theoretical perfect times. Using input_delay as fallback.")
        perfect_time_dict = delay_dict.copy()  # Fallback to input_delay
        return delay_dict, perfect_time_dict

def calculate_theoretical_max_runs(chain, input_delay_ms, experiment_duration_s):
    """Calculate the theoretical maximum number of runs for a chain."""
    runs_per_second = 1000 / input_delay_ms  # Convert ms to runs per second
    max_runs = runs_per_second * experiment_duration_s
    return int(max_runs)

def load_experiment_data(experiments_dir, delay_dict, perfect_time_dict, experiment_duration):
    """Load all experiment data and calculate performance metrics."""
    all_data = []

    # Find all subdirectories containing results.csv
    experiment_dirs = [d for d in Path(experiments_dir).iterdir()
                       if d.is_dir() and (d / 'results.csv').exists()]

    print(f"Found {len(experiment_dirs)} experiment directories")

    for exp_dir in experiment_dirs:
        results_path = exp_dir / 'results.csv'

        try:
            df = pd.read_csv(results_path)

            # Extract experiment name (remove timestamp if present)
            if 'experiment_name' in df.columns:
                exp_name = df['experiment_name'].iloc[0]
                exp_name = exp_name.split('-')[0] if '-' in exp_name else exp_name
            else:
                exp_name = exp_dir.name

            # Group by chain and calculate metrics
            for chain, chain_data in df.groupby('chain'):
                if chain in delay_dict and chain in perfect_time_dict:
                    # Calculate theoretical maximum runs
                    input_delay = delay_dict[chain]
                    perfect_time = perfect_time_dict[chain]
                    theoretical_max = calculate_theoretical_max_runs(
                        chain, input_delay, experiment_duration
                    )

                    # Calculate actual performance metrics
                    actual_runs = chain_data['count'].mean()
                    mean_latency = chain_data['mean'].mean()
                    std_latency = chain_data['std'].mean()

                    # Normalize latency by theoretical perfect time
                    normalized_latency = mean_latency / perfect_time

                    # Calculate percentage of theoretical maximum
                    completion_percentage = (actual_runs / theoretical_max) * 100

                    if completion_percentage > 100:
                        print(f"Warning: Completion percentage for {chain} in {exp_name} exceeds 100%: {completion_percentage:.2f}%")
                        # Cap at 105% to avoid visual clutter in the plot; values
                        # above 100% may indicate an issue with the data or the
                        # calculation.
                        completion_percentage = 105

                    all_data.append({
                        'experiment_type': exp_name,
                        'experiment_dir': exp_dir.name,
                        'chain': chain,
                        'mean_latency_ms': mean_latency,
                        'normalized_latency': normalized_latency,
                        'std_latency_ms': std_latency,
                        'actual_runs': actual_runs,
                        'theoretical_max_runs': theoretical_max,
                        'completion_percentage': completion_percentage,
                        'input_delay_ms': input_delay,
                        'perfect_time_ms': perfect_time
                    })
                else:
                    missing_info = []
                    if chain not in delay_dict:
                        missing_info.append("input delay")
                    if chain not in perfect_time_dict:
                        missing_info.append("perfect time")
                    print(f"Warning: Chain '{chain}' missing {', '.join(missing_info)} in supplementary data")

        except Exception as e:
            print(f"Error processing {results_path}: {e}")

    return pd.DataFrame(all_data)

def create_visualizations(data_df, output_prefix):
    """Create separate visualization plots for each chain showing all experiment types."""
    plt.style.use('seaborn-v0_8-darkgrid')

    # Get unique chains
    chains = sorted(data_df['chain'].unique())

    print(f"Creating {len(chains)} separate plots for chains: {chains}")

    created_files = []

    for chain in chains:
        # Filter data for this chain
        chain_data = data_df[data_df['chain'] == chain]

        # Get unique experiment types for this chain
        experiment_types = sorted(chain_data['experiment_type'].unique())

        # Create color palette for experiment types
        exp_colors = sns.color_palette("husl", len(experiment_types))
        exp_color_map = dict(zip(experiment_types, exp_colors))

        # Set up the figure
        fig, ax = plt.subplots(figsize=(14, 10))

        # Plot data points for each experiment type
        for exp_type in experiment_types:
            exp_data = chain_data[chain_data['experiment_type'] == exp_type]

            ax.scatter(
                exp_data['completion_percentage'],
                exp_data['normalized_latency'],
                color=exp_color_map[exp_type],
                label=exp_type,
                s=120,
                alpha=0.8,
                edgecolors='black',
                linewidth=0.8
            )

        # Set labels and title
        ax.set_xlabel('Completion Rate (% of Theoretical Maximum)', fontsize=14, fontweight='bold')
        ax.set_ylabel('Normalized Latency (Actual / Theoretical Perfect)', fontsize=14, fontweight='bold')
        ax.set_title(f'Performance Analysis: {chain}\nNormalized Latency vs Completion Rate Across Experiments',
                     fontsize=16, fontweight='bold', pad=20)

        # Add grid for better readability
        ax.grid(True, alpha=0.3)

        # Set axis limits
        ax.set_xlim(0, 107)
        ax.set_ylim(bottom=1)

        # Create legend for experiment types
        legend = ax.legend(title='Experiment Type',
                           loc='best',
                           fontsize=10,
                           title_fontsize=12,
                           framealpha=0.9,
                           fancybox=True,
                           shadow=True,
                           bbox_to_anchor=(1.05, 1))

        # Adjust layout to accommodate legend
        plt.tight_layout()

        # Save the plot
        safe_chain_name = chain.replace('/', '_').replace(' ', '_')
        output_path = f"{output_prefix}_{safe_chain_name}.png"
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        created_files.append(output_path)

        # Show the plot
        plt.show()

        # Close the figure to free memory
        plt.close()

    return created_files

def create_combined_summary_plot(data_df, output_prefix):
    """Create a combined summary plot showing all chains in subplots."""
    chains = sorted(data_df['chain'].unique())
    n_chains = len(chains)

    # Calculate subplot grid dimensions
    n_cols = min(3, n_chains)  # Max 3 columns
    n_rows = (n_chains + n_cols - 1) // n_cols  # Ceiling division

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(6*n_cols, 5*n_rows))

    # Ensure axes is always a 2D array
    if n_rows == 1 and n_cols == 1:
        axes = np.array([[axes]])
    elif n_rows == 1:
        axes = axes.reshape(1, -1)
    elif n_cols == 1:
        axes = axes.reshape(-1, 1)

    plt.style.use('seaborn-v0_8-darkgrid')

    for i, chain in enumerate(chains):
        row = i // n_cols
        col = i % n_cols
        ax = axes[row, col]

        # Filter data for this chain
        chain_data = data_df[data_df['chain'] == chain]
        experiment_types = sorted(chain_data['experiment_type'].unique())

        # Create color palette for experiment types
        exp_colors = sns.color_palette("husl", len(experiment_types))
        exp_color_map = dict(zip(experiment_types, exp_colors))

        # Plot data points
        for exp_type in experiment_types:
            exp_data = chain_data[chain_data['experiment_type'] == exp_type]
            ax.scatter(
                exp_data['completion_percentage'],
                exp_data['normalized_latency'],
                color=exp_color_map[exp_type],
                s=60,
                alpha=0.7,
                edgecolors='black',
                linewidth=0.5
            )

        ax.set_title(chain, fontsize=12, fontweight='bold')
        ax.set_xlabel('Completion Rate (%)', fontsize=10)
        ax.set_ylabel('Normalized Latency', fontsize=10)
        ax.grid(True, alpha=0.3)

        # Set axis limits for consistency
        ax.set_xlim(0, 107)
        ax.set_ylim(bottom=1)

    # Hide unused subplots
    for i in range(n_chains, n_rows * n_cols):
        row = i // n_cols
        col = i % n_cols
        axes[row, col].set_visible(False)

    plt.suptitle('Performance Analysis Summary - All Chains\n(Normalized Latency vs Completion Rate)',
                 fontsize=16, fontweight='bold', y=0.98)
    plt.tight_layout()

    summary_output = f"{output_prefix}_summary.png"
    plt.savefig(summary_output, dpi=300, bbox_inches='tight')
    plt.show()
    plt.close()

    return summary_output

def print_summary_statistics(data_df):
    """Print summary statistics for the analysis."""
    print("\n" + "="*80)
    print("CROSS-EXPERIMENT ANALYSIS SUMMARY")
    print("="*80)

    print(f"\nTotal experiments analyzed: {data_df['experiment_type'].nunique()}")
    print(f"Total chains analyzed: {data_df['chain'].nunique()}")
    print(f"Total data points: {len(data_df)}")

    print("\nPer Chain Summary:")
    chain_summary = data_df.groupby('chain').agg({
        'completion_percentage': ['mean', 'std', 'min', 'max'],
        'normalized_latency': ['mean', 'std', 'min', 'max'],
        'mean_latency_ms': ['mean', 'std', 'min', 'max'],
        'experiment_type': 'count'
    }).round(2)
    print(chain_summary)

    print("\nPer Experiment Type Summary:")
    exp_summary = data_df.groupby('experiment_type').agg({
        'completion_percentage': ['mean', 'std'],
        'normalized_latency': ['mean', 'std'],
        'mean_latency_ms': ['mean', 'std'],
        'chain': 'count'
    }).round(2)
    print(exp_summary)

    # Find best and worst performing combinations
    print("\nBest Performance (highest completion rate):")
    best_completion = data_df.loc[data_df['completion_percentage'].idxmax()]
    print(f"  {best_completion['chain']} - {best_completion['experiment_type']}")
    print(f"  Completion: {best_completion['completion_percentage']:.1f}%, Normalized Latency: {best_completion['normalized_latency']:.2f}x, Raw Latency: {best_completion['mean_latency_ms']:.1f}ms")

    print("\nWorst Performance (lowest completion rate):")
    worst_completion = data_df.loc[data_df['completion_percentage'].idxmin()]
    print(f"  {worst_completion['chain']} - {worst_completion['experiment_type']}")
    print(f"  Completion: {worst_completion['completion_percentage']:.1f}%, Normalized Latency: {worst_completion['normalized_latency']:.2f}x, Raw Latency: {worst_completion['mean_latency_ms']:.1f}ms")

    print("\nBest Normalized Latency (closest to theoretical perfect):")
    best_latency = data_df.loc[data_df['normalized_latency'].idxmin()]
    print(f"  {best_latency['chain']} - {best_latency['experiment_type']}")
    print(f"  Normalized Latency: {best_latency['normalized_latency']:.2f}x, Completion: {best_latency['completion_percentage']:.1f}%, Raw Latency: {best_latency['mean_latency_ms']:.1f}ms")

    print("\nWorst Normalized Latency (furthest from theoretical perfect):")
    worst_latency = data_df.loc[data_df['normalized_latency'].idxmax()]
    print(f"  {worst_latency['chain']} - {worst_latency['experiment_type']}")
    print(f"  Normalized Latency: {worst_latency['normalized_latency']:.2f}x, Completion: {worst_latency['completion_percentage']:.1f}%, Raw Latency: {worst_latency['mean_latency_ms']:.1f}ms")

def main():
    args = parse_arguments()

    print("Starting cross-experiment analysis...")

    # Load supplementary data
    print(f"Loading supplementary data from: {args.supplementary}")
    delay_dict, perfect_time_dict = load_supplementary_data(args.supplementary)
    print(f"Found delay information for {len(delay_dict)} chains")
    print(f"Found perfect time information for {len(perfect_time_dict)} chains")

    # Load all experiment data
    print(f"Loading experiment data from: {args.experiments_dir}")
    data_df = load_experiment_data(args.experiments_dir, delay_dict, perfect_time_dict, args.experiment_duration)

    if data_df.empty:
        print("No data found! Please check your paths and file formats.")
        return

    print(f"Loaded data for {len(data_df)} experiment-chain combinations")

    # Create individual visualizations for each chain
    print("Creating individual visualizations...")
    created_files = create_visualizations(data_df, args.output)

    # Create combined summary plot
    print("Creating combined summary plot...")
    summary_file = create_combined_summary_plot(data_df, args.output)
    created_files.append(summary_file)

    # Print summary statistics
    print_summary_statistics(data_df)

    # Save detailed data to CSV for further analysis
    csv_output = f"{args.output}_detailed_data.csv"
    data_df.to_csv(csv_output, index=False)
    print(f"\nDetailed data saved to: {csv_output}")

    print("\nCreated visualization files:")
    for file in created_files:
        print(f"  - {file}")

if __name__ == "__main__":
    main()