@@ -1285,22 +1285,71 @@ void tracing_reset_all_online_cpus(void)
 	}
 }
 
-#define SAVED_CMDLINES 128
+#define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
-static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
-static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
-static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
-static int cmdline_idx;
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+struct saved_cmdlines_buffer {
+	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+	unsigned *map_cmdline_to_pid;
+	unsigned cmdline_num;
+	int cmdline_idx;
+	char *saved_cmdlines;
+};
+static struct saved_cmdlines_buffer *savedcmd;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
 
-static void trace_init_cmdlines(void)
+static inline char *get_saved_cmdlines(int idx)
+{
+	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+}
+
+static inline void set_cmdline(int idx, const char *cmdline)
 {
-	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
-	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
-	cmdline_idx = 0;
+	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+}
+
+static int allocate_cmdlines_buffer(unsigned int val,
+				    struct saved_cmdlines_buffer *s)
+{
+	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
+					GFP_KERNEL);
+	if (!s->map_cmdline_to_pid)
+		return -ENOMEM;
+
+	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+	if (!s->saved_cmdlines) {
+		kfree(s->map_cmdline_to_pid);
+		return -ENOMEM;
+	}
+
+	s->cmdline_idx = 0;
+	s->cmdline_num = val;
+	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+	       sizeof(s->map_pid_to_cmdline));
+	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+	       val * sizeof(*s->map_cmdline_to_pid));
+
+	return 0;
+}
+
+static int trace_create_savedcmd(void)
+{
+	int ret;
+
+	savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	if (!savedcmd)
+		return -ENOMEM;
+
+	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+	if (ret < 0) {
+		kfree(savedcmd);
+		savedcmd = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 int is_tracing_stopped(void)
@@ -1457,27 +1506,27 @@ static int trace_save_cmdline(struct task_struct *tsk)
 	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return 0;
 
-	idx = map_pid_to_cmdline[tsk->pid];
+	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
 	if (idx == NO_CMDLINE_MAP) {
-		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
+		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
 
 		/*
 		 * Check whether the cmdline buffer at idx has a pid
 		 * mapped. We are going to overwrite that entry so we
 		 * need to clear the map_pid_to_cmdline. Otherwise we
 		 * would read the new comm for the old pid.
 		 */
-		pid = map_cmdline_to_pid[idx];
+		pid = savedcmd->map_cmdline_to_pid[idx];
 		if (pid != NO_CMDLINE_MAP)
-			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
-		map_cmdline_to_pid[idx] = tsk->pid;
-		map_pid_to_cmdline[tsk->pid] = idx;
+		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
 
-		cmdline_idx = idx;
+		savedcmd->cmdline_idx = idx;
 	}
 
-	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+	set_cmdline(idx, tsk->comm);
 
 	arch_spin_unlock(&trace_cmdline_lock);
 
@@ -1503,9 +1552,9 @@ static void __trace_find_cmdline(int pid, char comm[])
 		return;
 	}
 
-	map = map_pid_to_cmdline[pid];
+	map = savedcmd->map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
-		strcpy(comm, saved_cmdlines[map]);
+		strcpy(comm, get_saved_cmdlines(map));
 	else
 		strcpy(comm, "<...>");
 }
@@ -3593,6 +3642,7 @@ static const char readme_msg[] =
35933642 " trace_options\t\t- Set format or modify how tracing happens\n"
35943643 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
35953644 "\t\t\t option name\n"
3645+ " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
35963646#ifdef CONFIG_DYNAMIC_FTRACE
35973647 "\n available_filter_functions - list of functions that can be filtered on\n"
35983648 " set_ftrace_filter\t- echo function name in here to only trace these\n"
@@ -3715,7 +3765,8 @@ static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
 
 	(*pos)++;
 
-	for (; ptr < &map_cmdline_to_pid[SAVED_CMDLINES]; ptr++) {
+	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
+	     ptr++) {
 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
 			continue;
 
@@ -3733,7 +3784,7 @@ static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
 	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 
-	v = &map_cmdline_to_pid[0];
+	v = &savedcmd->map_cmdline_to_pid[0];
 	while (l <= *pos) {
 		v = saved_cmdlines_next(m, v, &l);
 		if (!v)
@@ -3781,6 +3832,79 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
 	.release	= seq_release,
 };
 
+static ssize_t
+tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
+				 size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	arch_spin_lock(&trace_cmdline_lock);
+	r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
+	arch_spin_unlock(&trace_cmdline_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+	kfree(s->saved_cmdlines);
+	kfree(s->map_cmdline_to_pid);
+	kfree(s);
+}
+
+static int tracing_resize_saved_cmdlines(unsigned int val)
+{
+	struct saved_cmdlines_buffer *s, *savedcmd_temp;
+
+	s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	if (allocate_cmdlines_buffer(val, s) < 0) {
+		kfree(s);
+		return -ENOMEM;
+	}
+
+	arch_spin_lock(&trace_cmdline_lock);
+	savedcmd_temp = savedcmd;
+	savedcmd = s;
+	arch_spin_unlock(&trace_cmdline_lock);
+	free_saved_cmdlines_buffer(savedcmd_temp);
+
+	return 0;
+}
+
+static ssize_t
+tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
+	if (!val || val > PID_MAX_DEFAULT)
+		return -EINVAL;
+
+	ret = tracing_resize_saved_cmdlines((unsigned int)val);
+	if (ret < 0)
+		return ret;
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
+static const struct file_operations tracing_saved_cmdlines_size_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_saved_cmdlines_size_read,
+	.write		= tracing_saved_cmdlines_size_write,
+};
+
 static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
@@ -6375,6 +6499,9 @@ static __init int tracer_init_debugfs(void)
63756499 trace_create_file ("saved_cmdlines" , 0444 , d_tracer ,
63766500 NULL , & tracing_saved_cmdlines_fops );
63776501
6502+ trace_create_file ("saved_cmdlines_size" , 0644 , d_tracer ,
6503+ NULL , & tracing_saved_cmdlines_size_fops );
6504+
63786505#ifdef CONFIG_DYNAMIC_FTRACE
63796506 trace_create_file ("dyn_ftrace_total_info" , 0444 , d_tracer ,
63806507 & ftrace_update_tot_cnt , & tracing_dyn_info_fops );
@@ -6611,18 +6738,19 @@ __init static int tracer_alloc_buffers(void)
 	if (!temp_buffer)
 		goto out_free_cpumask;
 
+	if (trace_create_savedcmd() < 0)
+		goto out_free_temp_buffer;
+
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		goto out_free_temp_buffer;
+		goto out_free_savedcmd;
 	}
 
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
-	trace_init_cmdlines();
-
 	if (trace_boot_clock) {
 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
 		if (ret < 0)
@@ -6668,6 +6796,8 @@ __init static int tracer_alloc_buffers(void)
 
 	return 0;
 
+out_free_savedcmd:
+	free_saved_cmdlines_buffer(savedcmd);
 out_free_temp_buffer:
 	ring_buffer_free(temp_buffer);
 out_free_cpumask:
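
For reference, below is a minimal user-space sketch of how the new saved_cmdlines_size file could be exercised once this change is applied. The tracefs path (/sys/kernel/debug/tracing) and the value 1024 are illustrative assumptions, not part of the commit; the write handler accepts any value from 1 up to PID_MAX_DEFAULT.

/*
 * Sketch only (not part of the patch): resize the saved cmdlines buffer
 * from user space and read the new size back. Assumes tracefs is
 * reachable at /sys/kernel/debug/tracing and the process is allowed to
 * write there (typically root). 1024 is an arbitrary example value.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/saved_cmdlines_size";
	char buf[64];
	FILE *f;

	/* Equivalent of: echo 1024 > saved_cmdlines_size */
	f = fopen(path, "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	fprintf(f, "1024\n");
	fclose(f);

	/* Equivalent of: cat saved_cmdlines_size */
	f = fopen(path, "r");
	if (!f) {
		perror("open for read");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("saved_cmdlines_size is now %s", buf);
	fclose(f);

	return 0;
}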