| field | value | date |
|---|---|---|
| author | Christoph Hellwig <hch@lst.de> | 2003-01-14 16:32:37 -0800 |
| committer | Christoph Hellwig <hch@lst.de> | 2003-01-14 16:32:37 -0800 |
| commit | 36ece8f9024c249c5604ff5d6c2dfd6298fcdb3c (patch) | |
| tree | b8adee10e2d435c08b11305e5062b0a4d6641858 /fs/proc/task_nommu.c | |
| parent | 7a8f29d23fe7faf2e015df2cdd5a9bd34f05b17b (diff) | |
[PATCH] more procfs bits for !CONFIG_MMU
New version with all #ifdef CONFIG_MMU conditionals gone from procfs.
Instead, the conditional code lives in either task_mmu.c or
task_nommu.c, and the Makefile selects the proper file for the build
depending on CONFIG_MMU.
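For this split to work, task_mmu.c and task_nommu.c must export the same entry points so the rest of procfs stays free of CONFIG_MMU conditionals. A minimal sketch of that shared interface follows, with the prototypes inferred from the definitions in this patch; the header they were actually declared in is not part of this diff, so its name and includes here are assumptions:

```c
/* Sketch of the common interface both task_mmu.c and task_nommu.c
 * implement; prototypes inferred from the definitions in this patch.
 * Which object file provides them is decided by the Makefile via
 * CONFIG_MMU, so callers need no #ifdef. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>

char *task_mem(struct mm_struct *mm, char *buffer);
unsigned long task_vsize(struct mm_struct *mm);
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident);
ssize_t proc_pid_read_maps(struct task_struct *task, struct file *file,
			   char *buf, size_t count, loff_t *ppos);
```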
Diffstat (limited to 'fs/proc/task_nommu.c')
| -rw-r--r-- | fs/proc/task_nommu.c | 107 | 
1 file changed, 107 insertions, 0 deletions
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
new file mode 100644
index 000000000000..16f05cfae496
--- /dev/null
+++ b/fs/proc/task_nommu.c
@@ -0,0 +1,107 @@
+
+#include <linux/mm.h>
+
+/*
+ * Logic: we've got two memory sums for each process, "shared", and
+ * "non-shared". Shared memory may get counted more than once, for
+ * each process that owns it. Non-shared memory is counted
+ * accurately.
+ */
+char *task_mem(struct mm_struct *mm, char *buffer)
+{
+	unsigned long bytes = 0, sbytes = 0, slack = 0;
+	struct mm_tblock_struct *tblock;
+
+	down_read(&mm->mmap_sem);
+	for (tblock = &mm->context.tblock; tblock; tblock = tblock->next) {
+		if (!tblock->rblock)
+			continue;
+		bytes += kobjsize(tblock);
+		if (atomic_read(&mm->mm_count) > 1 ||
+		    tblock->rblock->refcount > 1) {
+			sbytes += kobjsize(tblock->rblock->kblock);
+			sbytes += kobjsize(tblock->rblock);
+		} else {
+			bytes += kobjsize(tblock->rblock->kblock);
+			bytes += kobjsize(tblock->rblock);
+			slack += kobjsize(tblock->rblock->kblock) -
+					tblock->rblock->size;
+		}
+	}
+
+	if (atomic_read(&mm->mm_count) > 1)
+		sbytes += kobjsize(mm);
+	else
+		bytes += kobjsize(mm);
+
+	if (current->fs && atomic_read(&current->fs->count) > 1)
+		sbytes += kobjsize(current->fs);
+	else
+		bytes += kobjsize(current->fs);
+
+	if (current->files && atomic_read(&current->files->count) > 1)
+		sbytes += kobjsize(current->files);
+	else
+		bytes += kobjsize(current->files);
+
+	if (current->sig && atomic_read(&current->sig->count) > 1)
+		sbytes += kobjsize(current->sig);
+	else
+		bytes += kobjsize(current->sig);
+
+	bytes += kobjsize(current); /* includes kernel stack */
+
+	buffer += sprintf(buffer,
+		"Mem:\t%8lu bytes\n"
+		"Slack:\t%8lu bytes\n"
+		"Shared:\t%8lu bytes\n",
+		bytes, slack, sbytes);
+
+	up_read(&mm->mmap_sem);
+	return buffer;
+}
+
+unsigned long task_vsize(struct mm_struct *mm)
+{
+	struct mm_tblock_struct *tbp;
+	unsigned long vsize = 0;
+
+	for (tbp = &mm->context.tblock; tbp; tbp = tbp->next) {
+		if (tbp->rblock)
+			vsize += kobjsize(tbp->rblock->kblock);
+	}
+
+	return vsize;
+}
+
+int task_statm(struct mm_struct *mm, int *shared, int *text,
+	       int *data, int *resident)
+{
+	struct mm_tblock_struct *tbp;
+	int size = kobjsize(mm);
+
+	for (tbp = &mm->context.tblock; tbp; tbp = tbp->next) {
+		if (tbp->next)
+			size += kobjsize(tbp->next);
+		if (tbp->rblock) {
+			size += kobjsize(tbp->rblock);
+			size += kobjsize(tbp->rblock->kblock);
+		}
+	}
+
+	size += (*text = mm->end_code - mm->start_code);
+	size += (*data = mm->start_stack - mm->start_data);
+
+	*resident = size;
+	return size;
+}
+
+/*
+ * Albert D. Cahalan suggested faking entries for the traditional
+ * sections here.  This might be worth investigating.
+ */
+ssize_t proc_pid_read_maps(struct task_struct *task, struct file *file,
			   char *buf, size_t count, loff_t *ppos)
+{
+	return 0;
+}
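For context, a hypothetical sketch of how an MMU-agnostic caller, such as the /proc/&lt;pid&gt;/status code in fs/proc/array.c, can consume this interface with no #ifdef. The surrounding details (task_name(), task_state(), reference counting on the mm) are illustrative assumptions and not part of this patch:

```c
/* Illustrative only: not part of this patch.  Shows the point of the
 * split -- the generic status code calls task_mem(), and the build
 * links in either the MMU or the no-MMU implementation. */
int proc_pid_status(struct task_struct *task, char *buffer)
{
	char *orig = buffer;
	struct mm_struct *mm = task->mm;	/* simplified: real code
						   takes a reference */

	buffer = task_name(task, buffer);
	buffer = task_state(task, buffer);
	if (mm)
		buffer = task_mem(mm, buffer);	/* task_nommu.c version
						   when !CONFIG_MMU */
	return buffer - orig;
}
```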
