

The problem is that we can be anticipating IO from a task which has
already exited.  Fix this by breaking anticipation if _any_ task exits.
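
To illustrate the idea, here is a minimal userspace sketch (not kernel
code): a plain counter stands in for the atomic task_exit_count, and the
names nr_exits, antic_start_exits and antic_is_stale are invented for the
example only.

	#include <stdio.h>

	static long nr_exits;		/* stands in for task_exit_count */
	static long antic_start_exits;	/* stands in for ad->exit_count */

	/* the counter bump this patch adds to do_exit() */
	static void task_exited(void)
	{
		nr_exits++;
	}

	/* sample the counter when anticipation starts */
	static void start_anticipation(void)
	{
		antic_start_exits = nr_exits;
	}

	/* the extra check in as_break_anticipation(): did anything exit? */
	static int antic_is_stale(void)
	{
		return nr_exits != antic_start_exits;
	}

	int main(void)
	{
		start_anticipation();
		printf("stale: %d\n", antic_is_stale());	/* 0: keep waiting */
		task_exited();
		printf("stale: %d\n", antic_is_stale());	/* 1: break anticipation */
		return 0;
	}

The check is cheap (an atomic_read() and a compare), at the cost of
occasionally breaking anticipation when an unrelated task exits.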


 drivers/block/as-iosched.c |    7 +++++++
 include/linux/sched.h      |    1 +
 kernel/exit.c              |    4 ++++
 3 files changed, 12 insertions(+)

diff -puN drivers/block/as-iosched.c~as-notice-exit drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-notice-exit	2003-02-23 22:39:07.000000000 -0800
+++ 25-akpm/drivers/block/as-iosched.c	2003-02-23 22:39:07.000000000 -0800
@@ -120,6 +120,7 @@ struct as_data {
 	struct timer_list antic_timer;	/* anticipatory scheduling timer */
 	struct work_struct antic_work;	/* anticipatory scheduling work */
 	unsigned long current_id;	/* Identify the expected process */
+	long exit_count;		/* task_exit_count at anticipation start */
 
 	/*
 	 * settings that change how the i/o scheduler behaves
@@ -756,6 +757,9 @@ elevator_wrap:
  * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
  * dispatch it ASAP, because we know that application will not be submitting
  * any new reads.
+ *
+ * It also returns true if some task has exited since anticipation started;
+ * that might have been the task we were anticipating.
  */
 static int as_break_anticipation(struct as_data *ad, struct as_rq *arq)
 {
@@ -769,6 +773,8 @@ static int as_break_anticipation(struct 
 			ant_stats.broken_by_write++;
 		return 1;
 	}
+	if (atomic_read(&task_exit_count) != ad->exit_count)
+		return 1;
 	return 0;
 }
 
@@ -922,6 +928,7 @@ static int as_dispatch_request(struct re
 				mod_timer(&ad->antic_timer, timeout);
 				
 				ad->antic_status = ANTIC_WAIT;
+				ad->exit_count = atomic_read(&task_exit_count);
 				blk_plug_device(q);
 
 				return 0;
diff -puN include/linux/sched.h~as-notice-exit include/linux/sched.h
--- 25/include/linux/sched.h~as-notice-exit	2003-02-23 22:39:07.000000000 -0800
+++ 25-akpm/include/linux/sched.h	2003-02-23 22:39:07.000000000 -0800
@@ -624,6 +624,7 @@ extern void reparent_to_init(void);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern task_t *child_reaper;
+extern atomic_t task_exit_count;
 
 extern int do_execve(char *, char **, char **, struct pt_regs *);
 extern struct task_struct *do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
diff -puN kernel/exit.c~as-notice-exit kernel/exit.c
--- 25/kernel/exit.c~as-notice-exit	2003-02-23 22:39:07.000000000 -0800
+++ 25-akpm/kernel/exit.c	2003-02-23 22:39:07.000000000 -0800
@@ -29,6 +29,8 @@
 extern void sem_exit (void);
 extern struct task_struct *child_reaper;
 
+atomic_t task_exit_count;	/* Global counter of do_exit() calls */
+
 int getrusage(struct task_struct *, int, struct rusage *);
 
 static struct dentry * __unhash_process(struct task_struct *p)
@@ -688,6 +690,8 @@ NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
 
+	atomic_inc(&task_exit_count);
+
 	if (unlikely(in_interrupt()))
 		panic("Aiee, killing interrupt handler!");
 	if (unlikely(!tsk->pid))

_
