diff --git a/suites/rados/singleton/all/watch-notify-same-primary.yaml b/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 00000000000..4b3c661cbcd
--- /dev/null
+++ b/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.0
+  - mon.1
+  - mon.2
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    config:
+      global:
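+        # allow I/O to continue with a single replica so the test can take an OSD down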
+        osd pool default min size: 1
+    log-whitelist:
+    - objects unfound and apparently lost
+- watch_notify_same_primary:
+    clients: [client.0]
diff --git a/tasks/watch_notify_same_primary.py b/tasks/watch_notify_same_primary.py
new file mode 100644
index 00000000000..10251b860c7
--- /dev/null
+++ b/tasks/watch_notify_same_primary.py
@@ -0,0 +1,116 @@
+
+"""
+watch_notify_same_primary task
+"""
+from cStringIO import StringIO
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+    Run watch_notify_same_primary
+
+    The config should be as follows:
+
+    watch_notify_same_primary:
+        clients: [client list]
+
+    The client list should contain exactly one client.
+
+    The test requires three OSDs.
+
+    example:
+
+    tasks:
+    - ceph:
+    - watch_notify_same_primary:
+        clients: [client.0]
+    - interactive:
+    """
+    log.info('Beginning watch_notify_same_primary...')
+    assert isinstance(config, dict), \
+        "please list clients to run on"
+
+    clients = config.get('clients', ['client.0'])
+    assert len(clients) == 1
+    role = clients[0]
+    assert isinstance(role, basestring)
+    PREFIX = 'client.'
+    assert role.startswith(PREFIX)
+    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
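+    # Prevent OSDs from being marked out while one is taken down below.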
+    ctx.manager.raw_cluster_cmd('osd', 'set', 'noout')
+
+    pool = ctx.manager.create_pool_with_unique_name()
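+
+    # obj(n) names the nth test object; start_watch(n) writes it and then
+    # leaves a "rados watch" on it running in the background (wait=False).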
+    def obj(n): return "foo-{num}".format(num=n)
+    def start_watch(n):
+        remote.run(
+            args=[
+                "rados",
+                "-p", pool,
+                "put",
+                obj(n),
+                "/etc/resolv.conf"],
+            logger=log.getChild('watch.{id}'.format(id=n)))
+        return remote.run(
+            args=[
+                "rados",
+                "-p", pool,
+                "watch",
+                obj(n)],
+            stdin=run.PIPE,
+            stdout=StringIO(),
+            stderr=StringIO(),
+            wait=False)
+    watches = [start_watch(i) for i in range(20)]
+
+    def notify(n, msg):
+        remote.run(
+            args=[
+                "rados",
+                "-p", pool,
+                "notify",
+                obj(n),
+                msg],
+            logger=log.getChild('notify.{id}'.format(id=n)))
+
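+    # First round of notifies with all OSDs up.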
+    for n in range(len(watches)):
+        notify(n, 'notify1')
+
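+    # Take osd.0 down, then send a second round of notifies that must
+    # still reach every watcher.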
+    ctx.manager.kill_osd(0)
+    ctx.manager.mark_down_osd(0)
+
+    for n in range(len(watches)):
+        notify(n, 'notify2')
+
+    try:
+        yield
+    finally:
+        log.info('joining watch_notify_same_primary')
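+        # "rados watch" exits once it reads a line from stdin; nudge each
+        # watcher, then wait for the processes to finish.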
+        for watch in watches:
+            watch.stdin.write("\n")
+
+        run.wait(watches)
+
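+        # Every watcher should have seen both notifies.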
+        for watch in watches:
+            lines = watch.stdout.getvalue().split("\n")
+            log.info('watch output: %s', lines)
+            assert len(lines) == 4
+
+        ctx.manager.revive_osd(0)
+        ctx.manager.remove_pool(pool)