[Date Prev][Date Next] [Thread Prev][Thread Next] [Date Index] [Thread Index]

Bug#768010: future mongodb unblock



On Sat, 2014-11-08 at 11:06 +0000, Jonathan Wiltshire wrote:
> Control: tag -1 moreinfo
> 
> On Tue, Nov 04, 2014 at 07:30:09AM +0100, Laszlo Boszormenyi (GCS) wrote:
> > Upstream released MongoDB 2.6.0 too late for Jessie and started to
> > work for 2.8.0. Then I was blind to see they backport important fixes
> > for the 2.4.x tree. The 2.4.11 [1] and 2.4.12 [2] changelogs are
> > available, as well the upcoming 2.4.13 [3].
[...]
> Assuming the diffs are sane, I'll accept the security fix and the SSLv3
> ciphers through sid please.
 I attach the debdiffs for both versions; they are named to include the
target package version. The new upstream version is safe as well: it
contains only important bugfixes already proven in the current stable
tree, the 2.6.x versions.

Thanks for considering,
Laszlo/GCS
diff -Nru mongodb-2.4.10/debian/changelog mongodb-2.4.10/debian/changelog
--- mongodb-2.4.10/debian/changelog	2014-11-02 00:42:59.000000000 +0000
+++ mongodb-2.4.10/debian/changelog	2014-11-10 19:22:20.000000000 +0000
@@ -1,3 +1,10 @@
+mongodb (1:2.4.10-4) unstable; urgency=high
+
+  * Backport potential information leak security fix from 2.4.11 .
+  * Backport disable SSLv3 ciphers from pre-2.4.13 to fix CVE-2014-3566 .
+
+ -- Laszlo Boszormenyi (GCS) <gcs@debian.org>  Mon, 10 Nov 2014 18:24:57 +0000
+
 mongodb (1:2.4.10-3) unstable; urgency=medium
 
   * Enable systemd unit file (closes: #767211).
diff -Nru mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch
--- mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch	2014-11-10 18:21:42.000000000 +0000
@@ -0,0 +1,22 @@
+From 656f78711632a5dc37221422c99e3c4619bcc58f Mon Sep 17 00:00:00 2001
+From: Dan Pasette <dan@10gen.com>
+Date: Mon, 27 Oct 2014 23:49:33 -0400
+Subject: [PATCH] SERVER-15673 fix typo in backport
+
+---
+ src/mongo/util/net/ssl_manager.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
+index 0efdd7c..de49e38 100644
+--- a/src/mongo/util/net/ssl_manager.cpp
++++ b/src/mongo/util/net/ssl_manager.cpp
+@@ -142,7 +142,7 @@ namespace mongo {
+         // Activate all bug workaround options, to support buggy client SSL's.
+         // SSL_OP_NO_SSLv2 - Disable SSL v2 support
+         // SSL_OP_NO_SSLv3 - Disable SSL v3 support
+-        SSL_CTX_set_options(*context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
++        SSL_CTX_set_options(_context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
+ 
+         // If renegotiation is needed, don't return from recv() or send() until it's successful.
+         // Note: this is for blocking sockets only.
diff -Nru mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch
--- mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch	2014-11-10 18:21:42.000000000 +0000
@@ -0,0 +1,25 @@
+From 8b9242837510e6410ddcf4f19969da4c7b01b2f7 Mon Sep 17 00:00:00 2001
+From: Dan Pasette <dan@10gen.com>
+Date: Mon, 27 Oct 2014 22:45:56 -0400
+Subject: [PATCH] SERVER-15673 Disable SSLv3 ciphers (CVE-2014-3566 "POODLE")
+
+(cherry picked from commit 035b5a90f56d653e930fcbe20c89f4dda7e48a30)
+---
+ src/mongo/util/net/ssl_manager.cpp | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
+index dd8b3a2..0efdd7c 100644
+--- a/src/mongo/util/net/ssl_manager.cpp
++++ b/src/mongo/util/net/ssl_manager.cpp
+@@ -140,7 +140,9 @@ namespace mongo {
+                 _context);
+    
+         // Activate all bug workaround options, to support buggy client SSL's.
+-        SSL_CTX_set_options(_context, SSL_OP_ALL);
++        // SSL_OP_NO_SSLv2 - Disable SSL v2 support
++        // SSL_OP_NO_SSLv3 - Disable SSL v3 support
++        SSL_CTX_set_options(*context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
+ 
+         // If renegotiation is needed, don't return from recv() or send() until it's successful.
+         // Note: this is for blocking sockets only.
diff -Nru mongodb-2.4.10/debian/patches/9105b69e1ded5b7d0d384d574103b0ee6bbb6122.patch mongodb-2.4.10/debian/patches/9105b69e1ded5b7d0d384d574103b0ee6bbb6122.patch
--- mongodb-2.4.10/debian/patches/9105b69e1ded5b7d0d384d574103b0ee6bbb6122.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.10/debian/patches/9105b69e1ded5b7d0d384d574103b0ee6bbb6122.patch	2014-11-10 18:36:24.000000000 +0000
@@ -0,0 +1,801 @@
+From 9105b69e1ded5b7d0d384d574103b0ee6bbb6122 Mon Sep 17 00:00:00 2001
+From: Mark Benvenuto <mark.benvenuto@mongodb.com>
+Date: Mon, 11 Aug 2014 18:48:46 -0400
+Subject: [PATCH] SERVER-14268: Backport from 2.6 to 2.4
+
+---
+ src/mongo/db/SConscript            |  11 +++
+ src/mongo/db/dbmessage.cpp         | 134 ++++++++++++++++++++++++++++++++
+ src/mongo/db/dbmessage.h           | 155 ++++++++++++-------------------------
+ src/mongo/db/dbmessage_test.cpp    | 143 ++++++++++++++++++++++++++++++++++
+ src/mongo/db/instance.cpp          |  65 +++++++++-------
+ src/mongo/s/cursors.cpp            |  11 +--
+ src/mongo/s/request.cpp            |   3 +-
+ src/mongo/s/strategy_shard.cpp     |  11 ++-
+ src/mongo/s/writeback_listener.cpp |   2 +-
+ src/mongo/tools/sniffer.cpp        |   6 +-
+ src/mongo/util/net/message.h       |  10 ++-
+ 11 files changed, 401 insertions(+), 150 deletions(-)
+ create mode 100644 src/mongo/db/dbmessage_test.cpp
+
+diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
+index 2d3a4f6..8ac7897 100644
+--- a/src/mongo/db/SConscript
++++ b/src/mongo/db/SConscript
+@@ -13,3 +13,14 @@ env.StaticLibrary('common', ['field_ref.cpp'],
+                            '$BUILD_DIR/mongo/foundation'])
+ 
+ env.CppUnitTest('field_ref_test', ['field_ref_test.cpp'], LIBDEPS=['common'])
++
++env.CppUnitTest(
++    target="dbmessage_test",
++    source=[
++        "dbmessage_test.cpp"
++    ],
++    LIBDEPS=[
++        "common",
++        "$BUILD_DIR/mongo/clientdriver",
++    ],
++)
+diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
+index 3f1e866..aebe88b 100644
+--- a/src/mongo/db/dbmessage.cpp
++++ b/src/mongo/db/dbmessage.cpp
+@@ -52,6 +52,140 @@ namespace mongo {
+         return ss.str();
+     }
+ 
++    DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(NULL), _mark(NULL), _nsLen(0) {
++        // for received messages, Message has only one buffer
++        _theEnd = _msg.singleData()->_data + _msg.singleData()->dataLen();
++        _nextjsobj = _msg.singleData()->_data;
++
++        _reserved = readAndAdvance<int>();
++
++        // Read packet for NS
++        if (messageShouldHaveNs()) {
++
++            // Limit = buffer size of message -
++            //        (first int4 in message which is either flags or a zero constant)
++            size_t limit = _msg.singleData()->dataLen() - sizeof(int);
++
++            _nsStart = _nextjsobj;
++            _nsLen = strnlen(_nsStart, limit);
++
++            // Validate there is room for a null byte in the buffer
++            // Strings can be zero length
++            uassert(18633, "Failed to parse ns string", _nsLen <= (limit - 1));
++
++            _nextjsobj += _nsLen + 1; // skip namespace + null
++        }
++    }
++
++    const char * DbMessage::getns() const {
++        verify(messageShouldHaveNs());
++        return _nsStart;
++    }
++
++    long long DbMessage::getInt64(int offsetBytes) const {
++        verify(messageShouldHaveNs());
++        const char* p = _nsStart + _nsLen + 1;
++        checkReadOffset<long long>(p, offsetBytes);
++
++        return ((reinterpret_cast<const long long*>(p + offsetBytes)))[0];
++    }
++
++    int DbMessage::getQueryNToReturn() const {
++        verify(messageShouldHaveNs());
++        const char* p = _nsStart + _nsLen + 1;
++        checkRead<int>(p, 2);
++
++        return ((reinterpret_cast<const int*>(p)))[1];
++    }
++
++
++    int DbMessage::getFlags() const {
++        verify(messageShouldHaveNs());
++        const char* p = _nsStart + _nsLen + 1;
++        checkRead<int>(p, 1);
++
++        return ((reinterpret_cast<const int*>(p)))[0];
++    }
++
++    void DbMessage::setFlags(int value) {
++        verify(messageShouldHaveNs());
++        char* p = const_cast<char*>(_nsStart) + _nsLen + 1;
++        checkRead<int>(p, 1);
++
++        ((reinterpret_cast<int*>(p)))[0] = value;
++    }
++
++
++    int DbMessage::pullInt() {
++        return readAndAdvance<int>();
++    }
++
++    long long DbMessage::pullInt64() {
++        return readAndAdvance<long long>();
++    }
++
++    const long long* DbMessage::getArray(size_t count) const {
++        checkRead<long long>(_nextjsobj, count);
++        return reinterpret_cast<const long long*>(_nextjsobj);
++    }
++
++    BSONObj DbMessage::nextJsObj() {
++        massert(10304,
++            "Client Error: Remaining data too small for BSON object",
++            _nextjsobj != NULL && _theEnd - _nextjsobj >= 5);
++
++        if (cmdLine.objcheck) {
++            Status status = validateBSON(_nextjsobj, _theEnd - _nextjsobj);
++            massert(10307,
++                str::stream() << "Client Error: bad object in message: " << status.reason(),
++                status.isOK());
++        }
++
++        BSONObj js(_nextjsobj);
++        verify(js.objsize() >= 5);
++        verify(js.objsize() <= (_theEnd - _nextjsobj));
++
++        _nextjsobj += js.objsize();
++        if (_nextjsobj >= _theEnd)
++            _nextjsobj = NULL;
++        return js;
++    }
++
++    void DbMessage::markReset(const char * toMark) {
++        if (toMark == NULL) {
++            toMark = _mark;
++        }
++
++        verify(toMark);
++        _nextjsobj = toMark;
++    }
++
++    template<typename T>
++    void DbMessage::checkRead(const char* start, size_t count) const {
++        if ((_theEnd - start) < static_cast<int>(sizeof(T) * count)) {
++            uassert(18634, "Not enough data to read", false);
++        }
++    }
++
++    template<typename T>
++    void DbMessage::checkReadOffset(const char* start, size_t offset) const {
++        if ((_theEnd - start) < static_cast<int>(offset + sizeof(T))) {
++            uassert(18626, "Not enough data to read", false);
++        }
++    }
++
++    template<typename T>
++    T DbMessage::read() const {
++        checkRead<T>(_nextjsobj, 1);
++
++        return *(reinterpret_cast<const T*>(_nextjsobj));
++    }
++
++    template<typename T> T DbMessage::readAndAdvance() {
++        T t = read<T>();
++        _nextjsobj += sizeof(T);
++        return t;
++    }
+ 
+     void replyToQuery(int queryResultFlags,
+                       AbstractMessagingPort* p, Message& requestMsg,
+diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
+index aeb9313..50658dd 100644
+--- a/src/mongo/db/dbmessage.h
++++ b/src/mongo/db/dbmessage.h
+@@ -110,138 +110,83 @@ namespace mongo {
+        See http://dochub.mongodb.org/core/mongowireprotocol
+     */
+     class DbMessage {
++    // Assume sizeof(int) == 4 bytes
++    BOOST_STATIC_ASSERT(sizeof(int) == 4);
++
+     public:
+-        DbMessage(const Message& _m) : m(_m) , mark(0) {
+-            // for received messages, Message has only one buffer
+-            theEnd = _m.singleData()->_data + _m.header()->dataLen();
+-            char *r = _m.singleData()->_data;
+-            reserved = (int *) r;
+-            data = r + 4;
+-            nextjsobj = data;
++        // Note: DbMessage constructor reads the first 4 bytes and stores it in reserved
++        DbMessage(const Message& msg);
++
++        // Indicates whether this message is expected to have a ns
++        // or in the case of dbMsg, a string in the same place as ns
++        bool messageShouldHaveNs() const {
++            return (_msg.operation() >= dbMsg) && (_msg.operation() <= dbDelete);
+         }
+ 
+-        /** the 32 bit field before the ns 
++        /** the 32 bit field before the ns
+          * track all bit usage here as its cross op
+          * 0: InsertOption_ContinueOnError
+          * 1: fromWriteback
+          */
+-        int& reservedField() { return *reserved; }
++        int reservedField() const { return _reserved; }
++        void setReservedField(int value) {  _reserved = value; }
+ 
+-        const char * getns() const {
+-            return data;
+-        }
+-        void getns(Namespace& ns) const {
+-            ns = data;
+-        }
++        const char * getns() const;
++        int getQueryNToReturn() const;
+ 
+-        const char * afterNS() const {
+-            return data + strlen( data ) + 1;
+-        }
++        int getFlags() const;
++        void setFlags(int value);
+ 
+-        int getInt( int num ) const {
+-            const int * foo = (const int*)afterNS();
+-            return foo[num];
+-        }
++        long long getInt64(int offsetBytes) const;
+ 
+-        int getQueryNToReturn() const {
+-            return getInt( 1 );
+-        }
++        int pullInt();
++        long long pullInt64();
++        const long long* getArray(size_t count) const;
+ 
+-        /**
+-         * get an int64 at specified offsetBytes after ns
+-         */
+-        long long getInt64( int offsetBytes ) const {
+-            const char * x = afterNS();
+-            x += offsetBytes;
+-            const long long * ll = (const long long*)x;
+-            return ll[0];
++        /* for insert and update msgs */
++        bool moreJSObjs() const {
++            return _nextjsobj != 0;
+         }
+ 
+-        void resetPull() { nextjsobj = data; }
+-        int pullInt() const { return pullInt(); }
+-        int& pullInt() {
+-            if ( nextjsobj == data )
+-                nextjsobj += strlen(data) + 1; // skip namespace
+-            int& i = *((int *)nextjsobj);
+-            nextjsobj += 4;
+-            return i;
+-        }
+-        long long pullInt64() const {
+-            return pullInt64();
+-        }
+-        long long &pullInt64() {
+-            if ( nextjsobj == data )
+-                nextjsobj += strlen(data) + 1; // skip namespace
+-            long long &i = *((long long *)nextjsobj);
+-            nextjsobj += 8;
+-            return i;
+-        }
++        BSONObj nextJsObj();
+ 
+-        OID* getOID() const {
+-            return (OID *) (data + strlen(data) + 1); // skip namespace
+-        }
++        const Message& msg() const { return _msg; }
+ 
+-        void getQueryStuff(const char *&query, int& ntoreturn) {
+-            int *i = (int *) (data + strlen(data) + 1);
+-            ntoreturn = *i;
+-            i++;
+-            query = (const char *) i;
++        const char * markGet() const {
++            return _nextjsobj;
+         }
+ 
+-        /* for insert and update msgs */
+-        bool moreJSObjs() const {
+-            return nextjsobj != 0;
++        void markSet() {
++            _mark = _nextjsobj;
+         }
+-        BSONObj nextJsObj() {
+-            if ( nextjsobj == data ) {
+-                nextjsobj += strlen(data) + 1; // skip namespace
+-                massert( 13066 ,  "Message contains no documents", theEnd > nextjsobj );
+-            }
+-            massert( 10304,
+-                     "Client Error: Remaining data too small for BSON object",
+-                     theEnd - nextjsobj >= 5 );
+-
+-            if ( cmdLine.objcheck ) {
+-                Status status = validateBSON( nextjsobj, theEnd - nextjsobj );
+-                massert( 10307,
+-                         str::stream() << "Client Error: bad object in message: " << status.reason(),
+-                         status.isOK() );
+-            }
+ 
+-            BSONObj js(nextjsobj);
+-            verify( js.objsize() >= 5 );
+-            verify( js.objsize() < ( theEnd - data ) );
++        void markReset(const char * toMark = NULL);
+ 
+-            nextjsobj += js.objsize();
+-            if ( nextjsobj >= theEnd )
+-                nextjsobj = 0;
+-            return js;
+-        }
++    private:
++        // Check if we have enough data to read
++        template<typename T>
++        void checkRead(const char* start, size_t count = 0) const;
+ 
+-        const Message& msg() const { return m; }
++        template<typename T>
++        void checkReadOffset(const char* start, size_t offset) const;
+ 
+-        const char * markGet() {
+-            return nextjsobj;
+-        }
++        // Read some type without advancing our position
++        template<typename T>
++        T read() const;
+ 
+-        void markSet() {
+-            mark = nextjsobj;
+-        }
++        // Read some type, and advance our position
++        template<typename T> T readAndAdvance();
+ 
+-        void markReset( const char * toMark = 0) {
+-            if( toMark == 0 ) toMark = mark;
+-            verify( toMark );
+-            nextjsobj = toMark;
+-        }
++        const Message& _msg;
++        int _reserved; // flags or zero depending on packet, starts the packet
+ 
+-    private:
+-        const Message& m;
+-        int* reserved;
+-        const char *data;
+-        const char *nextjsobj;
+-        const char *theEnd;
++        const char* _nsStart; // start of namespace string, +4 from message start
++        const char* _nextjsobj; // current position reading packet
++        const char* _theEnd; // end of packet
++
++        const char* _mark;
+ 
+-        const char * mark;
++        unsigned int _nsLen;
+     };
+ 
+ 
+diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp
+new file mode 100644
+index 0000000..867a52d
+--- /dev/null
++++ b/src/mongo/db/dbmessage_test.cpp
+@@ -0,0 +1,143 @@
++/**
++ * Copyright (C) 2014 MongoDB Inc.
++ *
++ * This program is free software: you can redistribute it and/or  modify
++ * it under the terms of the GNU Affero General Public License, version 3,
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU Affero General Public License for more details.
++ *
++ * You should have received a copy of the GNU Affero General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ *
++ * As a special exception, the copyright holders give permission to link the
++ * code of portions of this program with the OpenSSL library under certain
++ * conditions as described in each individual source file and distribute
++ * linked combinations including the program with the OpenSSL library. You
++ * must comply with the GNU Affero General Public License in all respects
++ * for all of the code used other than as permitted herein. If you modify
++ * file(s) with this exception, you may extend this exception to your
++ * version of the file(s), but you are not obligated to do so. If you do not
++ * wish to do so, delete this exception statement from your version. If you
++ * delete this exception statement from all source files in the program,
++ * then also delete it in the license file.
++ */
++
++#include <string>
++
++#include "mongo/bson/util/builder.h"
++#include "mongo/db/dbmessage.h"
++#include "mongo/unittest/unittest.h"
++
++namespace mongo {
++    using std::string;
++
++    // Test if the reserved field is short of 4 bytes
++    TEST(DBMessage1, ShortFlags) {
++        BufBuilder b;
++        string ns("test");
++
++        b.appendChar( 1 );
++
++        Message toSend;
++        toSend.setData( dbDelete , b.buf() , b.len() );
++
++        ASSERT_THROWS(DbMessage d1(toSend), UserException);
++    }
++
++    // Test a short NS missing a trailing null
++    TEST(DBMessage1, BadNS) {
++        BufBuilder b;
++
++        b.appendNum( static_cast<int>(1) );
++        b.appendChar( 'b' );
++        b.appendChar( 'a' );
++        b.appendChar( 'd' );
++        // Forget to append \0
++
++        Message toSend;
++        toSend.setData( dbDelete , b.buf() , b.len() );
++
++        ASSERT_THROWS(DbMessage d1(toSend), UserException);
++    }
++
++    // Test a valid kill message and try an extra pull
++    TEST(DBMessage1, GoodKill) {
++        BufBuilder b;
++
++        b.appendNum( static_cast<int>(1) );
++        b.appendNum( static_cast<int>(3) );
++
++        Message toSend;
++        toSend.setData( dbKillCursors , b.buf() , b.len() );
++
++        DbMessage d1(toSend);
++        ASSERT_EQUALS(3, d1.pullInt());
++
++        ASSERT_THROWS(d1.pullInt(), UserException);
++    }
++
++    // Try a bad read of a type too large
++    TEST(DBMessage1, GoodKill2) {
++        BufBuilder b;
++
++        b.appendNum( static_cast<int>(1) );
++        b.appendNum( static_cast<int>(3) );
++
++        Message toSend;
++        toSend.setData( dbKillCursors , b.buf() , b.len() );
++
++        DbMessage d1(toSend);
++        ASSERT_THROWS(d1.pullInt64(), UserException);
++    }
++
++    // Test a basic good insert, and an extra read
++    TEST(DBMessage1, GoodInsert) {
++        BufBuilder b;
++        string ns("test");
++
++        b.appendNum( static_cast<int>(1) );
++        b.appendStr(ns);
++        b.appendNum( static_cast<int>(3) );
++        b.appendNum( static_cast<int>(39) );
++
++        Message toSend;
++        toSend.setData( dbInsert , b.buf() , b.len() );
++
++        DbMessage d1(toSend);
++        ASSERT_EQUALS(3, d1.pullInt());
++        ASSERT_EQUALS(39, d1.pullInt());
++        ASSERT_THROWS(d1.pullInt(), UserException);
++    }
++
++    // Test a basic good insert, and an extra read
++    TEST(DBMessage1, GoodInsert2) {
++        BufBuilder b;
++        string ns("test");
++
++        b.appendNum( static_cast<int>(1) );
++        b.appendStr(ns);
++        b.appendNum( static_cast<int>(3) );
++        b.appendNum( static_cast<int>(39) );
++
++        BSONObj bo = BSON( "ts" << 0 );
++        bo.appendSelfToBufBuilder( b );
++
++        Message toSend;
++        toSend.setData( dbInsert , b.buf() , b.len() );
++
++        DbMessage d1(toSend);
++        ASSERT_EQUALS(3, d1.pullInt());
++
++
++        ASSERT_EQUALS(39, d1.pullInt());
++        BSONObj bo2 = d1.nextJsObj();
++        ASSERT_THROWS(d1.nextJsObj(), MsgAssertionException);
++    }
++
++
++
++} // mongo namespace
+diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
+index cf3d084..c4fb9f7 100644
+--- a/src/mongo/db/instance.cpp
++++ b/src/mongo/db/instance.cpp
+@@ -339,10 +339,13 @@ namespace mongo {
+         // before we lock...
+         int op = m.operation();
+         bool isCommand = false;
+-        const char *ns = m.singleData()->_data + 4;
++
++        DbMessage dbmsg(m);
+ 
+         if ( op == dbQuery ) {
+-            if( strstr(ns, ".$cmd") ) {
++            const char *ns = dbmsg.getns();
++
++            if (strstr(ns, ".$cmd")) {
+                 isCommand = true;
+                 opwrite(m);
+                 if( strstr(ns, ".$cmd.sys.") ) {
+@@ -406,7 +409,8 @@ namespace mongo {
+         }
+         else if ( op == dbMsg ) {
+             // deprecated - replaced by commands
+-            char *p = m.singleData()->_data;
++            const char *p = dbmsg.getns();
++
+             int len = strlen(p);
+             if ( len > 400 )
+                 out() << curTimeMillis64() % 10000 <<
+@@ -423,8 +427,6 @@ namespace mongo {
+         }
+         else {
+             try {
+-                const NamespaceString nsString( ns );
+-
+                 // The following operations all require authorization.
+                 // dbInsert, dbUpdate and dbDelete can be easily pre-authorized,
+                 // here, but dbKillCursors cannot.
+@@ -433,28 +435,36 @@ namespace mongo {
+                     logThreshold = 10;
+                     receivedKillCursors(m);
+                 }
+-                else if ( !nsString.isValid() ) {
+-                    // Only killCursors doesn't care about namespaces
+-                    uassert( 16257, str::stream() << "Invalid ns [" << ns << "]", false );
+-                }
+-                else if ( op == dbInsert ) {
+-                    receivedInsert(m, currentOp);
+-                }
+-                else if ( op == dbUpdate ) {
+-                    receivedUpdate(m, currentOp);
+-                }
+-                else if ( op == dbDelete ) {
+-                    receivedDelete(m, currentOp);
+-                }
+-                else {
++                else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
+                     mongo::log() << "    operation isn't supported: " << op << endl;
+                     currentOp.done();
+                     shouldLog = true;
+                 }
+-            }
+-            catch ( UserException& ue ) {
+-                tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing "
+-                        << ue.toString() << endl;
++                else {
++                    const char* ns = dbmsg.getns();
++                    const NamespaceString nsString(ns);
++
++                    if (!nsString.isValid()) {
++                        uassert(16257, str::stream() << "Invalid ns [" << ns << "]", false);
++                    }
++                    else if (op == dbInsert) {
++                        receivedInsert(m, currentOp);
++                    }
++                    else if (op == dbUpdate) {
++                        receivedUpdate(m, currentOp);
++                    }
++                    else if (op == dbDelete) {
++                        receivedDelete(m, currentOp);
++                    }
++                    else {
++                        fassertFailed(18625);
++                    }
++                }
++             }
++            catch (const UserException& ue) {
++                setLastError(ue.getCode(), ue.getInfo().msg.c_str());
++                LOG(3) << " Caught Assertion in " << opToString(op) << ", continuing "
++                       << ue.toString() << endl;
+                 debug.exceptionInfo = ue.getInfo();
+             }
+             catch ( AssertionException& e ) {
+@@ -492,9 +502,8 @@ namespace mongo {
+     } /* assembleResponse() */
+ 
+     void receivedKillCursors(Message& m) {
+-        int *x = (int *) m.singleData()->_data;
+-        x++; // reserved
+-        int n = *x++;
++        DbMessage dbmessage(m);
++        int n = dbmessage.pullInt();
+ 
+         uassert( 13659 , "sent 0 cursors to kill" , n != 0 );
+         massert( 13658 , str::stream() << "bad kill cursors size: " << m.dataSize() , m.dataSize() == 8 + ( 8 * n ) );
+@@ -505,7 +514,9 @@ namespace mongo {
+             verify( n < 30000 );
+         }
+ 
+-        int found = ClientCursor::eraseIfAuthorized(n, (long long *) x);
++        const long long* cursorArray = dbmessage.getArray(n);
++
++        int found = ClientCursor::eraseIfAuthorized(n, (long long *)cursorArray);
+ 
+         if ( logLevel > 0 || found != n ) {
+             LOG( found == n ? 1 : 0 ) << "killcursors: found " << found << " of " << n << endl;
+diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
+index 4e904e3..4cec5a6 100644
+--- a/src/mongo/s/cursors.cpp
++++ b/src/mongo/s/cursors.cpp
+@@ -270,19 +270,20 @@ namespace mongo {
+     }
+ 
+     void CursorCache::gotKillCursors(Message& m ) {
+-        int *x = (int *) m.singleData()->_data;
+-        x++; // reserved
+-        int n = *x++;
++        DbMessage dbmessage(m);
++        int n = dbmessage.pullInt();
+ 
+         if ( n > 2000 ) {
+             LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+         }
+ 
+-
+         uassert( 13286 , "sent 0 cursors to kill" , n >= 1 );
+         uassert( 13287 , "too many cursors to kill" , n < 30000 );
++        massert( 18632 , str::stream() << "bad kill cursors size: " << m.dataSize(), 
++                    m.dataSize() == 8 + ( 8 * n ) );
++
+ 
+-        long long * cursors = (long long *)x;
++        const long long* cursors = dbmessage.getArray(n);
+         AuthorizationManager* authManager =
+                 ClientBasic::getCurrent()->getAuthorizationManager();
+         for ( int i=0; i<n; i++ ) {
+diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp
+index ed9527d..033ed14 100644
+--- a/src/mongo/s/request.cpp
++++ b/src/mongo/s/request.cpp
+@@ -37,7 +37,6 @@ namespace mongo {
+     Request::Request( Message& m, AbstractMessagingPort* p ) :
+         _m(m) , _d( m ) , _p(p) , _didInit(false) {
+ 
+-        verify( _d.getns() );
+         _id = _m.header()->id;
+ 
+         _clientInfo = ClientInfo::get();
+@@ -58,7 +57,7 @@ namespace mongo {
+ 
+     // Deprecated, will move to the strategy itself
+     void Request::reset() {
+-        if ( _m.operation() == dbKillCursors ) {
++        if ( !_d.messageShouldHaveNs()) {
+             return;
+         }
+ 
+diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
+index 560a4ac..c3d9ede 100644
+--- a/src/mongo/s/strategy_shard.cpp
++++ b/src/mongo/s/strategy_shard.cpp
+@@ -1043,8 +1043,9 @@ namespace mongo {
+                 // TODO: make this safer w/ shard add/remove
+                 //
+ 
+-                int* opts = (int*)( r.d().afterNS() );
+-                opts[0] |= UpdateOption_Broadcast; // this means don't check shard version in mongod
++                int opts = r.d().getFlags();
++                opts |= UpdateOption_Broadcast; // this means don't check shard version in mongod
++                r.d().setFlags(opts);
+                 broadcastWrite( dbUpdate, r );
+                 return;
+             }
+@@ -1192,8 +1193,10 @@ namespace mongo {
+ 
+             if( ! shard ){
+ 
+-                int * x = (int*)(r.d().afterNS());
+-                x[0] |= RemoveOption_Broadcast; // this means don't check shard version in mongod
++                int opts = r.d().getFlags();
++                opts |= RemoveOption_Broadcast; // this means don't check shard version in mongod
++                r.d().setFlags(opts);
++
+                 broadcastWrite(dbDelete, r);
+                 return;
+             }
+diff --git a/src/mongo/s/writeback_listener.cpp b/src/mongo/s/writeback_listener.cpp
+index 0695c4c..8eddf78 100644
+--- a/src/mongo/s/writeback_listener.cpp
++++ b/src/mongo/s/writeback_listener.cpp
+@@ -294,7 +294,7 @@ namespace mongo {
+                             Request r( msg , 0 );
+                             r.init();
+ 
+-                            r.d().reservedField() |= Reserved_FromWriteback;
++                            r.d().setReservedField(r.d().reservedField() | Reserved_FromWriteback);
+ 
+                             ClientInfo * ci = r.getClientInfo();
+                             if (!noauth) {
+diff --git a/src/mongo/tools/sniffer.cpp b/src/mongo/tools/sniffer.cpp
+index 5e2ac66..b6c97d9 100644
+--- a/src/mongo/tools/sniffer.cpp
++++ b/src/mongo/tools/sniffer.cpp
+@@ -329,9 +329,7 @@ void processMessage( Connection& c , Message& m ) {
+             break;
+         }
+         case mongo::dbKillCursors: {
+-            int *x = (int *) m.singleData()->_data;
+-            x++; // reserved
+-            int n = *x;
++            int n = d.pullInt();
+             out() << "\tkillCursors n: " << n << endl;
+             break;
+         }
+@@ -357,7 +355,7 @@ void processMessage( Connection& c , Message& m ) {
+                 if ( m.operation() == mongo::dbGetMore ) {
+                     DbMessage d( m );
+                     d.pullInt();
+-                    long long &cId = d.pullInt64();
++                    long long cId = d.pullInt64();
+                     cId = mapCursor[ c ][ cId ];
+                 }
+                 Message response;
+diff --git a/src/mongo/util/net/message.h b/src/mongo/util/net/message.h
+index ade0123..dee49f5 100644
+--- a/src/mongo/util/net/message.h
++++ b/src/mongo/util/net/message.h
+@@ -103,13 +103,18 @@ namespace mongo {
+ 
+ #pragma pack(1)
+     /* todo merge this with MSGHEADER (or inherit from it). */
+-    struct MsgData {
++    class MsgData {
++        friend class Message;
++        friend class DbMessage;
++        friend class MessagingPort;
++    public:
+         int len; /* len of the msg, including this field */
+         MSGID id; /* request/reply id's match... */
+         MSGID responseTo; /* id of the message we are responding to */
+         short _operation;
+         char _flags;
+         char _version;
++
+         int operation() const {
+             return _operation;
+         }
+@@ -118,7 +123,6 @@ namespace mongo {
+             _version = 0;
+             _operation = o;
+         }
+-        char _data[4];
+ 
+         int& dataAsInt() {
+             return *((int *) _data);
+@@ -140,6 +144,8 @@ namespace mongo {
+         }
+ 
+         int dataLen(); // len without header
++    private:
++        char _data[4]; //must be last member
+     };
+     const int MsgDataHeaderSize = sizeof(MsgData) - 4;
+     inline int MsgData::dataLen() {
diff -Nru mongodb-2.4.10/debian/patches/cefb0ef38f050b73b2bf8211add55f3749753e0a.patch mongodb-2.4.10/debian/patches/cefb0ef38f050b73b2bf8211add55f3749753e0a.patch
--- mongodb-2.4.10/debian/patches/cefb0ef38f050b73b2bf8211add55f3749753e0a.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.10/debian/patches/cefb0ef38f050b73b2bf8211add55f3749753e0a.patch	2014-11-10 18:36:24.000000000 +0000
@@ -0,0 +1,28 @@
+From cefb0ef38f050b73b2bf8211add55f3749753e0a Mon Sep 17 00:00:00 2001
+From: Mark Benvenuto <mark.benvenuto@mongodb.com>
+Date: Thu, 14 Aug 2014 17:33:12 -0400
+Subject: [PATCH] SERVER-14268: Backport from 2.6 to 2.4 - disable test
+
+---
+ src/mongo/db/SConscript | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
+index 8ac7897..2d3a4f6 100644
+--- a/src/mongo/db/SConscript
++++ b/src/mongo/db/SConscript
+@@ -13,14 +13,3 @@ env.StaticLibrary('common', ['field_ref.cpp'],
+                            '$BUILD_DIR/mongo/foundation'])
+ 
+ env.CppUnitTest('field_ref_test', ['field_ref_test.cpp'], LIBDEPS=['common'])
+-
+-env.CppUnitTest(
+-    target="dbmessage_test",
+-    source=[
+-        "dbmessage_test.cpp"
+-    ],
+-    LIBDEPS=[
+-        "common",
+-        "$BUILD_DIR/mongo/clientdriver",
+-    ],
+-)
diff -Nru mongodb-2.4.10/debian/patches/series mongodb-2.4.10/debian/patches/series
--- mongodb-2.4.10/debian/patches/series	2014-06-21 18:44:14.000000000 +0000
+++ mongodb-2.4.10/debian/patches/series	2014-11-10 19:19:57.000000000 +0000
@@ -13,3 +13,7 @@
 0004-Support-ppc64el-builds.patch
 0012-support-gnu.patch
 no-unused-function.patch
+9105b69e1ded5b7d0d384d574103b0ee6bbb6122.patch
+cefb0ef38f050b73b2bf8211add55f3749753e0a.patch
+8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch
+656f78711632a5dc37221422c99e3c4619bcc58f.patch
diff -Nru mongodb-2.4.10/debian/changelog mongodb-2.4.12/debian/changelog
--- mongodb-2.4.10/debian/changelog	2014-11-02 00:42:59.000000000 +0000
+++ mongodb-2.4.12/debian/changelog	2014-11-04 16:38:22.000000000 +0000
@@ -1,3 +1,10 @@
+mongodb (1:2.4.12-1) unstable; urgency=high
+
+  * New upstream release.
+  * Backport disable SSLv3 ciphers from pre 2.4.13 to fix CVE-2014-3566.
+
+ -- Laszlo Boszormenyi (GCS) <gcs@debian.org>  Tue, 04 Nov 2014 05:44:11 +0000
+
 mongodb (1:2.4.10-3) unstable; urgency=medium
 
   * Enable systemd unit file (closes: #767211).
diff -Nru mongodb-2.4.10/debian/patches/0009-ignore-unused-local-typedefs.patch mongodb-2.4.12/debian/patches/0009-ignore-unused-local-typedefs.patch
--- mongodb-2.4.10/debian/patches/0009-ignore-unused-local-typedefs.patch	2013-10-18 08:35:02.000000000 +0000
+++ mongodb-2.4.12/debian/patches/0009-ignore-unused-local-typedefs.patch	2014-11-04 05:52:32.000000000 +0000
@@ -7,8 +7,8 @@
 
 --- a/SConstruct
 +++ b/SConstruct
-@@ -715,6 +715,7 @@ if nix:
-         env.Append( CCFLAGS=["-Werror", "-pipe"] )
+@@ -722,6 +722,7 @@ if nix:
+             env.Append( CCFLAGS=["-Werror"] )
          if not has_option('clang'):
              env.Append( CCFLAGS=["-fno-builtin-memcmp"] ) # glibc's memcmp is faster than gcc's
 +            env.Append( CXXFLAGS=["-Wno-unused-local-typedefs"] ) # New in gcc 4.8
diff -Nru mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch mongodb-2.4.12/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch
--- mongodb-2.4.10/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/debian/patches/656f78711632a5dc37221422c99e3c4619bcc58f.patch	2014-11-04 17:00:29.000000000 +0000
@@ -0,0 +1,22 @@
+From 656f78711632a5dc37221422c99e3c4619bcc58f Mon Sep 17 00:00:00 2001
+From: Dan Pasette <dan@10gen.com>
+Date: Mon, 27 Oct 2014 23:49:33 -0400
+Subject: [PATCH] SERVER-15673 fix typo in backport
+
+---
+ src/mongo/util/net/ssl_manager.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
+index 0efdd7c..de49e38 100644
+--- a/src/mongo/util/net/ssl_manager.cpp
++++ b/src/mongo/util/net/ssl_manager.cpp
+@@ -142,7 +142,7 @@ namespace mongo {
+         // Activate all bug workaround options, to support buggy client SSL's.
+         // SSL_OP_NO_SSLv2 - Disable SSL v2 support
+         // SSL_OP_NO_SSLv3 - Disable SSL v3 support
+-        SSL_CTX_set_options(*context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
++        SSL_CTX_set_options(_context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
+ 
+         // If renegotiation is needed, don't return from recv() or send() until it's successful.
+         // Note: this is for blocking sockets only.
diff -Nru mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch mongodb-2.4.12/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch
--- mongodb-2.4.10/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/debian/patches/8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch	2014-11-04 16:31:53.000000000 +0000
@@ -0,0 +1,25 @@
+From 8b9242837510e6410ddcf4f19969da4c7b01b2f7 Mon Sep 17 00:00:00 2001
+From: Dan Pasette <dan@10gen.com>
+Date: Mon, 27 Oct 2014 22:45:56 -0400
+Subject: [PATCH] SERVER-15673 Disable SSLv3 ciphers (CVE-2014-3566 "POODLE")
+
+(cherry picked from commit 035b5a90f56d653e930fcbe20c89f4dda7e48a30)
+---
+ src/mongo/util/net/ssl_manager.cpp | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
+index dd8b3a2..0efdd7c 100644
+--- a/src/mongo/util/net/ssl_manager.cpp
++++ b/src/mongo/util/net/ssl_manager.cpp
+@@ -140,7 +140,9 @@ namespace mongo {
+                 _context);
+    
+         // Activate all bug workaround options, to support buggy client SSL's.
+-        SSL_CTX_set_options(_context, SSL_OP_ALL);
++        // SSL_OP_NO_SSLv2 - Disable SSL v2 support
++        // SSL_OP_NO_SSLv3 - Disable SSL v3 support
++        SSL_CTX_set_options(*context, SSL_OP_ALL|SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3);
+ 
+         // If renegotiation is needed, don't return from recv() or send() until it's successful.
+         // Note: this is for blocking sockets only.
diff -Nru mongodb-2.4.10/debian/patches/series mongodb-2.4.12/debian/patches/series
--- mongodb-2.4.10/debian/patches/series	2014-06-21 18:44:14.000000000 +0000
+++ mongodb-2.4.12/debian/patches/series	2014-11-04 17:00:46.000000000 +0000
@@ -13,3 +13,5 @@
 0004-Support-ppc64el-builds.patch
 0012-support-gnu.patch
 no-unused-function.patch
+8b9242837510e6410ddcf4f19969da4c7b01b2f7.patch
+656f78711632a5dc37221422c99e3c4619bcc58f.patch
diff -Nru mongodb-2.4.10/doxygenConfig mongodb-2.4.12/doxygenConfig
--- mongodb-2.4.10/doxygenConfig	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/doxygenConfig	2014-10-14 20:40:29.000000000 +0000
@@ -3,7 +3,7 @@
 #---------------------------------------------------------------------------
 DOXYFILE_ENCODING      = UTF-8
 PROJECT_NAME           = MongoDB
-PROJECT_NUMBER         = 2.4.10
+PROJECT_NUMBER         = 2.4.12
 OUTPUT_DIRECTORY       = docs/doxygen
 CREATE_SUBDIRS         = NO
 OUTPUT_LANGUAGE        = English
diff -Nru mongodb-2.4.10/jstests/dur/checksum.js mongodb-2.4.12/jstests/dur/checksum.js
--- mongodb-2.4.10/jstests/dur/checksum.js	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/jstests/dur/checksum.js	2014-10-14 20:40:29.000000000 +0000
@@ -0,0 +1,89 @@
+// Test checksum validation of journal files.
+
+var testname = "dur_checksum";
+var path = MongoRunner.dataPath + testname;
+
+if (0) {
+    // This is used to create the prototype journal file.
+    jsTest.log("Just creating prototype journal, not testing anything");
+    var conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur");
+    var db = conn.getDB("test");
+
+    // each insert is in it's own commit.
+    db.foo.insert({a: 1});
+    db.runCommand({getlasterror:1, j:1})
+
+    db.foo.insert({a: 2});
+    db.runCommand({getlasterror:1, j:1})
+
+    stopMongod(30001, /*signal*/9);
+
+    jsTest.log("Journal file left at " + path + "/journal/j._0");
+    quit();
+    // A hex editor must be used to replace the checksums of specific journal sections with
+    // "0BADC0DE 1BADC0DE 2BADC0DE 3BADCD0E"
+}
+
+function startMongodWithJournal() {
+    return startMongodNoReset("--port", 30001,
+                              "--dbpath", path,
+                              "--dur",
+                              "--smallfiles",
+                              "--durOptions", 1 /*DurDumpJournal*/);
+}
+
+
+jsTest.log("Starting with good.journal to make sure everything works");
+resetDbpath(path);
+mkdir(path + '/journal');
+copyFile("jstests/libs/dur_checksum_good.journal", path + "/journal/j._0");
+var conn = startMongodWithJournal();
+var db = conn.getDB('test');
+assert.eq(db.foo.count(), 2);
+stopMongod(30001);
+
+
+// dur_checksum_bad_last.journal is good.journal with the bad checksum on the last section.
+jsTest.log("Starting with bad_last.journal");
+resetDbpath(path);
+mkdir(path + '/journal');
+copyFile("jstests/libs/dur_checksum_bad_last.journal", path + "/journal/j._0");
+conn = startMongodWithJournal();
+var db = conn.getDB('test');
+assert.eq(db.foo.count(), 1); // 2nd insert "never happened"
+stopMongod(30001);
+
+
+// dur_checksum_bad_first.journal is good.journal with the bad checksum on the prior section.
+// This means there is a good commit after the bad one. We currently ignore this, but a future
+// version of the server may be able to detect this case.
+jsTest.log("Starting with bad_first.journal");
+resetDbpath(path);
+mkdir(path + '/journal');
+copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0");
+conn = startMongodWithJournal();
+var db = conn.getDB('test');
+assert.eq(db.foo.count(), 0); // Neither insert happened.
+stopMongod(30001);
+
+// If we detect an error in a non-final journal file, that is considered an error.
+jsTest.log("Starting with bad_last.journal followed by good.journal");
+resetDbpath(path);
+mkdir(path + '/journal');
+copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0");
+copyFile("jstests/libs/dur_checksum_good.journal", path + "/journal/j._1");
+
+exitCode = runMongoProgram("mongod",
+                           "--port", 30001,
+                           "--dbpath", path,
+                           "--dur",
+                           "--smallfiles",
+                           "--durOptions", 1 /*DurDumpJournal*/
+                                         + 2 /*DurScanOnly*/);
+
+assert.eq(exitCode, 100 /*EXIT_UNCAUGHT*/);
+
+// TODO Possibly we could check the mongod log to verify that the correct type of exception was
+// thrown.  But that would introduce a dependency on the mongod log format, which we may not want.
+
+jsTest.log("SUCCESS checksum.js");
diff -Nru mongodb-2.4.10/jstests/dur/diskfull.js mongodb-2.4.12/jstests/dur/diskfull.js
--- mongodb-2.4.10/jstests/dur/diskfull.js	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/jstests/dur/diskfull.js	2014-10-14 20:40:29.000000000 +0000
@@ -1,4 +1,13 @@
-/** Test running out of disk space with durability enabled */
+/** Test running out of disk space with durability enabled.
+To set up the test, it's required to set up a small partition something like the following:
+sudo umount /data/db/diskfulltest/
+rm -rf /data/db/diskfulltest
+mkdir -p /data/images
+dd bs=512 count=83968 if=/dev/zero of=/data/images/diskfulltest.img
+/sbin/mkfs.ext2 -m 0 -F /data/images/diskfulltest.img
+mkdir -p /data/db/diskfulltest
+mount -o loop /data/images/diskfulltest.img /data/db/diskfulltest
+*/
 
 startPath = "/data/db/diskfulltest";
 recoverPath = "/data/db/dur_diskfull";
@@ -52,7 +61,10 @@
         var d = conn.getDB("test");
         
         big = new Array( 5000 ).toString();
-        for( i = 0; i < 10000; ++i ) {
+        // This part of the test depends on the partition size used in the build env
+        // Currently, unused, but with larger partitions insert enough documents here
+        // to create a second db file
+        for( i = 0; i < 1; ++i ) {
             d.foo.insert( { _id:i, b:big } );
         }
         
diff -Nru mongodb-2.4.10/jstests/dur/md5.js mongodb-2.4.12/jstests/dur/md5.js
--- mongodb-2.4.10/jstests/dur/md5.js	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/jstests/dur/md5.js	1970-01-01 00:00:00.000000000 +0000
@@ -1,101 +0,0 @@
-/**
- * Test md5 validation of journal file.
- * This test is dependent on the journal file format and may require an update if the format changes,
- * see comments near fuzzFile() below.
- */
-
-var debugging = false;
-var testname = "dur_md5";
-var step = 1;
-var conn = null;
-
-function log(str) {
-    print();
-    if(str)
-        print(testname+" step " + step++ + " " + str);
-    else
-        print(testname+" step " + step++);
-}
-
-/** Changes here may require updating the byte index of the md5 hash, see File comments below. */
-function work() {
-    log("work");
-    var d = conn.getDB("test");
-    d.foo.insert({ _id: 3, x: 22 });
-    d.foo.insert({ _id: 4, x: 22 });
-    d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
-    d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
-    d.a.update({ _id: 4 }, { $inc: { x: 1} });
-    
-    // try building an index.  however, be careful as object id's in system.indexes would vary, so we do it manually:
-    d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
-    
-    //    d.a.update({ _id: 4 }, { $inc: { x: 1} });
-    //    d.a.reIndex();
-    
-    // assure writes applied in case we kill -9 on return from this function
-    d.getLastError(); 
-    
-    log("endwork");
-}
-
-if( debugging ) { 
-    // mongod already running in debugger
-    conn = db.getMongo();
-    work();
-    sleep(30000);
-    quit();
-}
-
-log();
-
-var path = "/data/db/" + testname+"dur";
-
-log();
-conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8);
-work();
-
-// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
-log("kill -9");
-
-// kill the process hard
-stopMongod(30001, /*signal*/9);
-
-// journal file should be present, and non-empty as we killed hard
-
-// Bit flip the first byte of the md5sum contained within the opcode footer.
-// This ensures we get an md5 exception instead of some other type of exception.
-var file = path + "/journal/j._0";
-
-// if test fails, uncomment these "cp" lines to debug:
-// run("cp", file, "/tmp/before");
-
-// journal header is 8192
-// jsectheader is 20
-// so a little beyond that
-fuzzFile(file, 8214+8);
-
-// run("cp", file, "/tmp/after");
-
-log("run mongod again recovery should fail");
-
-// 100 exit code corresponds to EXIT_UNCAUGHT, which is triggered when there is an exception during recovery.
-// 14 is is sometimes triggered instead due to SERVER-2184
-exitCode = runMongoProgram( "mongod", "--port", 30002, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", /*9*/13 );
-
-if (exitCode != 100 && exitCode != 14) {
-    print("\n\n\nFAIL md5.js expected mongod to fail but didn't? mongod exitCode: " + exitCode + "\n\n\n");
-    // sleep a little longer to get more output maybe
-    sleep(2000);
-    assert(false);
-}
-
-// TODO Possibly we could check the mongod log to verify that the correct type of exception was thrown.  But
-// that would introduce a dependency on the mongod log format, which we may not want.
-
-print("SUCCESS md5.js");
-
-// if we sleep a littler here we may get more out the mongod output logged
-sleep(500);
diff -Nru mongodb-2.4.10/jstests/fts_index3.js mongodb-2.4.12/jstests/fts_index3.js
--- mongodb-2.4.10/jstests/fts_index3.js	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/jstests/fts_index3.js	2014-10-14 20:40:29.000000000 +0000
@@ -0,0 +1,135 @@
+// Test that updates to fields in a text-indexed document are correctly reflected in the text index.
+load("jstests/libs/fts.js");
+var coll = db.fts_index3;
+var res;
+
+// 1) Create a text index on a single field, insert a document, update the value of the field, and
+// verify that searching with the new value returns the document.
+coll.drop();
+res = coll.ensureIndex({a: "text"});
+assert.isnull(res);
+coll.insert({a: "hello"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello"}).stats.n);
+coll.update({}, {$set: {a: "world"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "world"}).stats.n);
+
+// 2) Same as #1, but with a wildcard text index.
+coll.drop();
+res = coll.ensureIndex({"$**": "text"});
+assert.isnull(res);
+coll.insert({a: "hello"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello"}).stats.n);
+coll.update({}, {$set: {a: "world"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "world"}).stats.n);
+
+// 3) Create a compound text index with an index prefix, insert a document, update the value of the
+// index prefix field, and verify that searching with the new value returns the document.
+coll.drop();
+res = coll.ensureIndex({a: 1, b: "text"});
+assert.isnull(res);
+coll.insert({a: 1, b: "hello"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {a: 1}}).stats.n);
+coll.update({}, {$set: {a: 2}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello", filter: {a: 1}}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {a: 2}}).stats.n);
+
+// 4) Same as #3, but with a wildcard text index.
+coll.drop();
+res = coll.ensureIndex({a: 1, "$**": "text"});
+assert.isnull(res);
+coll.insert({a: 1, b: "hello"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {a: 1}}).stats.n);
+coll.update({}, {$set: {a: 2}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello", filter: {a: 1}}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {a: 2}}).stats.n);
+
+// 5) Create a compound text index with an index suffix, insert a document, update the value of the
+// index suffix field, and verify that searching with the new value returns the document.
+coll.drop();
+res = coll.ensureIndex({a: "text", b: 1});
+assert.isnull(res);
+coll.insert({a: "hello", b: 1});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {b: 1}}).stats.n);
+coll.update({}, {$set: {b: 2}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello", filter: {b: 1}}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {b: 2}}).stats.n);
+
+// 6) Same as #5, but with a wildcard text index.
+coll.drop();
+res = coll.ensureIndex({"$**": "text", b: 1});
+assert.isnull(res);
+coll.insert({a: "hello", b: 1});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {b: 1}}).stats.n);
+coll.update({}, {$set: {b: 2}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "hello", filter: {b: 1}}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "hello", filter: {b: 2}}).stats.n);
+
+// 7) Create a text index on a single field, insert a document, update the language of the document
+// (so as to change the stemming), and verify that searching with the new language returns the
+// document.
+coll.drop();
+res = coll.ensureIndex({a: "text"});
+assert.isnull(res);
+coll.insert({a: "testing", language: "spanish"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+coll.update({}, {$set: {language: "english"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+
+// 8) Same as #7, but with a wildcard text index.
+coll.drop();
+res = coll.ensureIndex({"$**": "text"});
+assert.isnull(res);
+coll.insert({a: "testing", language: "spanish"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+coll.update({}, {$set: {language: "english"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+
+// 9) Create a text index on a single field with a custom language override, insert a document,
+// update the language of the document (so as to change the stemming), and verify that searching
+// with the new language returns the document.
+coll.drop();
+res = coll.ensureIndex({a: "text"}, {language_override: "idioma"});
+assert.isnull(res);
+coll.insert({a: "testing", idioma: "spanish"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+coll.update({}, {$set: {idioma: "english"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+
+// 10) Same as #9, but with a wildcard text index.
+coll.drop();
+res = coll.ensureIndex({"$**": "text"}, {language_override: "idioma"});
+assert.isnull(res);
+coll.insert({a: "testing", idioma: "spanish"});
+assert(!db.getLastError());
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
+coll.update({}, {$set: {idioma: "english"}});
+assert(!db.getLastError());
+assert.eq(0, coll.runCommand("text", {search: "testing", language: "spanish"}).stats.n);
+assert.eq(1, coll.runCommand("text", {search: "testing", language: "english"}).stats.n);
diff -Nru mongodb-2.4.10/jstests/index_id_desc.js mongodb-2.4.12/jstests/index_id_desc.js
--- mongodb-2.4.10/jstests/index_id_desc.js	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/jstests/index_id_desc.js	2014-10-14 20:40:29.000000000 +0000
@@ -0,0 +1,38 @@
+// Test creation of an index with key pattern {_id: -1}.  It is expected that a request for creation
+// of a {_id: -1} index is treated as if it were a request for creation of a {_id: 1} index.
+// SERVER-14833.
+
+var coll = db.index_id_desc;
+var indexes;
+var res;
+
+// Test ensureIndex({_id: -1}) on a nonexistent collection.
+coll.drop();
+res = coll.ensureIndex({_id: -1});
+assert.isnull(res);
+indexes = coll.getIndexes();
+assert.eq(1, indexes.length);
+assert.eq("_id_", indexes[0].name);
+assert.eq({_id: 1}, indexes[0].key);
+
+// Test ensureIndex({_id: -1}) on a normal empty collection.
+coll.drop();
+assert.commandWorked(coll.runCommand("create"));
+assert.eq(1, coll.getIndexes().length);
+res = coll.ensureIndex({_id: -1});
+assert.isnull(res);
+indexes = coll.getIndexes();
+assert.eq(1, indexes.length);
+assert.eq("_id_", indexes[0].name);
+assert.eq({_id: 1}, indexes[0].key);
+
+// Test ensureIndex({_id: -1}) on an empty collection with no _id index.
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.eq(0, coll.getIndexes().length);
+res = coll.ensureIndex({_id: -1});
+assert.isnull(res);
+indexes = coll.getIndexes();
+assert.eq(1, indexes.length);
+assert.eq("_id_", indexes[0].name);
+assert.eq({_id: 1}, indexes[0].key);
Binary files /tmp/9FeMoYB5WS/mongodb-2.4.10/jstests/libs/dur_checksum_bad_first.journal and /tmp/c06xGjImtB/mongodb-2.4.12/jstests/libs/dur_checksum_bad_first.journal differ
Binary files /tmp/9FeMoYB5WS/mongodb-2.4.10/jstests/libs/dur_checksum_bad_last.journal and /tmp/c06xGjImtB/mongodb-2.4.12/jstests/libs/dur_checksum_bad_last.journal differ
Binary files /tmp/9FeMoYB5WS/mongodb-2.4.10/jstests/libs/dur_checksum_good.journal and /tmp/c06xGjImtB/mongodb-2.4.12/jstests/libs/dur_checksum_good.journal differ
diff -Nru mongodb-2.4.10/rpm/init.d-mongod mongodb-2.4.12/rpm/init.d-mongod
--- mongodb-2.4.10/rpm/init.d-mongod	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/rpm/init.d-mongod	2014-10-14 20:40:29.000000000 +0000
@@ -22,8 +22,8 @@
 # FIXME: 1.9.x has a --shutdown flag that parses the config file and
 # shuts down the correct running pid, but that's unavailable in 1.8
 # for now.  This can go away when this script stops supporting 1.8.
-DBPATH=`awk -F= '/^dbpath=/{print $2}' "$CONFIGFILE"`
-PIDFILE=`awk -F= '/^dbpath\s=\s/{print $2}' "$CONFIGFILE"`
+DBPATH=`awk -F= '/^dbpath\s*=\s*/{print $2}' "$CONFIGFILE"`
+PIDFILE=`awk -F= '/^pidfilepath\s*=\s*/{print $2}' "$CONFIGFILE"`
 mongod=${MONGOD-/usr/bin/mongod}
 
 MONGO_USER=mongod
diff -Nru mongodb-2.4.10/rpm/mongo.mdv.spec mongodb-2.4.12/rpm/mongo.mdv.spec
--- mongodb-2.4.10/rpm/mongo.mdv.spec	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/rpm/mongo.mdv.spec	2014-10-14 20:40:29.000000000 +0000
@@ -1,5 +1,5 @@
 %define name    mongodb
-%define version 1.3.4
+%define version 2.4.12
 %define release %mkrel 1
 
 Name:    %{name}
@@ -140,4 +140,4 @@
 - Minor fixes.
 
 * Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> - 
-- Wrote mongo.spec.
\ No newline at end of file
+- Wrote mongo.spec.
diff -Nru mongodb-2.4.10/rpm/mongo.spec mongodb-2.4.12/rpm/mongo.spec
--- mongodb-2.4.10/rpm/mongo.spec	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/rpm/mongo.spec	2014-10-14 20:40:29.000000000 +0000
@@ -1,7 +1,7 @@
 Name: mongo-10gen
 Conflicts: mongo, mongo-10gen-unstable
 Obsoletes: mongo-stable
-Version: 2.4.8
+Version: 2.4.12
 Release: mongodb_1%{?dist}
 Summary: mongo client shell and tools
 License: AGPL 3.0
diff -Nru mongodb-2.4.10/SConstruct mongodb-2.4.12/SConstruct
--- mongodb-2.4.10/SConstruct	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/SConstruct	2014-10-14 20:40:29.000000000 +0000
@@ -231,6 +231,8 @@
 add_option('client-dist-basename', "Name of the client source archive.", 1, False,
            default='mongo-cxx-driver')
 
+add_option('disable-warnings-as-errors', "Don't add -Werror to compiler command line", 0, False)
+
 # don't run configure if user calls --help
 if GetOption('help'):
     Return()
@@ -710,7 +712,9 @@
                          "-Winvalid-pch"] )
     # env.Append( " -Wconversion" ) TODO: this doesn't really work yet
     if linux:
-        env.Append( CCFLAGS=["-Werror", "-pipe"] )
+        env.Append( CCFLAGS=["-pipe"] )
+        if not has_option("disable-warnings-as-errors"):
+            env.Append( CCFLAGS=["-Werror"] )
         if not has_option('clang'):
             env.Append( CCFLAGS=["-fno-builtin-memcmp"] ) # glibc's memcmp is faster than gcc's
 
diff -Nru mongodb-2.4.10/src/mongo/client/distlock.cpp mongodb-2.4.12/src/mongo/client/distlock.cpp
--- mongodb-2.4.10/src/mongo/client/distlock.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/client/distlock.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -594,7 +594,11 @@
             if ( o.isEmpty() ) {
                 try {
                     LOG( logLvl ) << "inserting initial doc in " << LocksType::ConfigNS << " for lock " << _name << endl;
-                    conn->insert( LocksType::ConfigNS , BSON( LocksType::name(_name) << LocksType::state(0) << LocksType::who("") ) );
+                    conn->insert( LocksType::ConfigNS,
+                                  BSON( LocksType::name(_name)
+                                        << LocksType::state(0)
+                                        << LocksType::who("")
+                                        << LocksType::lockID(OID()) ));
                 }
                 catch ( UserException& e ) {
                     warning() << "could not insert initial doc for distributed lock " << _name << causedBy( e ) << endl;
diff -Nru mongodb-2.4.10/src/mongo/client/parallel.cpp mongodb-2.4.12/src/mongo/client/parallel.cpp
--- mongodb-2.4.10/src/mongo/client/parallel.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/client/parallel.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -540,14 +540,7 @@
         if( full || errored ) retryNext = false;
 
         if( ! retryNext && pcState ){
-
-            if( errored && pcState->conn ){
-                // Don't return this conn to the pool if it's bad
-                pcState->conn->kill();
-                pcState->conn.reset();
-            }
-            else if( initialized ){
-
+            if (initialized && !errored) {
                 verify( pcState->cursor );
                 verify( pcState->conn );
 
diff -Nru mongodb-2.4.10/src/mongo/client/parallel.h mongodb-2.4.12/src/mongo/client/parallel.h
--- mongodb-2.4.10/src/mongo/client/parallel.h	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/client/parallel.h	2014-10-14 20:40:29.000000000 +0000
@@ -268,6 +268,8 @@
         ParallelConnectionState() :
             count( 0 ), done( false ) { }
 
+        // Please do not reorder. cursor destructor can use conn.
+        // On a related note, never attempt to cleanup these pointers manually.
         ShardConnectionPtr conn;
         DBClientCursorPtr cursor;
 
diff -Nru mongodb-2.4.10/src/mongo/db/dbmessage.cpp mongodb-2.4.12/src/mongo/db/dbmessage.cpp
--- mongodb-2.4.10/src/mongo/db/dbmessage.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/dbmessage.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -52,6 +52,140 @@
         return ss.str();
     }
 
+    DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(NULL), _mark(NULL), _nsLen(0) {
+        // for received messages, Message has only one buffer
+        _theEnd = _msg.singleData()->_data + _msg.singleData()->dataLen();
+        _nextjsobj = _msg.singleData()->_data;
+
+        _reserved = readAndAdvance<int>();
+
+        // Read packet for NS
+        if (messageShouldHaveNs()) {
+
+            // Limit = buffer size of message -
+            //        (first int4 in message which is either flags or a zero constant)
+            size_t limit = _msg.singleData()->dataLen() - sizeof(int);
+
+            _nsStart = _nextjsobj;
+            _nsLen = strnlen(_nsStart, limit);
+
+            // Validate there is room for a null byte in the buffer
+            // Strings can be zero length
+            uassert(18633, "Failed to parse ns string", _nsLen <= (limit - 1));
+
+            _nextjsobj += _nsLen + 1; // skip namespace + null
+        }
+    }
+
+    const char * DbMessage::getns() const {
+        verify(messageShouldHaveNs());
+        return _nsStart;
+    }
+
+    long long DbMessage::getInt64(int offsetBytes) const {
+        verify(messageShouldHaveNs());
+        const char* p = _nsStart + _nsLen + 1;
+        checkReadOffset<long long>(p, offsetBytes);
+
+        return ((reinterpret_cast<const long long*>(p + offsetBytes)))[0];
+    }
+
+    int DbMessage::getQueryNToReturn() const {
+        verify(messageShouldHaveNs());
+        const char* p = _nsStart + _nsLen + 1;
+        checkRead<int>(p, 2);
+
+        return ((reinterpret_cast<const int*>(p)))[1];
+    }
+
+
+    int DbMessage::getFlags() const {
+        verify(messageShouldHaveNs());
+        const char* p = _nsStart + _nsLen + 1;
+        checkRead<int>(p, 1);
+
+        return ((reinterpret_cast<const int*>(p)))[0];
+    }
+
+    void DbMessage::setFlags(int value) {
+        verify(messageShouldHaveNs());
+        char* p = const_cast<char*>(_nsStart) + _nsLen + 1;
+        checkRead<int>(p, 1);
+
+        ((reinterpret_cast<int*>(p)))[0] = value;
+    }
+
+
+    int DbMessage::pullInt() {
+        return readAndAdvance<int>();
+    }
+
+    long long DbMessage::pullInt64() {
+        return readAndAdvance<long long>();
+    }
+
+    const long long* DbMessage::getArray(size_t count) const {
+        checkRead<long long>(_nextjsobj, count);
+        return reinterpret_cast<const long long*>(_nextjsobj);
+    }
+
+    BSONObj DbMessage::nextJsObj() {
+        massert(10304,
+            "Client Error: Remaining data too small for BSON object",
+            _nextjsobj != NULL && _theEnd - _nextjsobj >= 5);
+
+        if (cmdLine.objcheck) {
+            Status status = validateBSON(_nextjsobj, _theEnd - _nextjsobj);
+            massert(10307,
+                str::stream() << "Client Error: bad object in message: " << status.reason(),
+                status.isOK());
+        }
+
+        BSONObj js(_nextjsobj);
+        verify(js.objsize() >= 5);
+        verify(js.objsize() <= (_theEnd - _nextjsobj));
+
+        _nextjsobj += js.objsize();
+        if (_nextjsobj >= _theEnd)
+            _nextjsobj = NULL;
+        return js;
+    }
+
+    void DbMessage::markReset(const char * toMark) {
+        if (toMark == NULL) {
+            toMark = _mark;
+        }
+
+        verify(toMark);
+        _nextjsobj = toMark;
+    }
+
+    template<typename T>
+    void DbMessage::checkRead(const char* start, size_t count) const {
+        if ((_theEnd - start) < static_cast<int>(sizeof(T) * count)) {
+            uassert(18634, "Not enough data to read", false);
+        }
+    }
+
+    template<typename T>
+    void DbMessage::checkReadOffset(const char* start, size_t offset) const {
+        if ((_theEnd - start) < static_cast<int>(offset + sizeof(T))) {
+            uassert(18626, "Not enough data to read", false);
+        }
+    }
+
+    template<typename T>
+    T DbMessage::read() const {
+        checkRead<T>(_nextjsobj, 1);
+
+        return *(reinterpret_cast<const T*>(_nextjsobj));
+    }
+
+    template<typename T> T DbMessage::readAndAdvance() {
+        T t = read<T>();
+        _nextjsobj += sizeof(T);
+        return t;
+    }
 
     void replyToQuery(int queryResultFlags,
                       AbstractMessagingPort* p, Message& requestMsg,
diff -Nru mongodb-2.4.10/src/mongo/db/dbmessage.h mongodb-2.4.12/src/mongo/db/dbmessage.h
--- mongodb-2.4.10/src/mongo/db/dbmessage.h	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/dbmessage.h	2014-10-14 20:40:29.000000000 +0000
@@ -110,138 +110,83 @@
        See http://dochub.mongodb.org/core/mongowireprotocol
     */
     class DbMessage {
+    // Assume sizeof(int) == 4 bytes
+    BOOST_STATIC_ASSERT(sizeof(int) == 4);
+
     public:
-        DbMessage(const Message& _m) : m(_m) , mark(0) {
-            // for received messages, Message has only one buffer
-            theEnd = _m.singleData()->_data + _m.header()->dataLen();
-            char *r = _m.singleData()->_data;
-            reserved = (int *) r;
-            data = r + 4;
-            nextjsobj = data;
+        // Note: DbMessage constructor reads the first 4 bytes and stores it in reserved
+        DbMessage(const Message& msg);
+
+        // Indicates whether this message is expected to have a ns
+        // or in the case of dbMsg, a string in the same place as ns
+        bool messageShouldHaveNs() const {
+            return (_msg.operation() >= dbMsg) && (_msg.operation() <= dbDelete);
         }
 
-        /** the 32 bit field before the ns 
+        /** the 32 bit field before the ns
          * track all bit usage here as its cross op
          * 0: InsertOption_ContinueOnError
          * 1: fromWriteback
          */
-        int& reservedField() { return *reserved; }
-
-        const char * getns() const {
-            return data;
-        }
-        void getns(Namespace& ns) const {
-            ns = data;
-        }
-
-        const char * afterNS() const {
-            return data + strlen( data ) + 1;
-        }
+        int reservedField() const { return _reserved; }
+        void setReservedField(int value) {  _reserved = value; }
 
-        int getInt( int num ) const {
-            const int * foo = (const int*)afterNS();
-            return foo[num];
-        }
+        const char * getns() const;
+        int getQueryNToReturn() const;
 
-        int getQueryNToReturn() const {
-            return getInt( 1 );
-        }
+        int getFlags() const;
+        void setFlags(int value);
 
-        /**
-         * get an int64 at specified offsetBytes after ns
-         */
-        long long getInt64( int offsetBytes ) const {
-            const char * x = afterNS();
-            x += offsetBytes;
-            const long long * ll = (const long long*)x;
-            return ll[0];
-        }
+        long long getInt64(int offsetBytes) const;
 
-        void resetPull() { nextjsobj = data; }
-        int pullInt() const { return pullInt(); }
-        int& pullInt() {
-            if ( nextjsobj == data )
-                nextjsobj += strlen(data) + 1; // skip namespace
-            int& i = *((int *)nextjsobj);
-            nextjsobj += 4;
-            return i;
-        }
-        long long pullInt64() const {
-            return pullInt64();
-        }
-        long long &pullInt64() {
-            if ( nextjsobj == data )
-                nextjsobj += strlen(data) + 1; // skip namespace
-            long long &i = *((long long *)nextjsobj);
-            nextjsobj += 8;
-            return i;
-        }
-
-        OID* getOID() const {
-            return (OID *) (data + strlen(data) + 1); // skip namespace
-        }
-
-        void getQueryStuff(const char *&query, int& ntoreturn) {
-            int *i = (int *) (data + strlen(data) + 1);
-            ntoreturn = *i;
-            i++;
-            query = (const char *) i;
-        }
+        int pullInt();
+        long long pullInt64();
+        const long long* getArray(size_t count) const;
 
         /* for insert and update msgs */
         bool moreJSObjs() const {
-            return nextjsobj != 0;
-        }
-        BSONObj nextJsObj() {
-            if ( nextjsobj == data ) {
-                nextjsobj += strlen(data) + 1; // skip namespace
-                massert( 13066 ,  "Message contains no documents", theEnd > nextjsobj );
-            }
-            massert( 10304,
-                     "Client Error: Remaining data too small for BSON object",
-                     theEnd - nextjsobj >= 5 );
-
-            if ( cmdLine.objcheck ) {
-                Status status = validateBSON( nextjsobj, theEnd - nextjsobj );
-                massert( 10307,
-                         str::stream() << "Client Error: bad object in message: " << status.reason(),
-                         status.isOK() );
-            }
-
-            BSONObj js(nextjsobj);
-            verify( js.objsize() >= 5 );
-            verify( js.objsize() < ( theEnd - data ) );
-
-            nextjsobj += js.objsize();
-            if ( nextjsobj >= theEnd )
-                nextjsobj = 0;
-            return js;
+            return _nextjsobj != 0;
         }
 
-        const Message& msg() const { return m; }
+        BSONObj nextJsObj();
+
+        const Message& msg() const { return _msg; }
 
-        const char * markGet() {
-            return nextjsobj;
+        const char * markGet() const {
+            return _nextjsobj;
         }
 
         void markSet() {
-            mark = nextjsobj;
+            _mark = _nextjsobj;
         }
 
-        void markReset( const char * toMark = 0) {
-            if( toMark == 0 ) toMark = mark;
-            verify( toMark );
-            nextjsobj = toMark;
-        }
+        void markReset(const char * toMark = NULL);
 
     private:
-        const Message& m;
-        int* reserved;
-        const char *data;
-        const char *nextjsobj;
-        const char *theEnd;
+        // Check if we have enough data to read
+        template<typename T>
+        void checkRead(const char* start, size_t count = 0) const;
+
+        template<typename T>
+        void checkReadOffset(const char* start, size_t offset) const;
+
+        // Read some type without advancing our position
+        template<typename T>
+        T read() const;
+
+        // Read some type, and advance our position
+        template<typename T> T readAndAdvance();
+
+        const Message& _msg;
+        int _reserved; // flags or zero depending on packet, starts the packet
+
+        const char* _nsStart; // start of namespace string, +4 from message start
+        const char* _nextjsobj; // current position reading packet
+        const char* _theEnd; // end of packet
+
+        const char* _mark;
 
-        const char * mark;
+        unsigned int _nsLen;
     };
 
 
diff -Nru mongodb-2.4.10/src/mongo/db/dbmessage_test.cpp mongodb-2.4.12/src/mongo/db/dbmessage_test.cpp
--- mongodb-2.4.10/src/mongo/db/dbmessage_test.cpp	1970-01-01 00:00:00.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/dbmessage_test.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -0,0 +1,143 @@
+/**
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or  modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include <string>
+
+#include "mongo/bson/util/builder.h"
+#include "mongo/db/dbmessage.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+    using std::string;
+
+    // Test if the reserved field is short of 4 bytes
+    TEST(DBMessage1, ShortFlags) {
+        BufBuilder b;
+        string ns("test");
+
+        b.appendChar( 1 );
+
+        Message toSend;
+        toSend.setData( dbDelete , b.buf() , b.len() );
+
+        ASSERT_THROWS(DbMessage d1(toSend), UserException);
+    }
+
+    // Test a short NS missing a trailing null
+    TEST(DBMessage1, BadNS) {
+        BufBuilder b;
+
+        b.appendNum( static_cast<int>(1) );
+        b.appendChar( 'b' );
+        b.appendChar( 'a' );
+        b.appendChar( 'd' );
+        // Forget to append \0
+
+        Message toSend;
+        toSend.setData( dbDelete , b.buf() , b.len() );
+
+        ASSERT_THROWS(DbMessage d1(toSend), UserException);
+    }
+
+    // Test a valid kill message and try an extra pull
+    TEST(DBMessage1, GoodKill) {
+        BufBuilder b;
+
+        b.appendNum( static_cast<int>(1) );
+        b.appendNum( static_cast<int>(3) );
+
+        Message toSend;
+        toSend.setData( dbKillCursors , b.buf() , b.len() );
+
+        DbMessage d1(toSend);
+        ASSERT_EQUALS(3, d1.pullInt());
+
+        ASSERT_THROWS(d1.pullInt(), UserException);
+    }
+
+    // Try a bad read of a type too large
+    TEST(DBMessage1, GoodKill2) {
+        BufBuilder b;
+
+        b.appendNum( static_cast<int>(1) );
+        b.appendNum( static_cast<int>(3) );
+
+        Message toSend;
+        toSend.setData( dbKillCursors , b.buf() , b.len() );
+
+        DbMessage d1(toSend);
+        ASSERT_THROWS(d1.pullInt64(), UserException);
+    }
+
+    // Test a basic good insert, and an extra read
+    TEST(DBMessage1, GoodInsert) {
+        BufBuilder b;
+        string ns("test");
+
+        b.appendNum( static_cast<int>(1) );
+        b.appendStr(ns);
+        b.appendNum( static_cast<int>(3) );
+        b.appendNum( static_cast<int>(39) );
+
+        Message toSend;
+        toSend.setData( dbInsert , b.buf() , b.len() );
+
+        DbMessage d1(toSend);
+        ASSERT_EQUALS(3, d1.pullInt());
+        ASSERT_EQUALS(39, d1.pullInt());
+        ASSERT_THROWS(d1.pullInt(), UserException);
+    }
+
+    // Test a basic good insert, and an extra read
+    TEST(DBMessage1, GoodInsert2) {
+        BufBuilder b;
+        string ns("test");
+
+        b.appendNum( static_cast<int>(1) );
+        b.appendStr(ns);
+        b.appendNum( static_cast<int>(3) );
+        b.appendNum( static_cast<int>(39) );
+
+        BSONObj bo = BSON( "ts" << 0 );
+        bo.appendSelfToBufBuilder( b );
+
+        Message toSend;
+        toSend.setData( dbInsert , b.buf() , b.len() );
+
+        DbMessage d1(toSend);
+        ASSERT_EQUALS(3, d1.pullInt());
+
+
+        ASSERT_EQUALS(39, d1.pullInt());
+        BSONObj bo2 = d1.nextJsObj();
+        ASSERT_THROWS(d1.nextJsObj(), MsgAssertionException);
+    }
+
+
+
+} // mongo namespace
diff -Nru mongodb-2.4.10/src/mongo/db/dur_recover.cpp mongodb-2.4.12/src/mongo/db/dur_recover.cpp
--- mongodb-2.4.10/src/mongo/db/dur_recover.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/dur_recover.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -49,6 +49,15 @@
 
 namespace mongo {
 
+    /**
+     * Thrown when a journal section is corrupt. This is considered OK as long as it occurs while
+     * processing the last file. Processing stops at the first corrupt section.
+     *
+     * Any logging about the nature of the corruption should happen before throwing as this class
+     * contains no data.
+     */
+    class JournalSectionCorruptException {};
+
     namespace dur {
 
         struct ParsedJournalEntry { /*copyable*/
@@ -112,9 +121,10 @@
                 verify( doDurOpsRecovering );
                 bool ok = uncompress((const char *)compressed, compressedLen, &_uncompressed);
                 if( !ok ) { 
-                    // it should always be ok (i think?) as there is a previous check to see that the JSectFooter is ok
+                    // We check the checksum before we uncompress, but this may still fail as the
+                    // checksum isn't foolproof.
                     log() << "couldn't uncompress journal section" << endl;
-                    msgasserted(15874, "couldn't uncompress journal section");
+                    throw JournalSectionCorruptException();
                 }
                 const char *p = _uncompressed.c_str();
                 verify( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
@@ -162,7 +172,11 @@
                         _lastDbName = (const char*) _entries->pos();
                         const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _entries->remaining());
                         const unsigned len = strnlen(_lastDbName, limit);
-                        massert(13533, "problem processing journal file during recovery", _lastDbName[len] == '\0');
+                        if (_lastDbName[len] != '\0') {
+                            log() << "problem processing journal file during recovery";
+                            throw JournalSectionCorruptException();
+                        }
+
                         _entries->skip(len+1); // skip '\0' too
                         _entries->read(lenOrOpCode); // read this for the fall through
                     }
@@ -357,10 +371,15 @@
             scoped_lock lk(_mx);
             RACECHECK
 
-            /** todo: we should really verify the checksum to see that seqNumber is ok?
-                      that is expensive maybe there is some sort of checksum of just the header 
-                      within the header itself
-            */
+            // Check the footer checksum before doing anything else.
+            if (_recovering) {
+                verify( ((const char *)h) + sizeof(JSectHeader) == p );
+                if (!f->checkHash(h, len + sizeof(JSectHeader))) {
+                    log() << "journal section checksum doesn't match";
+                    throw JournalSectionCorruptException();
+                }
+            }
+
             if( _recovering && _lastDataSyncedFromLastRun > h->seqNumber + ExtraKeepTimeMs ) {
                 if( h->seqNumber != _lastSeqMentionedInConsoleLog ) {
                     static int n;
@@ -403,14 +422,6 @@
                 entries.push_back(e);
             }
 
-            // after the entries check the footer checksum
-            if( _recovering ) {
-                verify( ((const char *)h) + sizeof(JSectHeader) == p );
-                if( !f->checkHash(h, len + sizeof(JSectHeader)) ) { 
-                    msgasserted(13594, "journal checksum doesn't match");
-                }
-            }
-
             // got all the entries for one group commit.  apply them:
             applyEntries(entries);
         }
@@ -429,20 +440,20 @@
                     JHeader h;
                     br.read(h);
 
-                    /* [dm] not automatically handled.  we should eventually handle this automatically.  i think:
-                       (1) if this is the final journal file
-                       (2) and the file size is just the file header in length (or less) -- this is a bit tricky to determine if prealloced
-                       then can just assume recovery ended cleanly and not error out (still should log).
-                    */
-                    uassert(13537, 
-                        "journal file header invalid. This could indicate corruption in a journal file, or perhaps a crash where sectors in file header were in flight written out of order at time of crash (unlikely but possible).", 
-                        h.valid());
+                    if (!h.valid()) {
+                        log() << "Journal file header invalid. This could indicate corruption, or "
+                              << "an unclean shutdown while writing the first section in a journal "
+                              << "file.";
+                        throw JournalSectionCorruptException();
+                    }
 
                     if( !h.versionOk() ) {
                         log() << "journal file version number mismatch got:" << hex << h._version                             
                             << " expected:" << hex << (unsigned) JHeader::CurrentVersion 
                             << ". if you have just upgraded, recover with old version of mongod, terminate cleanly, then upgrade." 
                             << endl;
+                        // Not using JournalSectionCorruptException as we don't want to ignore
+                        // journal files on upgrade.
                         uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
                     }
                     fileId = h.fileId;
@@ -473,7 +484,12 @@
                     killCurrentOp.checkForInterrupt(false);
                 }
             }
-            catch( BufReader::eof& ) {
+            catch (const BufReader::eof&) {
+                if( cmdLine.durOptions & CmdLine::DurDumpJournal )
+                    log() << "ABRUPT END" << endl;
+                return true; // abrupt end
+            }
+            catch (const JournalSectionCorruptException&) {
                 if( cmdLine.durOptions & CmdLine::DurDumpJournal )
                     log() << "ABRUPT END" << endl;
                 return true; // abrupt end
diff -Nru mongodb-2.4.10/src/mongo/db/index.cpp mongodb-2.4.12/src/mongo/db/index.cpp
--- mongodb-2.4.10/src/mongo/db/index.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/index.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -360,6 +360,14 @@
             uasserted(12504, s);
         }
 
+        /* this is because we want key patterns like { _id : 1 } and { _id : -1 } to
+           all be treated as the same pattern.
+        */
+        if ( IndexDetails::isIdIndexPattern(key) ) {
+            key = id_obj;
+            name = "_id_";
+        }
+
         sourceCollection = nsdetails(sourceNS);
         if( sourceCollection == 0 ) {
             // try to create it
@@ -393,9 +401,6 @@
             uasserted(12505,s);
         }
 
-        /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
-           all be treated as the same pattern.
-        */
         if ( IndexDetails::isIdIndexPattern(key) ) {
             //if( !god ) {
             //ensureHaveIdIndex( sourceNS.c_str(), mayInterrupt );
@@ -426,6 +431,7 @@
             BSONObj o = io;
             if ( plugin ) {
                 o = plugin->adjustIndexSpec(o);
+                key = o.getObjectField("key");
             }
             BSONObjBuilder b;
             int v = DefaultIndexVersionNumber;
@@ -439,14 +445,8 @@
             }
             // idea is to put things we use a lot earlier
             b.append("v", v);
-            if ( IndexDetails::isIdIndexPattern(o["key"].Obj()) ) {
-                b.append("name", "_id_");
-                b.append("key", id_obj);
-            }
-            else {
-                b.append( o["name"] );
-                b.append(o["key"]);
-            }
+            b.append("name", name);
+            b.append("key", key);
             if( o["unique"].trueValue() )
                 b.appendBool("unique", true); // normalize to bool true in case was int 1 or something...
             b.append(o["ns"]);
diff -Nru mongodb-2.4.10/src/mongo/db/index_set.cpp mongodb-2.4.12/src/mongo/db/index_set.cpp
--- mongodb-2.4.10/src/mongo/db/index_set.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/index_set.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -21,6 +21,8 @@
 
 namespace mongo {
 
+    IndexPathSet::IndexPathSet() : _allPathsIndexed( false ) { }
+
     void IndexPathSet::addPath( const StringData& path ) {
         string s;
         if ( getCanonicalIndexField( path, &s ) ) {
@@ -31,11 +33,20 @@
         }
     }
 
+    void IndexPathSet::allPathsIndexed() {
+        _allPathsIndexed = true;
+    }
+
     void IndexPathSet::clear() {
         _canonical.clear();
+        _allPathsIndexed = false;
     }
 
     bool IndexPathSet::mightBeIndexed( const StringData& path ) const {
+        if ( _allPathsIndexed ) {
+            return true;
+        }
+
         StringData use = path;
         string x;
         if ( getCanonicalIndexField( path, &x ) )
diff -Nru mongodb-2.4.10/src/mongo/db/index_set.h mongodb-2.4.12/src/mongo/db/index_set.h
--- mongodb-2.4.10/src/mongo/db/index_set.h	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/index_set.h	2014-10-14 20:40:29.000000000 +0000
@@ -32,8 +32,12 @@
 
     class IndexPathSet {
     public:
+        IndexPathSet();
+
         void addPath( const StringData& path );
 
+        void allPathsIndexed();
+
         void clear();
 
         bool mightBeIndexed( const StringData& path ) const;
@@ -43,6 +47,8 @@
         bool _startsWith( const StringData& a, const StringData& b ) const;
 
         std::set<std::string> _canonical;
+
+        bool _allPathsIndexed;
     };
 
 }
diff -Nru mongodb-2.4.10/src/mongo/db/index_set_test.cpp mongodb-2.4.12/src/mongo/db/index_set_test.cpp
--- mongodb-2.4.10/src/mongo/db/index_set_test.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/index_set_test.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -31,12 +31,37 @@
 
         ASSERT_FALSE( a.mightBeIndexed( "b" ) );
         ASSERT_FALSE( a.mightBeIndexed( "a.c" ) );
+
+        a.clear();
+        ASSERT_FALSE( a.mightBeIndexed( "a.b" ) );
     }
 
     TEST( IndexPathSetTest, Simple2 ) {
         IndexPathSet a;
         a.addPath( "ab" );
         ASSERT_FALSE( a.mightBeIndexed( "a" ) );
+        a.clear();
+        ASSERT_FALSE( a.mightBeIndexed( "ab" ) );
+    }
+
+    TEST( IndexPathSetTest, AllPathsIndexed1 ) {
+        IndexPathSet a;
+        a.allPathsIndexed();
+        ASSERT_TRUE( a.mightBeIndexed( "a" ) );
+        a.clear();
+        ASSERT_FALSE( a.mightBeIndexed( "a" ) );
+    }
+
+    TEST( IndexPathSetTest, AllPathsIndexed2 ) {
+        IndexPathSet a;
+        a.allPathsIndexed();
+        ASSERT_TRUE( a.mightBeIndexed( "a" ) );
+        ASSERT_TRUE( a.mightBeIndexed( "" ) );
+        a.addPath( "a" );
+        ASSERT_TRUE( a.mightBeIndexed( "a" ) );
+        ASSERT_TRUE( a.mightBeIndexed( "b" ) );
+        a.clear();
+        ASSERT_FALSE( a.mightBeIndexed( "a" ) );
     }
 
 
diff -Nru mongodb-2.4.10/src/mongo/db/instance.cpp mongodb-2.4.12/src/mongo/db/instance.cpp
--- mongodb-2.4.10/src/mongo/db/instance.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/instance.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -339,10 +339,13 @@
         // before we lock...
         int op = m.operation();
         bool isCommand = false;
-        const char *ns = m.singleData()->_data + 4;
+
+        DbMessage dbmsg(m);
 
         if ( op == dbQuery ) {
-            if( strstr(ns, ".$cmd") ) {
+            const char *ns = dbmsg.getns();
+
+            if (strstr(ns, ".$cmd")) {
                 isCommand = true;
                 opwrite(m);
                 if( strstr(ns, ".$cmd.sys.") ) {
@@ -406,7 +409,8 @@
         }
         else if ( op == dbMsg ) {
             // deprecated - replaced by commands
-            char *p = m.singleData()->_data;
+            const char *p = dbmsg.getns();
+
             int len = strlen(p);
             if ( len > 400 )
                 out() << curTimeMillis64() % 10000 <<
@@ -423,8 +427,6 @@
         }
         else {
             try {
-                const NamespaceString nsString( ns );
-
                 // The following operations all require authorization.
                 // dbInsert, dbUpdate and dbDelete can be easily pre-authorized,
                 // here, but dbKillCursors cannot.
@@ -433,28 +435,36 @@
                     logThreshold = 10;
                     receivedKillCursors(m);
                 }
-                else if ( !nsString.isValid() ) {
-                    // Only killCursors doesn't care about namespaces
-                    uassert( 16257, str::stream() << "Invalid ns [" << ns << "]", false );
-                }
-                else if ( op == dbInsert ) {
-                    receivedInsert(m, currentOp);
-                }
-                else if ( op == dbUpdate ) {
-                    receivedUpdate(m, currentOp);
-                }
-                else if ( op == dbDelete ) {
-                    receivedDelete(m, currentOp);
-                }
-                else {
+                else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
                     mongo::log() << "    operation isn't supported: " << op << endl;
                     currentOp.done();
                     shouldLog = true;
                 }
-            }
-            catch ( UserException& ue ) {
-                tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing "
-                        << ue.toString() << endl;
+                else {
+                    const char* ns = dbmsg.getns();
+                    const NamespaceString nsString(ns);
+
+                    if (!nsString.isValid()) {
+                        uassert(16257, str::stream() << "Invalid ns [" << ns << "]", false);
+                    }
+                    else if (op == dbInsert) {
+                        receivedInsert(m, currentOp);
+                    }
+                    else if (op == dbUpdate) {
+                        receivedUpdate(m, currentOp);
+                    }
+                    else if (op == dbDelete) {
+                        receivedDelete(m, currentOp);
+                    }
+                    else {
+                        fassertFailed(18625);
+                    }
+                }
+             }
+            catch (const UserException& ue) {
+                setLastError(ue.getCode(), ue.getInfo().msg.c_str());
+                LOG(3) << " Caught Assertion in " << opToString(op) << ", continuing "
+                       << ue.toString() << endl;
                 debug.exceptionInfo = ue.getInfo();
             }
             catch ( AssertionException& e ) {
@@ -492,9 +502,8 @@
     } /* assembleResponse() */
 
     void receivedKillCursors(Message& m) {
-        int *x = (int *) m.singleData()->_data;
-        x++; // reserved
-        int n = *x++;
+        DbMessage dbmessage(m);
+        int n = dbmessage.pullInt();
 
         uassert( 13659 , "sent 0 cursors to kill" , n != 0 );
         massert( 13658 , str::stream() << "bad kill cursors size: " << m.dataSize() , m.dataSize() == 8 + ( 8 * n ) );
@@ -505,7 +514,9 @@
             verify( n < 30000 );
         }
 
-        int found = ClientCursor::eraseIfAuthorized(n, (long long *) x);
+        const long long* cursorArray = dbmessage.getArray(n);
+
+        int found = ClientCursor::eraseIfAuthorized(n, (long long *)cursorArray);
 
         if ( logLevel > 0 || found != n ) {
             LOG( found == n ? 1 : 0 ) << "killcursors: found " << found << " of " << n << endl;
@@ -784,10 +795,23 @@
                 // check no $ modifiers.  note we only check top level.  
                 // (scanning deep would be quite expensive)
                 uassert( 13511, "document to insert can't have $ fields", e.fieldName()[0] != '$' );
-                
-                // check no regexp for _id (SERVER-9502)
+
                 if (str::equals(e.fieldName(), "_id")) {
+                    // check no regexp for _id (SERVER-9502)
                     uassert(16824, "can't use a regex for _id", e.type() != RegEx);
+
+                    uassert(ErrorCodes::BadValue,
+                            "can't use an undefined for _id",
+                            e.type() != Undefined );
+
+                    uassert(ErrorCodes::BadValue,
+                            "can't use an array for _id",
+                            e.type() != Array);
+
+                    if ( e.type() == Object ) {
+                        BSONObj obj = e.Obj();
+                        uassert(ErrorCodes::BadValue, "illegal object for _id", obj.okForStorage());
+                    }
                 }
             }
         }
diff -Nru mongodb-2.4.10/src/mongo/db/namespace_details.cpp mongodb-2.4.12/src/mongo/db/namespace_details.cpp
--- mongodb-2.4.10/src/mongo/db/namespace_details.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/db/namespace_details.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -24,12 +24,14 @@
 #include <boost/filesystem/operations.hpp>
 
 #include "mongo/db/db.h"
+#include "mongo/db/fts/fts_spec.h"
 #include "mongo/db/json.h"
 #include "mongo/db/mongommf.h"
 #include "mongo/db/ops/delete.h"
 #include "mongo/db/ops/update.h"
 #include "mongo/db/pdfile.h"
 #include "mongo/scripting/engine.h"
+#include "mongo/util/file.h"
 #include "mongo/util/hashtab.h"
 
 namespace mongo {
@@ -178,8 +180,38 @@
         else {
             // use lenForNewNsFiles, we are making a new database
             massert( 10343, "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
+
             maybeMkdir();
             unsigned long long l = lenForNewNsFiles;
+            log() << "allocating new ns file " << pathString << ", filling with zeroes..." << endl;
+
+            {
+                // Due to SERVER-15369 we need to explicitly write zero-bytes to the NS file.
+                const unsigned long long kBlockSize = 1024*1024;
+                verify(l % kBlockSize == 0); // ns files can only be multiples of 1MB
+                const std::vector<char> zeros(kBlockSize, 0);
+
+                File file;
+                file.open(pathString.c_str());
+                massert(18825, str::stream() << "couldn't create file " << pathString, file.is_open());
+                for (fileofs ofs = 0; ofs < l && !file.bad(); ofs += kBlockSize ) {
+                    file.write(ofs, &zeros[0], kBlockSize);
+                }
+                if (file.bad()) {
+                    try {
+                        boost::filesystem::remove(pathString);
+                    } catch (const std::exception& e) {
+                        StringBuilder ss;
+                        ss << "error removing file: " << e.what();
+                        massert(18909, ss.str(), 0);
+                    }
+                }
+                else {
+                    file.fsync();
+                }
+                massert(18826, str::stream() << "failure writing file " << pathString, !file.bad() );
+            }
+
             if( f.create(pathString, l, true) ) {
                 getDur().createdFile(pathString, l); // always a new file
                 len = l;
@@ -764,6 +796,17 @@
         get_cmap_inlock(ns).erase(ns);
     }
 
+    namespace {
+        bool indexIsText(const BSONObj& keyPattern) {
+            BSONObjIterator it( keyPattern );
+            while ( it.more() ) {
+                if ( str::equals( it.next().valuestrsafe(), "text" ) ) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
 
     void NamespaceDetailsTransient::computeIndexKeys() {
         _indexedPaths.clear();
@@ -772,13 +815,44 @@
         if ( ! d )
             return;
 
+        bool indexesAreLegacy = (cc().database()->getFile(0)->getHeader()->versionMinor
+                                 == PDFILE_VERSION_MINOR_22_AND_OLDER);
+
         NamespaceDetails::IndexIterator i = d->ii( true );
         while( i.more() ) {
-            BSONObj key = i.next().keyPattern();
-            BSONObjIterator j( key );
-            while ( j.more() ) {
-                BSONElement e = j.next();
-                _indexedPaths.addPath( e.fieldName() );
+            const IndexSpec& indexSpec = getIndexSpec( &(i.next()) );
+            BSONObj key = indexSpec.keyPattern;
+
+            if ( indexesAreLegacy || !indexIsText( key ) ) {
+                BSONObjIterator j( key );
+                while ( j.more() ) {
+                    BSONElement e = j.next();
+                    _indexedPaths.addPath( e.fieldName() );
+                }
+            }
+            else {
+                // This is a text index.  Get the paths for the indexed fields out of the FTSSpec.
+                fts::FTSSpec ftsSpec( indexSpec.info );
+                if ( ftsSpec.wildcard() ) {
+                    _indexedPaths.allPathsIndexed();
+                }
+                else {
+                    for ( size_t i = 0; i < ftsSpec.numExtraBefore(); ++i ) {
+                        _indexedPaths.addPath( ftsSpec.extraBefore(i) );
+                    }
+                    for ( fts::Weights::const_iterator it = ftsSpec.weights().begin();
+                          it != ftsSpec.weights().end();
+                          ++it ) {
+                        _indexedPaths.addPath( it->first );
+                    }
+                    for ( size_t i = 0; i < ftsSpec.numExtraAfter(); ++i ) {
+                        _indexedPaths.addPath( ftsSpec.extraAfter(i) );
+                    }
+                    // Note that 2.4.x supports {textIndexVersion: 1} only. {textIndexVersion: 1}
+                    // can only have one language per document, and the "language override" field
+                    // specifies the exact path to the language.
+                    _indexedPaths.addPath( ftsSpec.languageOverrideField() );
+                }
             }
         }
 
diff -Nru mongodb-2.4.10/src/mongo/s/cursors.cpp mongodb-2.4.12/src/mongo/s/cursors.cpp
--- mongodb-2.4.10/src/mongo/s/cursors.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/s/cursors.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -270,19 +270,20 @@
     }
 
     void CursorCache::gotKillCursors(Message& m ) {
-        int *x = (int *) m.singleData()->_data;
-        x++; // reserved
-        int n = *x++;
+        DbMessage dbmessage(m);
+        int n = dbmessage.pullInt();
 
         if ( n > 2000 ) {
             LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
         }
 
-
         uassert( 13286 , "sent 0 cursors to kill" , n >= 1 );
         uassert( 13287 , "too many cursors to kill" , n < 30000 );
+        massert( 18632 , str::stream() << "bad kill cursors size: " << m.dataSize(), 
+                    m.dataSize() == 8 + ( 8 * n ) );
+
 
-        long long * cursors = (long long *)x;
+        const long long* cursors = dbmessage.getArray(n);
         AuthorizationManager* authManager =
                 ClientBasic::getCurrent()->getAuthorizationManager();
         for ( int i=0; i<n; i++ ) {
diff -Nru mongodb-2.4.10/src/mongo/s/d_split.cpp mongodb-2.4.12/src/mongo/s/d_split.cpp
--- mongodb-2.4.10/src/mongo/s/d_split.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/s/d_split.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -802,6 +802,9 @@
 
                     Client::ReadContext ctx( ns );
                     NamespaceDetails *d = nsdetails( ns );
+                    if (d == NULL) {
+                        break;
+                    }
 
                     const IndexDetails *idx = d->findIndexByPrefix( keyPattern ,
                                                                     true ); /* exclude multikeys */
diff -Nru mongodb-2.4.10/src/mongo/s/request.cpp mongodb-2.4.12/src/mongo/s/request.cpp
--- mongodb-2.4.10/src/mongo/s/request.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/s/request.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -37,7 +37,6 @@
     Request::Request( Message& m, AbstractMessagingPort* p ) :
         _m(m) , _d( m ) , _p(p) , _didInit(false) {
 
-        verify( _d.getns() );
         _id = _m.header()->id;
 
         _clientInfo = ClientInfo::get();
@@ -58,7 +57,7 @@
 
     // Deprecated, will move to the strategy itself
     void Request::reset() {
-        if ( _m.operation() == dbKillCursors ) {
+        if ( !_d.messageShouldHaveNs()) {
             return;
         }
 
diff -Nru mongodb-2.4.10/src/mongo/s/strategy_shard.cpp mongodb-2.4.12/src/mongo/s/strategy_shard.cpp
--- mongodb-2.4.10/src/mongo/s/strategy_shard.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/s/strategy_shard.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -1043,8 +1043,9 @@
                 // TODO: make this safer w/ shard add/remove
                 //
 
-                int* opts = (int*)( r.d().afterNS() );
-                opts[0] |= UpdateOption_Broadcast; // this means don't check shard version in mongod
+                int opts = r.d().getFlags();
+                opts |= UpdateOption_Broadcast; // this means don't check shard version in mongod
+                r.d().setFlags(opts);
                 broadcastWrite( dbUpdate, r );
                 return;
             }
@@ -1192,8 +1193,10 @@
 
             if( ! shard ){
 
-                int * x = (int*)(r.d().afterNS());
-                x[0] |= RemoveOption_Broadcast; // this means don't check shard version in mongod
+                int opts = r.d().getFlags();
+                opts |= RemoveOption_Broadcast; // this means don't check shard version in mongod
+                r.d().setFlags(opts);
+
                 broadcastWrite(dbDelete, r);
                 return;
             }
diff -Nru mongodb-2.4.10/src/mongo/s/writeback_listener.cpp mongodb-2.4.12/src/mongo/s/writeback_listener.cpp
--- mongodb-2.4.10/src/mongo/s/writeback_listener.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/s/writeback_listener.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -294,7 +294,7 @@
                             Request r( msg , 0 );
                             r.init();
 
-                            r.d().reservedField() |= Reserved_FromWriteback;
+                            r.d().setReservedField(r.d().reservedField() | Reserved_FromWriteback);
 
                             ClientInfo * ci = r.getClientInfo();
                             if (!noauth) {
diff -Nru mongodb-2.4.10/src/mongo/scripting/engine_v8.cpp mongodb-2.4.12/src/mongo/scripting/engine_v8.cpp
--- mongodb-2.4.10/src/mongo/scripting/engine_v8.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/scripting/engine_v8.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -603,8 +603,7 @@
             // execution terminated
             return v8::Undefined();
 
-        v8::Local<v8::External> f =
-                v8::External::Cast(*args.Callee()->Get(scope->strLitToV8("_v8_function")));
+        v8::Local<v8::External> f = v8::Local<v8::External>::Cast(args.Data());
         v8Function function = (v8Function)(f->Value());
         v8::Handle<v8::Value> ret;
         string exceptionText;
@@ -1163,8 +1162,9 @@
     }
 
     v8::Handle<v8::FunctionTemplate> V8Scope::createV8Function(v8Function func) {
-        v8::Handle<v8::FunctionTemplate> ft = v8::FunctionTemplate::New(v8Callback);
-        ft->Set(strLitToV8("_v8_function"), v8::External::New(reinterpret_cast<void*>(func)),
+        v8::Handle<v8::Value> funcHandle = v8::External::New(reinterpret_cast<void*>(func));
+        v8::Handle<v8::FunctionTemplate> ft = v8::FunctionTemplate::New(v8Callback, funcHandle);
+        ft->Set(strLitToV8("_v8_function"), v8::Boolean::New(true),
                 static_cast<v8::PropertyAttribute>(v8::DontEnum | v8::ReadOnly));
         return ft;
     }
diff -Nru mongodb-2.4.10/src/mongo/shell/shell_utils_extended.cpp mongodb-2.4.12/src/mongo/shell/shell_utils_extended.cpp
--- mongodb-2.4.10/src/mongo/shell/shell_utils_extended.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/shell/shell_utils_extended.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -183,22 +183,19 @@
         }
 
         /**
-         * @param args - [ name, byte index ]
-         * In this initial implementation, all bits in the specified byte are flipped.
+         * @param args - [ source, destination ]
+         * copies file 'source' to 'destination'. Errors if the 'destination' file already exists.
          */
-        BSONObj fuzzFile(const BSONObj& args, void* data) {
-            uassert( 13619, "fuzzFile takes 2 arguments", args.nFields() == 2 );
-            scoped_ptr< File > f( new File() );
-            f->open( args.getStringField( "0" ) );
-            uassert( 13620, "couldn't open file to fuzz", !f->bad() && f->is_open() );
-
-            char c;
-            f->read( args.getIntField( "1" ), &c, 1 );
-            c = ~c;
-            f->write( args.getIntField( "1" ), &c, 1 );
+        BSONObj copyFile(const BSONObj& args, void* data) {
+            uassert(13619, "copyFile takes 2 arguments", args.nFields() == 2);
+
+            BSONObjIterator it(args);
+            const std::string source = it.next().str();
+            const std::string destination = it.next().str();
+
+            boost::filesystem::copy_file(source, destination);
 
             return undefinedReturn;
-            // f close is implicit
         }
 
         BSONObj getHostName(const BSONObj& a, void* data) {
@@ -212,7 +209,7 @@
         void installShellUtilsExtended( Scope& scope ) {
             scope.injectNative( "getHostName" , getHostName );
             scope.injectNative( "removeFile" , removeFile );
-            scope.injectNative( "fuzzFile" , fuzzFile );
+            scope.injectNative( "copyFile" , copyFile );
             scope.injectNative( "listFiles" , listFiles );
             scope.injectNative( "ls" , ls );
             scope.injectNative( "pwd", pwd );
diff -Nru mongodb-2.4.10/src/mongo/tools/sniffer.cpp mongodb-2.4.12/src/mongo/tools/sniffer.cpp
--- mongodb-2.4.10/src/mongo/tools/sniffer.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/tools/sniffer.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -329,9 +329,7 @@
             break;
         }
         case mongo::dbKillCursors: {
-            int *x = (int *) m.singleData()->_data;
-            x++; // reserved
-            int n = *x;
+            int n = d.pullInt();
             out() << "\tkillCursors n: " << n << endl;
             break;
         }
@@ -357,7 +355,7 @@
                 if ( m.operation() == mongo::dbGetMore ) {
                     DbMessage d( m );
                     d.pullInt();
-                    long long &cId = d.pullInt64();
+                    long long cId = d.pullInt64();
                     cId = mapCursor[ c ][ cId ];
                 }
                 Message response;
diff -Nru mongodb-2.4.10/src/mongo/util/net/message.h mongodb-2.4.12/src/mongo/util/net/message.h
--- mongodb-2.4.10/src/mongo/util/net/message.h	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/util/net/message.h	2014-10-14 20:40:29.000000000 +0000
@@ -103,13 +103,18 @@
 
 #pragma pack(1)
     /* todo merge this with MSGHEADER (or inherit from it). */
-    struct MsgData {
+    class MsgData {
+        friend class Message;
+        friend class DbMessage;
+        friend class MessagingPort;
+    public:
         int len; /* len of the msg, including this field */
         MSGID id; /* request/reply id's match... */
         MSGID responseTo; /* id of the message we are responding to */
         short _operation;
         char _flags;
         char _version;
+
         int operation() const {
             return _operation;
         }
@@ -118,7 +123,6 @@
             _version = 0;
             _operation = o;
         }
-        char _data[4];
 
         int& dataAsInt() {
             return *((int *) _data);
@@ -140,6 +144,8 @@
         }
 
         int dataLen(); // len without header
+    private:
+        char _data[4]; //must be last member
     };
     const int MsgDataHeaderSize = sizeof(MsgData) - 4;
     inline int MsgData::dataLen() {
diff -Nru mongodb-2.4.10/src/mongo/util/version.cpp mongodb-2.4.12/src/mongo/util/version.cpp
--- mongodb-2.4.10/src/mongo/util/version.cpp	2014-04-02 12:23:31.000000000 +0000
+++ mongodb-2.4.12/src/mongo/util/version.cpp	2014-10-14 20:40:29.000000000 +0000
@@ -47,7 +47,7 @@
      *      1.2.3-rc4-pre-
      * If you really need to do something else you'll need to fix _versionArray()
      */
-    const char versionString[] = "2.4.10";
+    const char versionString[] = "2.4.12";
 
     // See unit test for example outputs
     BSONArray toVersionArray(const char* version){

Attachment: signature.asc
Description: This is a digitally signed message part


Reply to: