
Bug#694629: marked as done (unblock: ruby1.9.1/1.9.3.194-5)



Your message dated Fri, 30 Nov 2012 22:36:57 +0000
with message-id <ff1925321c779a3dd8094287622a83ca@mail.adsl.funky-badger.org>
and subject line Re: Bug#694629: unblock: ruby1.9.1/1.9.3.194-5
has caused the Debian Bug report #694629,
regarding unblock: ruby1.9.1/1.9.3.194-5
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case, it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact owner@bugs.debian.org
immediately.)


-- 
694629: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=694629
Debian Bug Tracking System
Contact owner@bugs.debian.org with problems
--- Begin Message ---
Package: release.debian.org
Severity: normal
User: release.debian.org@packages.debian.org
Usertags: unblock

Please unblock package ruby1.9.1

This version disables the test suite on sparc, where it seems to fail
depending on which buildd the build is assigned to (see
https://lists.debian.org/debian-ruby/2012/11/msg00064.html).

Attached is the debdiff between this version and the one in testing. The
only difference from the previous unblock request (#693987) is the
one-line change in debian/rules that avoids running the test suite on
sparc.
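
For reference, that change just adds one more architecture to the existing
test-suite filter in debian/rules. A minimal sketch of the pattern (the
target name below is illustrative, not the real one; the actual hunk is at
the end of the attached debdiff):

# sketch only -- this sits inside the build/test target of debian/rules
build-stamp:   # hypothetical target name, for illustration
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
# Skip the test suite on architectures where it is known to misbehave;
# the -5 upload simply adds sparc to this list.
ifeq (,$(filter $(DEB_BUILD_ARCH),ia64 kfreebsd-i386 kfreebsd-amd64 sparc))
	debian/run-test-suites.bash
endif
endif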

unblock ruby1.9.1/1.9.3.194-5

-- System Information:
Debian Release: wheezy/sid
  APT prefers unstable
  APT policy: (500, 'unstable'), (1, 'experimental')
Architecture: amd64 (x86_64)
Foreign Architectures: i386

Kernel: Linux 3.2.0-4-amd64 (SMP w/4 CPU cores)
Locale: LANG=pt_BR.utf8, LC_CTYPE=pt_BR.utf8 (charmap=UTF-8)
Shell: /bin/sh linked to /bin/dash

-- 
Antonio Terceiro <terceiro@debian.org>
diff -Nru ruby1.9.1-1.9.3.194/debian/changelog ruby1.9.1-1.9.3.194/debian/changelog
--- ruby1.9.1-1.9.3.194/debian/changelog	2012-10-16 10:28:42.000000000 -0300
+++ ruby1.9.1-1.9.3.194/debian/changelog	2012-11-27 18:42:09.000000000 -0300
@@ -1,3 +1,20 @@
+ruby1.9.1 (1.9.3.194-5) unstable; urgency=high
+
+  * Disable running the test suite during the build on sparc again. Keeping
+    urgency=high because the previous release, which contains a security bug
+    fix, did not reach testing yet because of a segfault when running tests in
+    the sparc buildd.
+
+ -- Antonio Terceiro <terceiro@debian.org>  Sun, 25 Nov 2012 19:12:28 -0300
+
+ruby1.9.1 (1.9.3.194-4) unstable; urgency=high
+
+  [ James Healy ]
+  * debian/patches/CVE-2012-5371.patch: avoid DOS vulnerability in hash
+    implementation, this fixes CVE-2012-5371. (Closes: #693024).
+
+ -- Antonio Terceiro <terceiro@debian.org>  Thu, 22 Nov 2012 10:30:37 -0300
+
 ruby1.9.1 (1.9.3.194-3) unstable; urgency=high
 
   * debian/patches/CVE-2012-4522.patch: avoid vulnerability with strings
diff -Nru ruby1.9.1-1.9.3.194/debian/patches/20121120-cve-2012-5371.diff ruby1.9.1-1.9.3.194/debian/patches/20121120-cve-2012-5371.diff
--- ruby1.9.1-1.9.3.194/debian/patches/20121120-cve-2012-5371.diff	1969-12-31 21:00:00.000000000 -0300
+++ ruby1.9.1-1.9.3.194/debian/patches/20121120-cve-2012-5371.diff	2012-11-22 09:43:30.000000000 -0300
@@ -0,0 +1,637 @@
+Description: replace hash implementation to avoid DOS attacks
+ This patch fixes CVE-2012-5371
+Bug-Debian: http://bugs.debian.org/693024
+Origin: upstream, https://github.com/ruby/ruby/commit/5e45af463cca6f062a986d5e686350e17ea653bb
+Backported-By: James Healy <james@yob.id.au>
+Reviewed-By: Antonio Terceiro <terceiro@debian.org>
+
+diff --git a/common.mk b/common.mk
+index eb89a2b..8ce77cb 100644
+--- a/common.mk
++++ b/common.mk
+@@ -663,7 +663,8 @@ proc.$(OBJEXT): {$(VPATH)}proc.c {$(VPATH)}eval_intern.h \
+ process.$(OBJEXT): {$(VPATH)}process.c $(RUBY_H_INCLUDES) \
+   {$(VPATH)}util.h {$(VPATH)}io.h $(ENCODING_H_INCLUDES) {$(VPATH)}dln.h \
+   $(VM_CORE_H_INCLUDES) {$(VPATH)}debug.h {$(VPATH)}internal.h
+-random.$(OBJEXT): {$(VPATH)}random.c $(RUBY_H_INCLUDES)
++random.$(OBJEXT): {$(VPATH)}random.c $(RUBY_H_INCLUDES) \
++  {$(VPATH)}siphash.c {$(VPATH)}siphash.h
+ range.$(OBJEXT): {$(VPATH)}range.c $(RUBY_H_INCLUDES) \
+   $(ENCODING_H_INCLUDES) {$(VPATH)}internal.h
+ rational.$(OBJEXT): {$(VPATH)}rational.c $(RUBY_H_INCLUDES) {$(VPATH)}internal.h
+diff --git a/random.c b/random.c
+index 873f9ae..2217c81 100644
+--- a/random.c
++++ b/random.c
+@@ -1258,7 +1258,15 @@ random_s_rand(int argc, VALUE *argv, VALUE obj)
+     return random_rand(argc, argv, rb_Random_DEFAULT);
+ }
+ 
++#define SIP_HASH_STREAMING 0
++#define sip_hash24 ruby_sip_hash24
++#include "siphash.c"
++
+ static st_index_t hashseed;
++static union {
++    uint8_t key[16];
++    uint32_t u32[(16 * sizeof(uint8_t) - 1) / sizeof(uint32_t)];
++} sipseed;
+ 
+ static VALUE
+ init_randomseed(struct MT *mt, unsigned int initial[DEFAULT_SEED_CNT])
+@@ -1278,6 +1286,7 @@ Init_RandomSeed(void)
+     unsigned int initial[DEFAULT_SEED_CNT];
+     struct MT *mt = &r->mt;
+     VALUE seed = init_randomseed(mt, initial);
++    int i;
+ 
+     hashseed = genrand_int32(mt);
+ #if SIZEOF_ST_INDEX_T*CHAR_BIT > 4*8
+@@ -1293,6 +1302,9 @@ Init_RandomSeed(void)
+     hashseed |= genrand_int32(mt);
+ #endif
+ 
++    for (i = 0; i < numberof(sipseed.u32); ++i)
++	sipseed.u32[i] = genrand_int32(mt);
++
+     rb_global_variable(&r->seed);
+     r->seed = seed;
+ }
+@@ -1303,6 +1315,17 @@ rb_hash_start(st_index_t h)
+     return st_hash_start(hashseed + h);
+ }
+ 
++st_index_t
++rb_memhash(const void *ptr, long len)
++{
++    sip_uint64_t h = sip_hash24(sipseed.key, ptr, len);
++#ifdef HAVE_UINT64_T
++    return (st_index_t)h;
++#else
++    return (st_index_t)(h.u32[0] ^ h.u32[1]);
++#endif
++}
++
+ static void
+ Init_RandomSeed2(void)
+ {
+diff --git a/siphash.c b/siphash.c
+new file mode 100644
+index 0000000..c100b14
+--- /dev/null
++++ b/siphash.c
+@@ -0,0 +1,483 @@
++#include <string.h>
++#include <stdio.h>
++#include "siphash.h"
++#ifndef SIP_HASH_STREAMING
++  #define SIP_HASH_STREAMING 1
++#endif
++
++#ifdef _WIN32
++  #define BYTE_ORDER __LITTLE_ENDIAN
++#elif !defined BYTE_ORDER
++  #include <endian.h>
++#endif
++#ifndef LITTLE_ENDIAN
++#define LITTLE_ENDIAN __LITTLE_ENDIAN
++#endif
++#ifndef BIG_ENDIAN
++#define BIG_ENDIAN __BIG_ENDIAN
++#endif
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++  #define lo u32[0]
++  #define hi u32[1]
++#elif BYTE_ORDER == BIG_ENDIAN
++  #define hi u32[0]
++  #define lo u32[1]
++#else
++  #error "Only strictly little or big endian supported"
++#endif
++
++#ifndef UNALIGNED_WORD_ACCESS
++# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
++     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD86) || \
++     defined(__mc68020__)
++#   define UNALIGNED_WORD_ACCESS 1
++# endif
++#endif
++#ifndef UNALIGNED_WORD_ACCESS
++# define UNALIGNED_WORD_ACCESS 0
++#endif
++
++#define U8TO32_LE(p)         						\
++    (((uint32_t)((p)[0])       ) | ((uint32_t)((p)[1]) <<  8) |  	\
++     ((uint32_t)((p)[2]) <<  16) | ((uint32_t)((p)[3]) << 24))		\
++
++#define U32TO8_LE(p, v)			\
++do {					\
++    (p)[0] = (uint8_t)((v)      );	\
++    (p)[1] = (uint8_t)((v) >>  8); 	\
++    (p)[2] = (uint8_t)((v) >> 16);	\
++    (p)[3] = (uint8_t)((v) >> 24);	\
++} while (0)
++
++#ifdef HAVE_UINT64_T
++#define U8TO64_LE(p) 							\
++    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32 )
++
++#define U64TO8_LE(p, v) \
++do {						\
++    U32TO8_LE((p),     (uint32_t)((v)      )); 	\
++    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));	\
++} while (0)
++
++#define ROTL64(v, s)			\
++    ((v) << (s)) | ((v) >> (64 - (s)))
++
++#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))
++
++#define ADD64_TO(v, s) ((v) += (s))
++#define XOR64_TO(v, s) ((v) ^= (s))
++#define XOR64_INT(v, x) ((v) ^= (x))
++#else
++#define U8TO64_LE(p) u8to64_le(p)
++static inline uint64_t
++u8to64_le(const uint8_t *p)
++{
++    uint64_t ret;
++    ret.lo = U8TO32_LE(p);
++    ret.hi = U8TO32_LE(p + 4);
++    return ret;
++}
++
++#define U64TO8_LE(p, v) u64to8_le(p, v)
++static inline void
++u64to8_le(uint8_t *p, uint64_t v)
++{
++    U32TO8_LE(p,     v.lo);
++    U32TO8_LE(p + 4, v.hi);
++}
++
++#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
++			 (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
++static inline uint64_t *
++rotl64_to(uint64_t *v, unsigned int s)
++{
++    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
++    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
++    v->hi = uhi;
++    v->lo = ulo;
++    return v;
++}
++
++static inline uint64_t *
++rotl64_swap(uint64_t *v)
++{
++    uint32_t t = v->lo;
++    v->lo = v->hi;
++    v->hi = t;
++    return v;
++}
++
++#define ADD64_TO(v, s) add64_to(&(v), (s))
++static inline uint64_t *
++add64_to(uint64_t *v, const uint64_t s)
++{
++    v->lo += s.lo;
++    v->hi += s.hi;
++    if (v->lo < s.lo) v->hi++;
++    return v;
++}
++
++#define XOR64_TO(v, s) xor64_to(&(v), (s))
++static inline uint64_t *
++xor64_to(uint64_t *v, const uint64_t s)
++{
++    v->lo ^= s.lo;
++    v->hi ^= s.hi;
++    return v;
++}
++
++#define XOR64_INT(v, x) ((v).lo ^= (x))
++#endif
++
++static const union {
++    char bin[32];
++    uint64_t u64[4];
++} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
++#define sip_init_state sip_init_state_bin.u64
++
++#if SIP_HASH_STREAMING
++struct sip_interface_st {
++    void (*init)(sip_state *s, const uint8_t *key);
++    void (*update)(sip_state *s, const uint8_t *data, size_t len);
++    void (*final)(sip_state *s, uint64_t *digest);
++};
++
++static void int_sip_init(sip_state *state, const uint8_t *key);
++static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
++static void int_sip_final(sip_state *state, uint64_t *digest);
++
++static const sip_interface sip_methods = {
++    int_sip_init,
++    int_sip_update,
++    int_sip_final
++};
++#endif /* SIP_HASH_STREAMING */
++
++#define SIP_COMPRESS(v0, v1, v2, v3)	\
++do {					\
++    ADD64_TO((v0), (v1));		\
++    ADD64_TO((v2), (v3));		\
++    ROTL64_TO((v1), 13);		\
++    ROTL64_TO((v3), 16);		\
++    XOR64_TO((v1), (v0));		\
++    XOR64_TO((v3), (v2));		\
++    ROTL64_TO((v0), 32);		\
++    ADD64_TO((v2), (v1));		\
++    ADD64_TO((v0), (v3));		\
++    ROTL64_TO((v1), 17);		\
++    ROTL64_TO((v3), 21);		\
++    XOR64_TO((v1), (v2));		\
++    XOR64_TO((v3), (v0));		\
++    ROTL64_TO((v2), 32);		\
++} while(0)
++
++#if SIP_HASH_STREAMING
++static void
++int_sip_dump(sip_state *state)
++{
++    int v;
++
++    for (v = 0; v < 4; v++) {
++#if HAVE_UINT64_T
++	printf("v%d: %" PRIx64 "\n", v, state->v[v]);
++#else
++	printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
++#endif
++    }
++}
++
++static void
++int_sip_init(sip_state *state, const uint8_t key[16])
++{
++    uint64_t k0, k1;
++
++    k0 = U8TO64_LE(key);
++    k1 = U8TO64_LE(key + sizeof(uint64_t));
++
++    state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
++    state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
++    state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
++    state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
++}
++
++static inline void
++int_sip_round(sip_state *state, int n)
++{
++    int i;
++
++    for (i = 0; i < n; i++) {
++	SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
++    }
++}
++
++static inline void
++int_sip_update_block(sip_state *state, uint64_t m)
++{
++    XOR64_TO(state->v[3], m);
++    int_sip_round(state, state->c);
++    XOR64_TO(state->v[0], m);
++}
++
++static inline void
++int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
++{
++    int to_read;
++    uint64_t m;
++
++    if (!state->buflen) return;
++
++    to_read = sizeof(uint64_t) - state->buflen;
++    memcpy(state->buf + state->buflen, *pdata, to_read);
++    m = U8TO64_LE(state->buf);
++    int_sip_update_block(state, m);
++    *pdata += to_read;
++    *plen -= to_read;
++    state->buflen = 0;
++}
++
++static inline void
++int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
++{
++    uint8_t r = len % sizeof(uint64_t);
++    if (r) {
++	memcpy(state->buf, data + len - r, r);
++	state->buflen = r;
++    }
++}
++
++static void
++int_sip_update(sip_state *state, const uint8_t *data, size_t len)
++{
++    uint64_t *end;
++    uint64_t *data64;
++
++    state->msglen_byte = state->msglen_byte + (len % 256);
++    data64 = (uint64_t *) data;
++
++    int_sip_pre_update(state, &data, &len);
++
++    end = data64 + (len / sizeof(uint64_t));
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++    while (data64 != end) {
++	int_sip_update_block(state, *data64++);
++    }
++#elif BYTE_ORDER == BIG_ENDIAN
++    {
++	uint64_t m;
++	uint8_t *data8 = data;
++	for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) {
++	    m = U8TO64_LE(data8);
++	    int_sip_update_block(state, m);
++	}
++    }
++#endif
++
++    int_sip_post_update(state, data, len);
++}
++
++static inline void
++int_sip_pad_final_block(sip_state *state)
++{
++    int i;
++    /* pad with 0's and finalize with msg_len mod 256 */
++    for (i = state->buflen; i < sizeof(uint64_t); i++) {
++	state->buf[i] = 0x00;
++    }
++    state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
++}
++
++static void
++int_sip_final(sip_state *state, uint64_t *digest)
++{
++    uint64_t m;
++
++    int_sip_pad_final_block(state);
++
++    m = U8TO64_LE(state->buf);
++    int_sip_update_block(state, m);
++
++    XOR64_INT(state->v[2], 0xff);
++
++    int_sip_round(state, state->d);
++
++    *digest = state->v[0];
++    XOR64_TO(*digest, state->v[1]);
++    XOR64_TO(*digest, state->v[2]);
++    XOR64_TO(*digest, state->v[3]);
++}
++
++sip_hash *
++sip_hash_new(const uint8_t key[16], int c, int d)
++{
++    sip_hash *h = NULL;
++
++    if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
++    return sip_hash_init(h, key, c, d);
++}
++
++sip_hash *
++sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
++{
++    h->state->c = c;
++    h->state->d = d;
++    h->state->buflen = 0;
++    h->state->msglen_byte = 0;
++    h->methods = &sip_methods;
++    h->methods->init(h->state, key);
++    return h;
++}
++
++int
++sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
++{
++    h->methods->update(h->state, msg, len);
++    return 1;
++}
++
++int
++sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len)
++{
++    uint64_t digest64;
++    uint8_t *ret;
++
++    h->methods->final(h->state, &digest64);
++    if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
++    U64TO8_LE(ret, digest64);
++    *len = sizeof(uint64_t);
++    *digest = ret;
++
++    return 1;
++}
++
++int
++sip_hash_final_integer(sip_hash *h, uint64_t *digest)
++{
++    h->methods->final(h->state, digest);
++    return 1;
++}
++
++int
++sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
++{
++    if (!sip_hash_update(h, data, data_len)) return 0;
++    return sip_hash_final(h, digest, digest_len);
++}
++
++int
++sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
++{
++    if (!sip_hash_update(h, data, data_len)) return 0;
++    return sip_hash_final_integer(h, digest);
++}
++
++void
++sip_hash_free(sip_hash *h)
++{
++    free(h);
++}
++
++void
++sip_hash_dump(sip_hash *h)
++{
++    int_sip_dump(h->state);
++}
++#endif /* SIP_HASH_STREAMING */
++
++#define SIP_2_ROUND(m, v0, v1, v2, v3)	\
++do {					\
++    XOR64_TO((v3), (m));		\
++    SIP_COMPRESS(v0, v1, v2, v3);	\
++    SIP_COMPRESS(v0, v1, v2, v3);	\
++    XOR64_TO((v0), (m));		\
++} while (0)
++
++uint64_t
++sip_hash24(const uint8_t key[16], const uint8_t *data, size_t len)
++{
++    uint64_t k0, k1;
++    uint64_t v0, v1, v2, v3;
++    uint64_t m, last;
++    const uint8_t *end = data + len - (len % sizeof(uint64_t));
++
++    k0 = U8TO64_LE(key);
++    k1 = U8TO64_LE(key + sizeof(uint64_t));
++
++    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
++    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
++    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
++    v3 = k1; XOR64_TO(v3, sip_init_state[3]);
++
++#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
++    {
++        uint64_t *data64 = (uint64_t *)data;
++        while (data64 != (uint64_t *) end) {
++	    m = *data64++;
++	    SIP_2_ROUND(m, v0, v1, v2, v3);
++        }
++    }
++#elif BYTE_ORDER == BIG_ENDIAN
++    for (; data != end; data += sizeof(uint64_t)) {
++	m = U8TO64_LE(data);
++	SIP_2_ROUND(m, v0, v1, v2, v3);
++    }
++#endif
++
++#ifdef HAVE_UINT64_T
++    last = (uint64_t)len << 56;
++#define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
++#else
++    last.hi = len << 24;
++    last.lo = 0;
++#define OR_BYTE(n) do { \
++	if (n >= 4) \
++	    last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
++	else \
++	    last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
++    } while (0)
++#endif
++
++    switch (len % sizeof(uint64_t)) {
++	case 7:
++	    OR_BYTE(6);
++	case 6:
++	    OR_BYTE(5);
++	case 5:
++	    OR_BYTE(4);
++	case 4:
++#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
++  #if HAVE_UINT64_T
++	    last |= (uint64_t) ((uint32_t *) end)[0];
++  #else
++	    last.lo |= ((uint32_t *) end)[0];
++  #endif
++	    break;
++#elif BYTE_ORDER == BIG_ENDIAN
++	    OR_BYTE(3);
++#endif
++	case 3:
++	    OR_BYTE(2);
++	case 2:
++	    OR_BYTE(1);
++	case 1:
++	    OR_BYTE(0);
++	    break;
++	case 0:
++	    break;
++    }
++
++    SIP_2_ROUND(last, v0, v1, v2, v3);
++
++    XOR64_INT(v2, 0xff);
++
++    SIP_COMPRESS(v0, v1, v2, v3);
++    SIP_COMPRESS(v0, v1, v2, v3);
++    SIP_COMPRESS(v0, v1, v2, v3);
++    SIP_COMPRESS(v0, v1, v2, v3);
++
++    XOR64_TO(v0, v1);
++    XOR64_TO(v0, v2);
++    XOR64_TO(v0, v3);
++    return v0;
++}
+diff --git a/siphash.h b/siphash.h
+new file mode 100644
+index 0000000..3f39884
+--- /dev/null
++++ b/siphash.h
+@@ -0,0 +1,48 @@
++#ifndef SIPHASH_H
++#define SIPHASH_H 1
++#include <stdlib.h>
++#ifdef HAVE_STDINT_H
++#include <stdint.h>
++#endif
++#ifdef HAVE_INTTYPES_H
++#include <inttypes.h>
++#endif
++
++#ifndef HAVE_UINT64_T
++typedef struct {
++    uint32_t u32[2];
++} sip_uint64_t;
++#define uint64_t sip_uint64_t
++#else
++typedef uint64_t sip_uint64_t;
++#endif
++
++typedef struct {
++    int c;
++    int d;
++    uint64_t v[4];
++    uint8_t buf[sizeof(uint64_t)];
++    uint8_t buflen;
++    uint8_t msglen_byte;
++} sip_state;
++
++typedef struct sip_interface_st sip_interface;
++
++typedef struct {
++    sip_state state[1];
++    const sip_interface *methods;
++} sip_hash;
++
++sip_hash *sip_hash_new(const uint8_t key[16], int c, int d);
++sip_hash *sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d);
++int sip_hash_update(sip_hash *h, const uint8_t *data, size_t len);
++int sip_hash_final(sip_hash *h, uint8_t **digest, size_t *len);
++int sip_hash_final_integer(sip_hash *h, uint64_t *digest);
++int sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len);
++int sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest);
++void sip_hash_free(sip_hash *h);
++void sip_hash_dump(sip_hash *h);
++
++uint64_t sip_hash24(const uint8_t key[16], const uint8_t *data, size_t len);
++
++#endif
+diff --git a/string.c b/string.c
+index c770215..0138dce 100644
+--- a/string.c
++++ b/string.c
+@@ -2161,12 +2161,6 @@ rb_str_prepend(VALUE str, VALUE str2)
+ }
+ 
+ st_index_t
+-rb_memhash(const void *ptr, long len)
+-{
+-    return st_hash(ptr, len, rb_hash_start((st_index_t)len));
+-}
+-
+-st_index_t
+ rb_str_hash(VALUE str)
+ {
+     int e = ENCODING_GET(str);
diff -Nru ruby1.9.1-1.9.3.194/debian/patches/series ruby1.9.1-1.9.3.194/debian/patches/series
--- ruby1.9.1-1.9.3.194/debian/patches/series	2012-10-16 10:24:26.000000000 -0300
+++ ruby1.9.1-1.9.3.194/debian/patches/series	2012-11-21 23:39:33.000000000 -0300
@@ -18,3 +18,4 @@
 20120517-r35434.patch
 20120927-cve_2011_1005.patch
 CVE-2012-4522.patch
+20121120-cve-2012-5371.diff
diff -Nru ruby1.9.1-1.9.3.194/debian/rules ruby1.9.1-1.9.3.194/debian/rules
--- ruby1.9.1-1.9.3.194/debian/rules	2012-09-30 17:40:36.000000000 -0300
+++ ruby1.9.1-1.9.3.194/debian/rules	2012-11-25 19:05:30.000000000 -0300
@@ -126,7 +126,7 @@
 ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
 # Disable test suites on some broken architectures.
 # See http://lists.debian.org/debian-release/2011/10/msg00279.html
-ifeq (,$(filter $(DEB_BUILD_ARCH),ia64 kfreebsd-i386 kfreebsd-amd64))
+ifeq (,$(filter $(DEB_BUILD_ARCH),ia64 kfreebsd-i386 kfreebsd-amd64 sparc))
 	mv test/drb _test_drb
 	debian/run-test-suites.bash
 	mv _test_drb test/drb

Attachment: signature.asc
Description: Digital signature


--- End Message ---
--- Begin Message ---
On 28.11.2012 14:31, Antonio Terceiro wrote:
> Please unblock package ruby1.9.1
>
> This version disables the test suite on sparc, where it seems to fail
> depending on which buildd the build is assigned to (see
> https://lists.debian.org/debian-ruby/2012/11/msg00064.html).

Hint updated.

Regards,

Adam

--- End Message ---
