mirror of https://github.com/redis/redis.git
parent 94751543b0
commit a90b0f99ce

@@ -28,7 +28,7 @@ This is never upgraded since it's part of the Redis project. If there are change
 Hiredis
 ---
 
-Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is adviced to take a lot of care:
+Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care:
 
 1. Check with diff if hiredis API changed and what impact it could have in Redis.
 2. Make sure thet the SDS library inside Hiredis and inside Redis are compatible.

@@ -637,7 +637,7 @@ slave-priority 100
 # it with the specified string.
 # 4) During replication, when a slave performs a full resynchronization with
 #    its master, the content of the whole database is removed in order to
-#    load the RDB file just transfered.
+#    load the RDB file just transferred.
 #
 # In all the above cases the default is to delete objects in a blocking way,
 # like if DEL was called. However you can configure each case specifically

@@ -7,7 +7,7 @@
  * atomicDecr(var,count,mutex) -- Decrement the atomic counter
  * atomicGet(var,dstvar,mutex) -- Fetch the atomic counter value
  *
- * If atomic primitives are availble (tested in config.h) the mutex
+ * If atomic primitives are available (tested in config.h) the mutex
  * is not used.
  *
  * Never use return value from the macros. To update and get use instead:

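Aside on the hunk above, not part of the patch: the header's guidance is update first, then fetch with atomicGet(). A minimal sketch, assuming the matching atomicIncr(var,count,mutex) macro listed in the same header; the counter and mutex names below are illustrative:

    #include <pthread.h>
    #include "atomicvar.h"   /* assumed include path for the macros above */

    static long long ops_done = 0;                        /* illustrative counter */
    static pthread_mutex_t ops_mutex = PTHREAD_MUTEX_INITIALIZER;

    void noteOperation(void) {
        long long current;
        atomicIncr(ops_done, 1, ops_mutex);      /* update, ignore any "result" */
        atomicGet(ops_done, current, ops_mutex); /* then fetch the value separately */
        (void)current;                           /* e.g. compare against a threshold here */
    }
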
@@ -918,7 +918,7 @@ void bitfieldCommand(client *c) {
     struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
     int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
     int readonly = 1;
-    size_t higest_write_offset = 0;
+    size_t highest_write_offset = 0;
 
     for (j = 2; j < c->argc; j++) {
         int remargs = c->argc-j-1; /* Remaining args other than current. */

@@ -968,8 +968,8 @@ void bitfieldCommand(client *c) {
 
         if (opcode != BITFIELDOP_GET) {
             readonly = 0;
-            if (higest_write_offset < bitoffset + bits - 1)
-                higest_write_offset = bitoffset + bits - 1;
+            if (highest_write_offset < bitoffset + bits - 1)
+                highest_write_offset = bitoffset + bits - 1;
             /* INCRBY and SET require another argument. */
             if (getLongLongFromObjectOrReply(c,c->argv[j+3],&i64,NULL) != C_OK){
                 zfree(ops);

@@ -999,7 +999,7 @@ void bitfieldCommand(client *c) {
         /* Lookup by making room up to the farest bit reached by
          * this operation. */
         if ((o = lookupStringForBitCommand(c,
-            higest_write_offset)) == NULL) return;
+            highest_write_offset)) == NULL) return;
     }
 
     addReplyMultiBulkLen(c,numops);

@@ -2920,7 +2920,7 @@ void clusterHandleSlaveFailover(void) {
                 (unsigned long long) myself->configEpoch);
         }
 
-        /* Take responsability for the cluster slots. */
+        /* Take responsibility for the cluster slots. */
         clusterFailoverReplaceYourMaster();
     } else {
         clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES);

@@ -2975,7 +2975,7 @@ void clusterHandleSlaveMigration(int max_slaves) {
      * masters with the greatest number of ok slaves, I'm the one with the
      * smallest node ID (the "candidate slave").
      *
-     * Note: this means that eventually a replica migration will occurr
+     * Note: this means that eventually a replica migration will occur
      * since slaves that are reachable again always have their FAIL flag
      * cleared, so eventually there must be a candidate. At the same time
      * this does not mean that there are no race conditions possible (two

src/db.c

@@ -296,7 +296,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
  * If callback is given the function is called from time to time to
  * signal that work is in progress.
  *
- * The dbnum can be -1 if all teh DBs should be flushed, or the specified
+ * The dbnum can be -1 if all the DBs should be flushed, or the specified
  * DB number if we want to flush only a single Redis database number.
  *
  * Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or

@@ -1012,7 +1012,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
         "Redis %s crashed by signal: %d", REDIS_VERSION, sig);
     if (eip != NULL) {
         serverLog(LL_WARNING,
-        "Crashed running the instuction at: %p", eip);
+        "Crashed running the instruction at: %p", eip);
     }
     if (sig == SIGSEGV || sig == SIGBUS) {
         serverLog(LL_WARNING,

@@ -253,7 +253,7 @@ double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) {
  * moved. Return value is the the dictEntry if found, or NULL if not found.
  * NOTE: this is very ugly code, but it let's us avoid the complication of
  * doing a scan on another dict. */
-dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, int *defragged) {
+dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, int *defragged) {
     dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash);
     if (deref) {
         dictEntry *de = *deref;

@@ -290,7 +290,7 @@ int defragKey(redisDb *db, dictEntry *de) {
          * I can't search in db->expires for that key after i already released
          * the pointer it holds it won't be able to do the string compare */
         unsigned int hash = dictGetHash(db->dict, de->key);
-        replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged);
+        replaceSatelliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged);
     }
 
     /* Try to defrag robj and / or string value. */

@@ -558,7 +558,7 @@ void activeDefragCycle(void) {
             cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);
             /* Once in 16 scan iterations, or 1000 pointer reallocations
              * (if we have a lot of pointers in one hash bucket), check if we
-             * reached the tiem limit. */
+             * reached the time limit. */
             if (cursor && (++iterations > 16 || server.stat_active_defrag_hits - defragged > 1000)) {
                 if ((ustime() - start) > timelimit) {
                     return;

@@ -327,7 +327,7 @@ int dictReplace(dict *d, void *key, void *val)
     dictEntry *entry, *existing, auxentry;
 
     /* Try to add the element. If the key
-     * does not exists dictAdd will suceed. */
+     * does not exists dictAdd will succeed. */
     entry = dictAddRaw(d,key,&existing);
     if (entry) {
         dictSetVal(d, entry, val);

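Aside, not part of the patch: the comment fixed above describes dictReplace()'s insert-or-overwrite contract. A hedged sketch using only the signatures visible in this hunk (dict.h as the declaring header is an assumption):

    #include "dict.h"   /* assumed header declaring dict and dictReplace() */

    /* Store key/val, overwriting any previous value. The 1/0 return meaning
     * follows the comment above: 1 = key was absent and dictAddRaw succeeded,
     * 0 = key already existed and only the stored value was replaced. */
    void setEntry(dict *d, void *key, void *val) {
        int added = dictReplace(d, key, val);
        if (added) {
            /* fresh key: a new hash table entry was created */
        } else {
            /* existing key: value swapped, original key pointer kept */
        }
    }
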
@@ -43,7 +43,7 @@ uint16_t intrev16(uint16_t v);
 uint32_t intrev32(uint32_t v);
 uint64_t intrev64(uint64_t v);
 
-/* variants of the function doing the actual convertion only if the target
+/* variants of the function doing the actual conversion only if the target
  * host is big endian */
 #if (BYTE_ORDER == LITTLE_ENDIAN)
 #define memrev16ifbe(p)

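Aside, not part of the patch: the *ifbe macros in this hunk compile to nothing on little-endian hosts and swap bytes on big-endian ones. A small sketch of the serialization pattern they support (the buffer layout is illustrative):

    #include <stdint.h>
    #include <string.h>
    #include "endianconv.h"   /* assumed include path for memrev32ifbe() */

    /* Emit a 32-bit length in little-endian on-disk order regardless of host. */
    void putLen32(unsigned char *dst, uint32_t len) {
        memcpy(dst, &len, sizeof(len));
        memrev32ifbe(dst);   /* no-op on little endian, byte swap on big endian */
    }
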
@@ -879,7 +879,7 @@ promote: /* Promote to dense representation. */
      *
      * Note that this in turn means that PFADD will make sure the command
      * is propagated to slaves / AOF, so if there is a sparse -> dense
-     * convertion, it will be performed in all the slaves as well. */
+     * conversion, it will be performed in all the slaves as well. */
     int dense_retval = hllDenseAdd(hdr->registers, ele, elesize);
     serverAssert(dense_retval == 1);
     return dense_retval;

@@ -3306,7 +3306,7 @@ void moduleInitModulesSystem(void) {
  * because the server must be fully initialized before loading modules.
  *
  * The function aborts the server on errors, since to start with missing
- * modules is not considered sane: clients may rely on the existance of
+ * modules is not considered sane: clients may rely on the existence of
  * given commands, loading AOF also may need some modules to exist, and
  * if this instance is a slave, it must understand commands from master. */
 void moduleLoadFromQueue(void) {

@@ -1636,7 +1636,7 @@ int quicklistTest(int argc, char *argv[]) {
         TEST("add to tail of empty list") {
             quicklist *ql = quicklistNew(-2, options[_i]);
             quicklistPushTail(ql, "hello", 6);
-            /* 1 for head and 1 for tail beacuse 1 node = head = tail */
+            /* 1 for head and 1 for tail because 1 node = head = tail */
             ql_verify(ql, 1, 1, 1, 1);
             quicklistRelease(ql);
         }

@@ -1644,7 +1644,7 @@ int quicklistTest(int argc, char *argv[]) {
         TEST("add to head of empty list") {
             quicklist *ql = quicklistNew(-2, options[_i]);
             quicklistPushHead(ql, "hello", 6);
-            /* 1 for head and 1 for tail beacuse 1 node = head = tail */
+            /* 1 for head and 1 for tail because 1 node = head = tail */
            ql_verify(ql, 1, 1, 1, 1);
            quicklistRelease(ql);
        }

@@ -2051,15 +2051,15 @@ static void getKeySizes(redisReply *keys, int *types,
             keys->element[i]->str);
     }
 
-    /* Retreive sizes */
+    /* Retrieve sizes */
     for(i=0;i<keys->elements;i++) {
-        /* Skip keys that dissapeared between SCAN and TYPE */
+        /* Skip keys that disappeared between SCAN and TYPE */
         if(types[i] == TYPE_NONE) {
             sizes[i] = 0;
             continue;
         }
 
-        /* Retreive size */
+        /* Retrieve size */
         if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
             fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n",
                 keys->element[i]->str, context->err, context->errstr);

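Aside, not part of the patch: getKeySizes() above pipelines one size command per key and then reads the replies back in order. A self-contained sketch of that append-then-collect pattern with the hiredis API (host, key names and the STRLEN choice are illustrative; the real --bigkeys code picks the size command from each key's type):

    #include <stdio.h>
    #include <hiredis/hiredis.h>   /* assumed include path */

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) return 1;

        const char *keys[] = {"k1", "k2", "k3"};          /* illustrative keys */
        for (int i = 0; i < 3; i++)
            redisAppendCommand(c, "STRLEN %s", keys[i]);  /* queued, not yet flushed */

        for (int i = 0; i < 3; i++) {                     /* replies come back in order */
            redisReply *reply;
            if (redisGetReply(c, (void**)&reply) != REDIS_OK) break;
            printf("%s -> %lld bytes\n", keys[i], reply->integer);
            freeReplyObject(reply);
        }
        redisFree(c);
        return 0;
    }
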
@@ -2129,7 +2129,7 @@ static void findBigKeys(void) {
             arrsize = keys->elements;
         }
 
-        /* Retreive types and then sizes */
+        /* Retrieve types and then sizes */
         getKeyTypes(keys, types);
         getKeySizes(keys, types, sizes);
 

@@ -454,7 +454,7 @@ class RedisTrib
 
         # Handle case "1": keys in no node.
         if none.length > 0
-            xputs "The folowing uncovered slots have no keys across the cluster:"
+            xputs "The following uncovered slots have no keys across the cluster:"
             xputs none.keys.join(",")
             yes_or_die "Fix these slots by covering with a random node?"
             none.each{|slot,nodes|

@@ -466,7 +466,7 @@ class RedisTrib
 
         # Handle case "2": keys only in one node.
        if single.length > 0
-            xputs "The folowing uncovered slots have keys in just one node:"
+            xputs "The following uncovered slots have keys in just one node:"
             puts single.keys.join(",")
             yes_or_die "Fix these slots by covering with those nodes?"
             single.each{|slot,nodes|

@@ -477,7 +477,7 @@ class RedisTrib
 
         # Handle case "3": keys in multiple nodes.
         if multi.length > 0
-            xputs "The folowing uncovered slots have keys in multiple nodes:"
+            xputs "The following uncovered slots have keys in multiple nodes:"
             xputs multi.keys.join(",")
             yes_or_die "Fix these slots by moving keys into a single node?"
             multi.each{|slot,nodes|

@@ -1622,7 +1622,7 @@ private
         ]
     end
 
-    # Turn a key name into the corrisponding Redis Cluster slot.
+    # Turn a key name into the corresponding Redis Cluster slot.
     def key_to_slot(key)
         # Only hash what is inside {...} if there is such a pattern in the key.
         # Note that the specification requires the content that is between

@@ -553,7 +553,7 @@ need_full_resync:
 * Side effects, other than starting a BGSAVE:
 *
 * 1) Handle the slaves in WAIT_START state, by preparing them for a full
-*    sync if the BGSAVE was succesfully started, or sending them an error
+*    sync if the BGSAVE was successfully started, or sending them an error
 *    and dropping them from the list of slaves.
 *
 * 2) Flush the Lua scripting script cache if the BGSAVE was actually

@@ -895,7 +895,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
         }
     }
 
-    /* If the preamble was already transfered, send the RDB bulk data. */
+    /* If the preamble was already transferred, send the RDB bulk data. */
     lseek(slave->repldbfd,slave->repldboff,SEEK_SET);
     buflen = read(slave->repldbfd,buf,PROTO_IOBUF_LEN);
     if (buflen <= 0) {

@@ -964,7 +964,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
                     replicationGetSlaveName(slave));
                 /* Note: we wait for a REPLCONF ACK message from slave in
                  * order to really put it online (install the write handler
-                 * so that the accumulated data can be transfered). However
+                 * so that the accumulated data can be transferred). However
                  * we change the replication state ASAP, since our slave
                  * is technically online now. */
                 slave->replstate = SLAVE_STATE_ONLINE;

@@ -1386,7 +1386,7 @@ char *sendSynchronousCommand(int flags, int fd, ...) {
 *
 * The function returns:
 *
-* PSYNC_CONTINUE: If the PSYNC command succeded and we can continue.
+* PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue.
 * PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed.
 *                   In this case the master run_id and global replication
 *                   offset is saved.

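Aside, not part of the patch: the comment above enumerates the outcomes of a PSYNC attempt. A stand-alone sketch of the branching a caller performs on them; the enum values, helper and messages below are illustrative stand-ins, and only the PSYNC_CONTINUE / PSYNC_FULLRESYNC meanings come from the comment itself:

    #include <stdio.h>

    enum { PSYNC_CONTINUE, PSYNC_FULLRESYNC, PSYNC_NOT_SUPPORTED }; /* stand-ins */

    static void handlePsyncResult(int result) {
        switch (result) {
        case PSYNC_CONTINUE:
            puts("partial resync accepted: keep streaming from the saved offset");
            break;
        case PSYNC_FULLRESYNC:
            puts("+FULLRESYNC received: run_id/offset saved, expect a full RDB");
            break;
        default:
            puts("PSYNC unsupported by the master: fall back to plain SYNC");
            break;
        }
    }

    int main(void) { handlePsyncResult(PSYNC_FULLRESYNC); return 0; }
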
@@ -3271,7 +3271,7 @@ void sentinelInfoCommand(client *c) {
     addReplyBulkSds(c, info);
 }
 
-/* Implements Sentinel verison of the ROLE command. The output is
+/* Implements Sentinel version of the ROLE command. The output is
  * "sentinel" and the list of currently monitored master names. */
 void sentinelRoleCommand(client *c) {
     dictIterator *di;

@@ -3413,7 +3413,7 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) {
     if (ri->link->cc &&
         (mstime() - ri->link->cc_conn_time) >
         SENTINEL_MIN_LINK_RECONNECT_PERIOD &&
-        ri->link->act_ping_time != 0 && /* Ther is a pending ping... */
+        ri->link->act_ping_time != 0 && /* There is a pending ping... */
         /* The pending ping is delayed, and we did not received
          * error replies as well. */
         (mstime() - ri->link->act_ping_time) > (ri->down_after_period/2) &&

@@ -193,7 +193,7 @@ void sortCommand(client *c) {
     long limit_start = 0, limit_count = -1, start, end;
     int j, dontsort = 0, vectorlen;
     int getop = 0; /* GET operation counter */
-    int int_convertion_error = 0;
+    int int_conversion_error = 0;
     int syntax_error = 0;
     robj *sortval, *sortby = NULL, *storekey = NULL;
     redisSortObject *vector; /* Resulting vector to sort */

@@ -469,7 +469,7 @@ void sortCommand(client *c) {
                     if (eptr[0] != '\0' || errno == ERANGE ||
                         isnan(vector[j].u.score))
                     {
-                        int_convertion_error = 1;
+                        int_conversion_error = 1;
                     }
                 } else if (byval->encoding == OBJ_ENCODING_INT) {
                     /* Don't need to decode the object if it's

@@ -503,7 +503,7 @@ void sortCommand(client *c) {
     /* Send command output to the output buffer, performing the specified
      * GET/DEL/INCR/DECR operations if any. */
     outputlen = getop ? getop*(end-start+1) : end-start+1;
-    if (int_convertion_error) {
+    if (int_conversion_error) {
         addReplyError(c,"One or more scores can't be converted into double");
     } else if (storekey == NULL) {
         /* STORE option not specified, sent the sorting result to client */

@@ -507,7 +507,7 @@ static int zslParseRange(robj *min, robj *max, zrangespec *spec) {
 * + means the max string possible
 *
 * If the string is valid the *dest pointer is set to the redis object
-* that will be used for the comparision, and ex will be set to 0 or 1
+* that will be used for the comparison, and ex will be set to 0 or 1
 * respectively if the item is exclusive or inclusive. C_OK will be
 * returned.
 *

@@ -451,7 +451,7 @@ int string2ld(const char *s, size_t slen, long double *dp) {
 /* Convert a double to a string representation. Returns the number of bytes
  * required. The representation should always be parsable by strtod(3).
  * This function does not support human-friendly formatting like ld2string
- * does. It is intented mainly to be used inside t_zset.c when writing scores
+ * does. It is intended mainly to be used inside t_zset.c when writing scores
  * into a ziplist representing a sorted set. */
 int d2string(char *buf, size_t len, double value) {
     if (isnan(value)) {

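Aside, not part of the patch: the comment and signature above promise output that strtod(3) can parse back. A hedged round-trip sketch (the buffer size and printf are illustrative; util.h as the declaring header is an assumption):

    #include <stdio.h>
    #include <stdlib.h>
    #include "util.h"   /* assumed header declaring d2string() */

    void roundTripScore(double score) {
        char buf[128];
        int len = d2string(buf, sizeof(buf), score);  /* bytes written to buf */
        double back = strtod(buf, NULL);              /* should recover the score */
        printf("%.*s -> %.17g (len=%d)\n", len, buf, back, len);
    }
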
@@ -371,7 +371,7 @@ size_t zmalloc_get_private_dirty(long pid) {
 }
 
 /* Returns the size of physical memory (RAM) in bytes.
- * It looks ugly, but this is the cleanest way to achive cross platform results.
+ * It looks ugly, but this is the cleanest way to achieve cross platform results.
  * Cleaned up from:
  *
  * http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system

@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
|
||||||
list $a $b
|
list $a $b
|
||||||
} {somevalue {}}
|
} {somevalue {}}
|
||||||
|
|
||||||
test {TTL returns tiem to live in seconds} {
|
test {TTL returns time to live in seconds} {
|
||||||
r del x
|
r del x
|
||||||
r setex x 10 somevalue
|
r setex x 10 somevalue
|
||||||
set ttl [r ttl x]
|
set ttl [r ttl x]
|
||||||
|
|
|
@ -516,7 +516,7 @@ start_server {tags {"scripting"}} {
|
||||||
# Note: keep this test at the end of this server stanza because it
|
# Note: keep this test at the end of this server stanza because it
|
||||||
# kills the server.
|
# kills the server.
|
||||||
test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
|
test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
|
||||||
# The server sould be still unresponding to normal commands.
|
# The server should be still unresponding to normal commands.
|
||||||
catch {r ping} e
|
catch {r ping} e
|
||||||
assert_match {BUSY*} $e
|
assert_match {BUSY*} $e
|
||||||
catch {r shutdown nosave}
|
catch {r shutdown nosave}
|
||||||
|
|