-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcache.py
More file actions
154 lines (125 loc) · 4.93 KB
/
cache.py
File metadata and controls
154 lines (125 loc) · 4.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
"""
File: cache_internal.py
Description:
This module uses a Python dictionary to implement a simple in-memory Twice cache.
Author: Kyle Vogt
Copyright (c) 2008, Justin.tv, Inc.
"""
import random
import time

from twisted.internet import defer, protocol, reactor
from twisted.protocols.memcache import MemCacheProtocol
from twisted.python import log
class TwiceCache:
    """Abstract base class defining the Twice cache interface.

    Concrete backends (in-memory dict, memcache pool, ...) subclass this
    and override set/get/flush; the defaults are inert no-ops.
    """

    def __init__(self, config):
        # Backend configuration dictionary, kept for subclasses to read.
        self.config = config

    def ready(self):
        """Hook invoked once the cache backend is online."""
        pass

    def set(self, dictionary, time = None):
        """Store the key/value pairs in *dictionary* for *time* seconds."""
        pass

    def get(self, keylist):
        """Retrieve the requested keys as a {key: value} dict."""
        return {}

    def flush(self):
        """Delete every key from the cache."""
        pass
class InternalCache(TwiceCache):
    """Twice Cache backed by a plain Python dictionary.

    Each entry is stored as {'expires_on': timestamp, 'element': value};
    expiry is checked lazily inside get().
    """
    def __init__(self, config):
        # BUG FIX: was Cache.__init__ — `Cache` is undefined in this module;
        # the parent class is TwiceCache.
        TwiceCache.__init__(self, config)
        self.cache = {}
        self.ready()

    def ready(self):
        """Log the configured memory limit, defaulting it when missing."""
        limit = self.config.get('memory_limit')
        if not limit:
            log.msg('WARNING: memory_limit not specified, using 100MB as default')
            self.config['memory_limit'] = 100000
        log.msg("CACHE_BACKEND: Using %s MB in-memory cache" % limit)

    def set(self, dictionary, time = None):
        """Store every key/value pair of *dictionary*.

        *time* is a TTL in seconds; None (or 0) means "never expire",
        matching memcache's expireTime convention.
        """
        # The parameter is named `time` for interface compatibility, which
        # shadows the module — re-import the clock under a private name.
        # (The original called time.time() on the shadowed parameter.)
        from time import time as _now
        # BUG FIX: a falsy TTL used to yield expires_on == now, so entries
        # written without a TTL were already expired on the next get().
        expires_on = (_now() + time) if time else float('inf')
        for key, val in dictionary.items():
            # BUG FIX: the original mutated the caller's dict and then did
            # self.cache.update(element), which inserted the literal keys
            # 'expires_on'/'element' into the cache instead of `key`.
            self.cache[key] = {'expires_on': expires_on, 'element': val}

    def get(self, keylist):
        """Return {key: value} for each requested key.

        Missing keys are omitted; expired entries come back as None
        (preserved from the original behavior).
        """
        if not isinstance(keylist, list):
            keylist = [keylist]
        now = time.time()  # hoisted: one clock read per call
        output = {}
        for key in keylist:
            element = self.cache.get(key)
            if element:
                if now > element['expires_on']:
                    output[key] = None
                else:
                    output[key] = element['element']
        return output

    def delete(self, keylist):
        """Remove each key in *keylist*, ignoring keys not present."""
        for key in keylist:
            try:
                del self.cache[key]
            except KeyError:  # narrowed from a bare except:
                pass

    def flush(self):
        """Drop every cached entry."""
        self.cache = {}
class MemcacheCache(TwiceCache):
    """Twice Cache backed by a pool of Twisted memcache connections.

    Values are pickled before storage; reads/writes are load-balanced
    randomly across the pool. set()/get() return Deferreds.
    """
    def __init__(self, config):
        # BUG FIX: was Cache.__init__ — `Cache` is undefined in this module;
        # the parent class is TwiceCache.
        TwiceCache.__init__(self, config)
        server = config['cache_server']
        connection_pool_size = int(config.get('cache_pool', 1))
        log.msg('Creating memcache connection pool to server %s...' % server)
        self.pool = []
        # Import pickling library.
        # BUG FIX: `import cPickle as pickle` here only created a *local*
        # binding, so set()/_format() raised NameError on the module-global
        # `pickle`. Keep the module handle on self instead.
        try:
            import cPickle as pickle
        except ImportError:
            log.msg('cPickle not available, using slower pickle library.')
            import pickle
        self.pickle = pickle
        # Parse "host:port"; fall back to the default memcache port.
        try:
            self.host, self.port = server.split(':')
        except ValueError:  # narrowed from a bare except:
            self.host = server
            self.port = 11211
        # Open the pool; ready() fires once every connection attempt settles.
        # (Requires `defer` from twisted.internet — see module imports.)
        defers = []
        for i in xrange(connection_pool_size):
            d = protocol.ClientCreator(reactor, MemCacheProtocol).connectTCP(self.host, int(self.port))
            d.addCallback(self.add_connection)
            defers.append(d)
        defer.DeferredList(defers).addCallback(self.ready)

    def add_connection(self, result=None):
        """Callback: register a newly connected protocol in the pool."""
        log.msg('CACHE_BACKEND: Connected to memcache server at %s:%s' % (self.host, self.port))
        self.pool.append(result)

    def ready(self, result=None):
        """Callback fired when every pool connection attempt has settled."""
        log.msg('CACHE_BACKEND: Memcache pool complete')

    def cache_pool(self):
        """Random load balancing across the connection pool."""
        return random.choice(self.pool)

    def set(self, dictionary, time = None):
        """Pickle and store all non-None values of *dictionary*.

        Returns the backend Deferred, or {} when there is nothing to store.
        *time* is passed through as the memcache expireTime.
        """
        pickled_dict = dict([(key, self.pickle.dumps(val))
                             for key, val in dictionary.items()
                             if val is not None])
        connection = self.cache_pool()
        if len(pickled_dict):
            return connection.set_multi(pickled_dict, expireTime = time)
        else:
            return {}

    def get(self, keylist):
        """Fetch the given key(s); returns a Deferred firing with a dict."""
        if not isinstance(keylist, list):
            keylist = [keylist]
        connection = self.cache_pool()
        return connection.get_multi(keylist).addCallback(self._format, keylist)

    def delete(self, keylist):
        """Issue a delete for each key; fire-and-forget."""
        for key in keylist:
            self.cache_pool().delete(key)

    def _format(self, results, keylist):
        """Return a dictionary containing all keys in keylist, with cache
        misses as None."""
        # NOTE(review): results[1] is presumably the key -> pickled-value map
        # from get_multi — confirm against the protocol wrapper in use.
        output = dict([(key, results[1].get(key, None) and self.pickle.loads(results[1][key]))
                       for key in keylist])
        return output

    def flush(self):
        """Flush every entry on the memcache server."""
        self.cache_pool().flushAll()