Save chunks in db

One of the changes introduced for this is that multiple chunks and
diffs are now represented by dicts, not lists.
This commit is contained in:
Joscha 2017-04-16 15:51:00 +00:00
parent 9e5b5f874a
commit aca387cf05
4 changed files with 121 additions and 70 deletions

View file

@@ -2,8 +2,8 @@ import sqlite3
import time
import threading
from chunks import ChunkPool
from utils import Position
from chunks import ChunkPool, Chunk
from utils import Position, CHUNK_WIDTH, CHUNK_HEIGHT
class ChunkDB():
"""
@@ -12,6 +2,8 @@ class ChunkDB():
def __init__(self, filename):
self.dbfilename = filename
self._create_table()
def transaction(func):
def wrapper(self, *args, **kwargs):
@@ -24,14 +26,56 @@ class ChunkDB():
return wrapper
@transaction
def _create_table(self, con):
    """
    Create the chunks table if it does not exist yet.

    Each row stores one chunk: its x/y grid position (composite
    primary key) and its content serialized as text.
    """
    # Connection.execute is the sqlite3 shortcut for cursor() + execute.
    con.execute(("CREATE TABLE IF NOT EXISTS chunks ("
                 "x INTEGER NOT NULL, "
                 "y INTEGER NOT NULL, "
                 "content TEXT, "
                 "PRIMARY KEY (x, y)"
                 ")"))
@transaction
def save_many(self, con, chunks):
    """
    Insert or update many chunks in one batch.

    chunks: dict mapping a Position to a Chunk (see chunks_to_list).
    Uses INSERT OR REPLACE so existing rows at the same (x, y) are
    overwritten.
    """
    # Removed leftover debug print("save_many").
    rows = ChunkDB.chunks_to_list(chunks)
    con.executemany("INSERT OR REPLACE INTO chunks VALUES (?, ?, ?)", rows)
@transaction
def load_many(self, con, coords):
    """
    Load the chunks stored at the given coordinates.

    coords: iterable of (x, y) positions.
    Returns a dict mapping Position to Chunk; coordinates with no
    stored chunk are simply absent from the result.
    """
    # Removed the debug print and the stub
    # `return [(coor, None) for coor in coords]` that made the real
    # implementation below unreachable.
    cur = con.cursor()
    rows = []
    for pos in coords:
        cur.execute("SELECT * FROM chunks WHERE x=? AND y=?", pos)
        rows.extend(cur.fetchall())
    return ChunkDB.list_to_chunks(rows)
@transaction
def remove_empty(self, con):
    """Delete every stored chunk whose content is entirely blank."""
    # A fully blank chunk serializes to CHUNK_WIDTH*CHUNK_HEIGHT spaces.
    blank = " "*CHUNK_WIDTH*CHUNK_HEIGHT
    con.execute("DELETE FROM chunks WHERE content=?", (blank,))
@staticmethod
def list_to_chunks(rows):
    """
    Convert DB rows of the form (x, y, content) into a dict mapping
    Position(x, y) to the Chunk deserialized from content.
    """
    return {
        Position(x, y): Chunk.from_string(content)
        for x, y, content in rows
    }
@staticmethod
def chunks_to_list(chunks):
l = []
for pos, chunk in chunks.items():
l.append((pos[0], pos[1], chunk.to_string()))
return l
class DBChunkPool(ChunkPool):
"""
@@ -42,8 +86,8 @@ class DBChunkPool(ChunkPool):
super().__init__()
self._chunkdb = ChunkDB(filename)
self.save_period = 10 # save and clean up every minute
self.max_age = 20 # ca. one minute until a chunk is unloaded again
self.save_period = 60 # save and clean up every minute
self.max_age = 60 # ca. one minute until a chunk is unloaded again
self.save_thread = threading.Thread(
target=self.perodic_save,
@@ -55,27 +99,23 @@ class DBChunkPool(ChunkPool):
def save_changes(self):
    """
    Persist every chunk modified since the last commit.

    commit_changes() yields the diffs as a dict keyed by position
    (this file's convention: multiple chunks/diffs are dicts, not
    lists); the corresponding live chunks are gathered and written
    to the DB in one batch.
    """
    # The block previously mixed the superseded list-based
    # accumulation with the dict-based one (changed_chunks was bound
    # twice); only the dict version is kept.
    diffs = self.commit_changes()
    changed_chunks = {pos: self.get(pos) for pos in diffs}
    self._chunkdb.save_many(changed_chunks)
def load(self, pos):
    """
    Loading a single chunk is unsupported in this pool; chunks are
    always fetched in batches via load_list().

    Raises NotImplementedError (a subclass of Exception, so existing
    `except Exception` callers keep working).
    """
    # Replaced the debug print + bare `raise Exception` with an
    # explicit, idiomatic exception carrying a message.
    raise NotImplementedError(
        "DBChunkPool loads chunks in batches; use load_list()")
def load_list(self, coords):
    """
    Ensure a chunk is present in the pool for every given coordinate.

    Coordinates already in the pool are skipped; the rest are fetched
    from the DB in one batch (load_many returns a dict keyed by
    position). Positions the DB does not know are created empty.
    """
    # The block previously mixed the superseded list-of-pairs loop
    # (dchunk[0]/dchunk[1]) with the dict-based loop; only the dict
    # version is kept, and the debug print is removed.
    to_load = [pos for pos in coords if pos not in self._chunks]
    chunks = self._chunkdb.load_many(to_load)
    for pos in to_load:
        if pos in chunks:
            self.set(pos, chunks[pos])
        else:
            self.create(pos)
@@ -84,21 +124,20 @@ class DBChunkPool(ChunkPool):
time.sleep(self.save_period)
with self:
print("BEFORE:::")
self.print_chunks()
self.save_changes()
# unload old chunks
now = time.time()
for pos, chunk in self._chunks.items():
print(f"p{pos} :: t{now} :: m{chunk.last_modified} :: a{chunk.age(now)}")
self.clean_up(condition=lambda pos, chunk: chunk.age(now) > self.max_age)
print("AFTER:::")
self.print_chunks()
def get_min_max(self):
def remove_empty(self):
self._chunkdb.remove_empty()
def _get_min_max(self):
"""
Meant for debugging.
"""
minx = min(pos.x for pos in self._chunks)
maxx = max(pos.x for pos in self._chunks)
miny = min(pos.y for pos in self._chunks)
@ -106,9 +145,13 @@ class DBChunkPool(ChunkPool):
return minx, maxx, miny, maxy
def print_chunks(self):
def _print_chunks(self):
"""
Meant for debugging.
"""
if self._chunks:
minx, maxx, miny, maxy = self.get_min_max()
minx, maxx, miny, maxy = self._get_min_max()
sizex, sizey = maxx - minx + 1, maxy - miny + 1
print("" + ""*sizex*2 + "")
for y in range(miny, maxy + 1):