commit 5f412947b403596f892cab799d2ff08cde35e2db
parent ac44170b1416a9638553f214e89aaff38539cf19
Author: sin <sin@2f30.org>
Date: Wed, 1 May 2013 14:50:05 +0100
rewrite mapfs
Diffstat:
6 files changed, 388 insertions(+), 253 deletions(-)
diff --git a/src/kunt/kunt.go b/src/kunt/kunt.go
@@ -137,7 +137,7 @@ func cmdAddQuote(msg irc.IrcMessage) {
}
func cmdCountQuotes(msg irc.IrcMessage) {
- text := fmt.Sprintf("The quote DB has %d quotes", quoteDb.Len())
+ text := fmt.Sprintf("The quote DB has %d quotes", quoteDb.Size())
kunt.ircCtx.SendPrivmsg(msg.Params[0], text)
}
@@ -187,7 +187,7 @@ func cmdAddUrl(msg irc.IrcMessage) {
}
func cmdCountUrls(msg irc.IrcMessage) {
- r := fmt.Sprintf("The url DB has %d urls", urlDb.Len())
+ r := fmt.Sprintf("The url DB has %d urls", urlDb.Size())
kunt.ircCtx.SendPrivmsg(msg.Params[0], r)
}
@@ -281,8 +281,10 @@ func main() {
hostport := strings.Split(flag.Arg(0), ":")
- quoteDb = mapfs.NewMapfs("Quotes", "db/quotes", "quote")
- urlDb = mapfs.NewMapfs("Urls", "db/urls", "url")
+ quotePlain := mapfs.NewPlainMap()
+ quoteDb = mapfs.NewMapfs(quotePlain, "Quotes", "db/quotes", "quote")
+ urlPlain := mapfs.NewPlainMap()
+ urlDb = mapfs.NewMapfs(urlPlain, "Urls", "db/urls", "url")
err := quoteDb.Load()
if err != nil {
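
The kunt.go hunk above shows the core of the rewrite: callers now build a storage
backend first and hand it to mapfs.NewMapfs, instead of choosing between the old
NewMapfs and NewEncryptedMapfs constructors. A minimal sketch of the new call
pattern, reusing the quote DB paths from this commit (the sample quote and the
error handling are illustrative, not part of the commit):

    package main

    import (
        "fmt"
        "log"

        "mapfs" // GOPATH-style import matching the repo's src/mapfs layout
    )

    func main() {
        // Plain (unencrypted) backend wrapped in a Mapfs, as kunt.go now does.
        quoteDb := mapfs.NewMapfs(mapfs.NewPlainMap(), "Quotes", "db/quotes", "quote")
        if err := quoteDb.Load(); err != nil {
            log.Fatal(err)
        }
        key, err := quoteDb.Append([]byte("an example quote")) // cached, marked dirty
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("entries:", quoteDb.Size(), "new key:", key)
        if err := quoteDb.Sync(); err != nil { // writes db/quotes/quote<key>.txt
            log.Fatal(err)
        }
    }
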
diff --git a/src/mapfs/blowfish.go b/src/mapfs/blowfish.go
@@ -0,0 +1,180 @@
+package mapfs
+
+import (
+ "bufio"
+ "bytes"
+ "code.google.com/p/go.crypto/blowfish"
+ "encoding/gob"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+type BlowfishMap struct {
+ cache map[int]blowfishVal
+ cipher *blowfish.Cipher
+}
+
+type blowfishVal struct {
+ dirty bool
+ buf []byte // Actual encrypted data
+ siz int // Length of decrypted data
+}
+
+type hdr struct {
+ Magic string
+ Len int
+ Data []byte
+}
+
+func NewBlowfishMap(key string) *BlowfishMap {
+ c, err := blowfish.NewCipher([]byte(key))
+ if err != nil {
+ log.Fatal(err)
+ }
+ return &BlowfishMap{
+ cache: make(map[int]blowfishVal),
+ cipher: c,
+ }
+}
+
+func (b *BlowfishMap) rawRead(path string) ([]byte, error) {
+ fi, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer fi.Close()
+ r := bufio.NewReader(fi)
+ g := gob.NewDecoder(r)
+ h := new(hdr)
+ err = g.Decode(h)
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+ }
+ pt := b.decryptBuf(h.Data)
+ return pt[0:h.Len], nil
+}
+
+func (b *BlowfishMap) rawWrite(path string, buf []byte) error {
+ fo, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer fo.Close()
+ ct := b.encryptBuf(buf)
+ w := bufio.NewWriter(fo)
+ h := &hdr{"BENC", len(buf), ct}
+ g := gob.NewEncoder(w)
+ err = g.Encode(*h)
+ if err != nil {
+ return err
+ }
+ err = w.Flush()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *BlowfishMap) get(key int) ([]byte, error) {
+ val, ok := b.cache[key]
+ if !ok {
+ return nil, fmt.Errorf("No entry with key: %d", key)
+ }
+ return val.buf, nil
+}
+
+func (b *BlowfishMap) put(key int, buf []byte) error {
+ _, ok := b.cache[key]
+ if ok {
+ return fmt.Errorf("Key %d already in use", key)
+ }
+ b.cache[key] = blowfishVal{true, buf, len(buf)}
+ return nil
+}
+
+func (b *BlowfishMap) add(buf []byte) (int, error) {
+ key := len(b.cache)
+ b.cache[key] = blowfishVal{true, buf, len(buf)}
+ return key, nil
+}
+
+func (b *BlowfishMap) countMatches(buf []byte) int {
+ i := 0
+ for _, v := range b.cache {
+ if bytes.Equal(v.buf, buf) {
+ i++
+ }
+ }
+ return i
+}
+
+func (b *BlowfishMap) virtSync() error {
+ for k, v := range b.cache {
+ b.cache[k] = blowfishVal{false, v.buf, v.siz}
+ }
+ return nil
+}
+
+func (b *BlowfishMap) syncEntry(path string, key int) error {
+ val, ok := b.cache[key]
+ if !ok {
+ return fmt.Errorf("No entry with key: %d", key)
+ }
+ if val.dirty {
+ b.rawWrite(path, val.buf)
+ }
+ return nil
+}
+
+func (b *BlowfishMap) size() int {
+ return len(b.cache)
+}
+
+func (b *BlowfishMap) string() string {
+ s := ""
+ for k, v := range b.cache {
+ dirty := ""
+ if v.dirty {
+ dirty = "yes"
+ } else {
+ dirty = "no"
+ }
+ s += fmt.Sprintf("k: %d - dirty: %s\n",
+ k, dirty)
+ }
+ return s
+}
+
+func (b *BlowfishMap) encryptBuf(a []byte) []byte {
+ padded := a
+ diff := len(a) % blowfish.BlockSize
+ if diff != 0 {
+ diff = blowfish.BlockSize - diff
+ }
+ for i := 0; i < diff; i++ {
+ padded = append(padded, byte(0x0))
+ }
+ ct := make([]byte, len(padded))
+ for i := 0; i < len(a); i += blowfish.BlockSize {
+ if i+blowfish.BlockSize > len(a) {
+ b.cipher.Encrypt(ct[i:], padded[i:])
+ return ct
+ }
+ b.cipher.Encrypt(ct[i:i+blowfish.BlockSize],
+ padded[i:i+blowfish.BlockSize])
+ }
+ return ct
+}
+
+func (b *BlowfishMap) decryptBuf(a []byte) []byte {
+ pt := make([]byte, len(a))
+ for i := 0; i < len(a); i += blowfish.BlockSize {
+ b.cipher.Decrypt(pt[i:i+blowfish.BlockSize],
+ a[i:i+blowfish.BlockSize])
+ }
+ return pt
+}
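
blowfish.go takes over what the encrypt/cipher fields of Mapfs used to do: it keeps
the same cache shape as PlainMap, encrypts in rawWrite and decrypts in rawRead, and
gob-encodes a small header ({Magic, Len, Data}) so the plaintext length survives the
block padding. Nothing in this commit actually constructs one (kunt.go only uses
PlainMap), but presumably an encrypted DB would be wired up along these lines (the
key string is a placeholder):

    // Sketch only: plug the Blowfish-backed Mapper into Mapfs.
    urlCrypt := mapfs.NewBlowfishMap("some-secret-key")
    urlDb := mapfs.NewMapfs(urlCrypt, "Urls", "db/urls", "url")
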
diff --git a/src/mapfs/io.go b/src/mapfs/io.go
@@ -1,30 +0,0 @@
-package mapfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-)
-
-func (m *Mapfs) fsRead(key int) ([]byte, error) {
- path := fmt.Sprintf("%s/%s%d.txt", m.path, m.prefix, key)
- b, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return b, err
-}
-
-func (m *Mapfs) fsWrite(key int, buf []byte) error {
- path := fmt.Sprintf("%s/%s%d.txt", m.path, m.prefix, key)
- _, err := os.Stat(path)
- if err == nil {
- return fmt.Errorf("Entry %s already exists in %s db",
- path, m.name)
- }
- err = ioutil.WriteFile(path, buf, 0644)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/src/mapfs/mapfs.go b/src/mapfs/mapfs.go
@@ -2,8 +2,6 @@
package mapfs
import (
- "bytes"
- "code.google.com/p/go.crypto/blowfish"
"fmt"
"log"
"math/rand"
@@ -15,19 +13,24 @@ import (
)
type Mapfs struct {
- name string
- path string
- prefix string
- cache map[int]mapfsVal
- cacheOnly bool
- encrypt bool
- cipher *blowfish.Cipher
+ name string
+ path string
+ prefix string
+ mapper Mapper
sync.Mutex
}
-type mapfsVal struct {
- dirty bool // Do we need to sync this entry to disk?
- data []byte // Actual raw data
+type Mapper interface {
+ rawRead(string) ([]byte, error)
+ rawWrite(string, []byte) error
+ get(int) ([]byte, error)
+ put(int, []byte) error
+ add([]byte) (int, error)
+ countMatches([]byte) int
+ virtSync() error
+ syncEntry(string, int) error
+ size() int
+ string() string
}
type MapIter func() (key int, data []byte, ok bool)
@@ -37,16 +40,11 @@ func MakeMapIter(m *Mapfs) MapIter {
return func() (key int, data []byte, ok bool) {
m.Lock()
defer m.Unlock()
- for i < len(m.cache) {
- v, ok := m.cache[i]
- if ok {
+ for i < m.mapper.size() {
+ b, err := m.mapper.get(i)
+ if err == nil {
i++
- if m.encrypt {
- pt := make([]byte, len(v.data))
- m.cipher.Decrypt(pt, v.data)
- return i - 1, pt, true
- }
- return i - 1, v.data, true
+ return i - 1, b, true
}
i++
}
@@ -54,96 +52,100 @@ func MakeMapIter(m *Mapfs) MapIter {
}
}
-func NewMapfs(name string, path string, prefix string) *Mapfs {
- log.SetPrefix("mapfs: ")
+func NewMapfs(m Mapper, name string, path string, prefix string) *Mapfs {
return &Mapfs{
- name: name,
- path: path,
- prefix: prefix,
- cache: make(map[int]mapfsVal),
- cacheOnly: false,
- encrypt: false,
- cipher: nil,
+ name: name,
+ path: path,
+ prefix: prefix,
+ mapper: m,
}
}
-func NewEncryptedMapfs(name string, path string, prefix string, key string) *Mapfs {
- log.SetPrefix("mapfs: ")
- c, err := blowfish.NewCipher([]byte(key))
- if err != nil {
- log.Fatal(err)
+func (m *Mapfs) rawRead(key int) ([]byte, error) {
+ path := fmt.Sprintf("%s/%s%d.txt", m.path, m.prefix, key)
+ return m.mapper.rawRead(path)
+}
+
+func (m *Mapfs) rawWrite(key int, buf []byte) error {
+ path := fmt.Sprintf("%s/%s%d.txt", m.path, m.prefix, key)
+ return m.mapper.rawWrite(path, buf)
+}
+
+func (m *Mapfs) Get(key int) ([]byte, error) {
+ m.Lock()
+ defer m.Unlock()
+ if m.mapper.size() == 0 {
+ return nil, fmt.Errorf("Empty map, can't fetch entry")
}
- return &Mapfs{
- name: name,
- path: path,
- prefix: prefix,
- cache: make(map[int]mapfsVal),
- cacheOnly: false,
- encrypt: true,
- cipher: c,
+ return m.mapper.get(key)
+}
+
+func (m *Mapfs) Put(key int, buf []byte) error {
+ m.Lock()
+ defer m.Unlock()
+ if key < 0 {
+ return fmt.Errorf("Invalid key: %d", key)
}
+ return m.mapper.put(key, buf)
+}
+
+func (m *Mapfs) Append(buf []byte) (int, error) {
+ m.Lock()
+ defer m.Unlock()
+ return m.mapper.add(buf)
+}
+
+func (m *Mapfs) CountMatches(buf []byte) int {
+ m.Lock()
+ defer m.Unlock()
+ return m.mapper.countMatches(buf)
}
-// Sync dirty entries to disk
func (m *Mapfs) Sync() error {
m.Lock()
defer m.Unlock()
- if m.cacheOnly {
- return nil
- }
- for k, v := range m.cache {
- if v.dirty {
- err := m.fsWrite(k, v.data)
+ for i := 0; i < m.mapper.size(); i++ {
+ _, err := m.mapper.get(i)
+ if err == nil {
+ path := fmt.Sprintf("%s/%s%d.txt",
+ m.path, m.prefix, i)
+ err = m.mapper.syncEntry(path, i)
if err != nil {
return err
}
- m.cache[k] = mapfsVal{false, v.data}
- if !m.encrypt {
- fmt.Printf("Synced entry (%d, %v)\n",
- k, v.data)
- } else {
- fmt.Printf("Synced entry (%d, %v)\n",
- k, "ENCRYPTED")
- }
}
}
return nil
}
-func (m *Mapfs) CacheOnly(flag bool) {
+func (m *Mapfs) virtSync() error {
m.Lock()
defer m.Unlock()
- m.cacheOnly = flag
+ return m.mapper.virtSync()
}
-// Print map
-func (m *Mapfs) String() string {
+func (m *Mapfs) Size() int {
m.Lock()
defer m.Unlock()
- s := fmt.Sprintf("*** %s MAP DUMP ***\n", m.name)
- for k, v := range m.cache {
- dirty := ""
- if v.dirty {
- dirty = "yes"
- } else {
- dirty = "no"
- }
- s += fmt.Sprintf("k: %d - dirty: %s\n",
- k, dirty)
- }
- return s
+ return m.mapper.size()
}
-// Mark all entries as synced but do not actually write them out to disk
-func (m *Mapfs) virtSync() {
+func (m *Mapfs) Empty() bool {
m.Lock()
defer m.Unlock()
- for k, v := range m.cache {
- m.cache[k] = mapfsVal{false, v.data}
+ if m.mapper.size() == 0 {
+ return true
}
+ return false
+}
+
+func (m *Mapfs) String() string {
+ m.Lock()
+ defer m.Unlock()
+ fmt.Printf("*** %s MAP DUMP ***\n", m.name)
+ return m.mapper.string()
}
-// Load the map from disk
func (m *Mapfs) Load() error {
_, err := os.Stat(m.path)
if err != nil {
@@ -163,7 +165,7 @@ func (m *Mapfs) Load() error {
if err != nil {
return err
}
- b, err := m.fsRead(i)
+ b, err := m.rawRead(i)
if err != nil {
return err
}
@@ -180,136 +182,18 @@ func (m *Mapfs) Load() error {
return err
}
-// Count how many instances of `buf' exist in the map
-func (m *Mapfs) CountMatches(buf []byte) int {
- m.Lock()
- defer m.Unlock()
- i := 0
- raw := buf
- if m.encrypt {
- ct := m.encryptBuf(buf)
- raw = ct
- }
- for _, v := range m.cache {
- if bytes.Equal(v.data, raw) {
- i++
- }
- }
- return i
-}
-
-func (m *Mapfs) Put(key int, buf []byte) error {
- m.Lock()
- defer m.Unlock()
- if key < 0 {
- return fmt.Errorf("Invalid key: %d", key)
- }
- _, ok := m.cache[key]
- if ok {
- return fmt.Errorf("Key %d already in use", key)
- }
- if m.encrypt {
- ct := m.encryptBuf(buf)
- m.cache[key] = mapfsVal{true, ct}
- return nil
- }
- m.cache[key] = mapfsVal{true, buf}
- return nil
-}
-
-// Append buf to the map
-func (m *Mapfs) Append(buf []byte) (int, error) {
- m.Lock()
- defer m.Unlock()
- if m.encrypt {
- ct := m.encryptBuf(buf)
- key := len(m.cache)
- m.cache[key] = mapfsVal{true, ct}
- return key, nil
- }
- key := len(m.cache)
- m.cache[key] = mapfsVal{true, buf}
- return key, nil
-}
-
-// Return the raw data based on the key
-func (m *Mapfs) Get(key int) ([]byte, error) {
- m.Lock()
- defer m.Unlock()
- if len(m.cache) == 0 {
- return nil, fmt.Errorf("Empty map, can't fetch entry")
- }
- val, ok := m.cache[key]
- if !ok {
- return nil, fmt.Errorf("No entry with key: %d", key)
- }
- if m.encrypt {
- pt := m.decryptBuf(val.data)
- return pt, nil
- }
- return val.data, nil
-}
-
func (m *Mapfs) Rand() ([]byte, int) {
m.Lock()
defer m.Unlock()
- idx := rand.Intn(len(m.cache))
- i := 0
- for k, _ := range m.cache {
+ idx := rand.Intn(m.mapper.size())
+ for i := 0; i < m.mapper.size(); i++ {
if i == idx {
- val := m.cache[k]
- if m.encrypt {
- pt := m.decryptBuf(val.data)
- return pt, k
+ b, err := m.mapper.get(i)
+ if err != nil {
+ log.Fatal(err)
}
- return val.data, k
+ return b, i
}
- i++
}
return nil, -1
}
-
-func (m *Mapfs) Empty() bool {
- m.Lock()
- defer m.Unlock()
- if len(m.cache) == 0 {
- return true
- }
- return false
-}
-
-func (m *Mapfs) Len() int {
- m.Lock()
- defer m.Unlock()
- return len(m.cache)
-}
-
-func (m *Mapfs) encryptBuf(a []byte) []byte {
- padded := a
- diff := len(a) % blowfish.BlockSize
- if diff != 0 {
- diff = blowfish.BlockSize - diff
- }
- for i := 0; i < diff; i++ {
- padded = append(padded, byte(0x0))
- }
- ct := make([]byte, len(padded))
- for i := 0; i < len(a); i += blowfish.BlockSize {
- if i+blowfish.BlockSize > len(a) {
- m.cipher.Encrypt(ct[i:], padded[i:])
- return ct
- }
- m.cipher.Encrypt(ct[i:i+blowfish.BlockSize],
- padded[i:i+blowfish.BlockSize])
- }
- return ct
-}
-
-func (m *Mapfs) decryptBuf(a []byte) []byte {
- pt := make([]byte, len(a))
- for i := 0; i < len(a); i += blowfish.BlockSize {
- m.cipher.Decrypt(pt[i:i+blowfish.BlockSize],
- a[i:i+blowfish.BlockSize])
- }
- return pt
-}
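
MakeMapIter survives the rewrite but now walks the backend through mapper.size()
and mapper.get() instead of touching the cache and cipher directly. From the
caller's side iteration still looks roughly like this (the loop body is
illustrative):

    it := mapfs.MakeMapIter(quoteDb)
    for key, data, ok := it(); ok; key, data, ok = it() {
        fmt.Printf("%d: %s\n", key, data)
    }
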
diff --git a/src/mapfs/mapfs_test.go b/src/mapfs/mapfs_test.go
@@ -1,15 +0,0 @@
-package mapfs
-
-import (
- "testing"
-)
-
-func TestEncryptingFs(t *testing.T) {
- mfs := NewEncryptedMapfs("kota", "./", "moufa", "ICEBABY!@#")
- test := []byte("sweet plaintext!") //this test will only work without padding
- mfs.Append(test)
- r0, _ := mfs.Get(0)
- if string(r0) != string(test) { //byte equality will fail here(padding)
- t.Error("encryption failed got:" + string(r0))
- }
-}
diff --git a/src/mapfs/plain.go b/src/mapfs/plain.go
@@ -0,0 +1,114 @@
+package mapfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+type PlainMap struct {
+ cache map[int]plainVal
+}
+
+type plainVal struct {
+ dirty bool
+ buf []byte // Actual raw data
+ siz int // Size of raw data
+}
+
+func NewPlainMap() *PlainMap {
+ return &PlainMap{
+ cache: make(map[int]plainVal),
+ }
+}
+
+func (p *PlainMap) rawRead(path string) ([]byte, error) {
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return b, err
+}
+
+func (p *PlainMap) rawWrite(path string, buf []byte) error {
+ _, err := os.Stat(path)
+ if err == nil {
+ return fmt.Errorf("File %s already exists", path)
+ }
+ err = ioutil.WriteFile(path, buf, 0644)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (p *PlainMap) get(key int) ([]byte, error) {
+ val, ok := p.cache[key]
+ if !ok {
+ return nil, fmt.Errorf("No entry with key: %d", key)
+ }
+ return val.buf, nil
+}
+
+func (p *PlainMap) put(key int, buf []byte) error {
+ _, ok := p.cache[key]
+ if ok {
+ return fmt.Errorf("Key %d already in use", key)
+ }
+ p.cache[key] = plainVal{true, buf, len(buf)}
+ return nil
+}
+
+func (p *PlainMap) add(buf []byte) (int, error) {
+ key := len(p.cache)
+ p.cache[key] = plainVal{true, buf, len(buf)}
+ return key, nil
+}
+
+func (p *PlainMap) countMatches(buf []byte) int {
+ i := 0
+ for _, v := range p.cache {
+ if bytes.Equal(v.buf, buf) {
+ i++
+ }
+ }
+ return i
+}
+
+func (p *PlainMap) virtSync() error {
+ for k, v := range p.cache {
+ p.cache[k] = plainVal{false, v.buf, v.siz}
+ }
+ return nil
+}
+
+func (p *PlainMap) syncEntry(path string, key int) error {
+ val, ok := p.cache[key]
+ if !ok {
+ return fmt.Errorf("No entry with key: %d", key)
+ }
+ if val.dirty {
+ p.rawWrite(path, val.buf)
+ }
+ return nil
+}
+
+func (p *PlainMap) size() int {
+ return len(p.cache)
+}
+
+func (p *PlainMap) string() string {
+ s := ""
+ for k, v := range p.cache {
+ dirty := ""
+ if v.dirty {
+ dirty = "yes"
+ } else {
+ dirty = "no"
+ }
+ s += fmt.Sprintf("k: %d - dirty: %s\n",
+ k, dirty)
+ }
+ return s
+}
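
plain.go folds the old io.go helpers and the unencrypted cache into the same Mapper
shape as BlowfishMap. Since every Mapper method is unexported, additional backends
have to live inside package mapfs; a skeleton for one would look roughly like this
(the type name and the no-op bodies are hypothetical, not part of this commit):

    // hypothetical in-package backend satisfying the Mapper interface
    type nopMap struct{ cache map[int][]byte }

    func (n *nopMap) rawRead(path string) ([]byte, error)    { return nil, nil }
    func (n *nopMap) rawWrite(path string, buf []byte) error { return nil }
    func (n *nopMap) get(key int) ([]byte, error)            { return n.cache[key], nil }
    func (n *nopMap) put(key int, buf []byte) error          { n.cache[key] = buf; return nil }
    func (n *nopMap) add(buf []byte) (int, error)            { k := len(n.cache); n.cache[k] = buf; return k, nil }
    func (n *nopMap) countMatches(buf []byte) int            { return 0 }
    func (n *nopMap) virtSync() error                        { return nil }
    func (n *nopMap) syncEntry(path string, key int) error   { return nil }
    func (n *nopMap) size() int                              { return len(n.cache) }
    func (n *nopMap) string() string                         { return "" }
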