package main

import (
    "fmt"
    pcgr "github.com/dgryski/go-pcgr"
    mct "github.com/dormando/mctester"
    "math/rand"
    "time"
)

// Basic persistent load test, using text protocol:
type BasicLoader struct {
    Servers               []string
    stopAfter             time.Time
    DesiredConnCount      int
    RequestsPerSleep      int
    RequestBundlesPerConn int
    SleepPerBundle        time.Duration
    DeletePercent         int // chance of issuing a delete instead of a get, out of 1000
    KeyLength             int
    KeyPrefix             string
    KeySpace              int
    KeyTTL                uint
    UseZipf               bool
    ZipfS                 float64 // s (> 1, generally 1.01-2) pulls the power curve toward 0
    ZipfV                 float64 // v (< KeySpace) puts the main part of the curve before this number
    ValueSize             uint
    ClientFlags           uint
}

func newBasicLoader() *BasicLoader {
    return &BasicLoader{
        Servers:               []string{"127.0.0.1:11211"},
        DesiredConnCount:      1,
        RequestsPerSleep:      1,
        RequestBundlesPerConn: 1,
        SleepPerBundle:        time.Millisecond * 1,
        DeletePercent:         0,
        KeyLength:             10,
        KeyPrefix:             "mctester:",
        KeySpace:              1000,
        KeyTTL:                180,
        UseZipf:               false,
        ZipfS:                 1.01,
        ZipfV:                 500,
        ValueSize:             1000,
        ClientFlags:           0,
    }
}
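
// Hypothetical example, not part of the original commit: a loader configured
// for a Zipf-skewed key distribution, so a small set of hot keys takes most
// of the traffic. The specific values are illustrative assumptions; per the
// field comments above, ZipfS just above 1 keeps a long tail, and ZipfV
// roughly marks where the main part of the curve sits within KeySpace.
func newZipfBasicLoader() *BasicLoader {
    l := newBasicLoader()
    l.UseZipf = true
    l.KeySpace = 100000
    l.ZipfS = 1.1
    l.ZipfV = 100
    return l
}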

// runBasicLoader supervises the worker goroutines. Update receives
// *BasicLoader values from the server; closing it shuts the loader down.
func runBasicLoader(Update <-chan interface{}, worker interface{}) {
    l := worker.(*BasicLoader)
    runners := 0
    nextId := 1
    workers := make(map[int]chan *BasicLoader)
    doneReceiver := make(chan int, 50)
    // Keep a map of worker id -> update channel so we can broadcast updates.
    // Worker channels get a buffer of 1 so a broadcast doesn't have to wait
    // for every worker to reach its update check.

    for {
        keepGoing := true
        for runners < l.DesiredConnCount {
            // Give it a buffer of 1 to avoid a race where the worker is
            // dying rather than looking at its update channel.
            wc := make(chan *BasicLoader, 1)
            workers[nextId] = wc
            go basicWorker(nextId, doneReceiver, wc, l)
            nextId++
            runners++
        }

        select {
        case id := <-doneReceiver:
            runners--
            delete(workers, id)
        case update, ok := <-Update:
            if ok {
                fmt.Printf("received basic loader update\n")
                l = update.(*BasicLoader)
                // Blast the update out to everyone.
                // Note they will pick up changes during their next sleep cycle.
                for _, w := range workers {
                    w <- l
                }
                fmt.Printf("sent loader update to workers\n")
            } else {
                keepGoing = false
            }
        }

        if !keepGoing {
            // Let all the workers die off so they don't explode when writing
            // to their doneChan.
            for runners != 0 {
                id := <-doneReceiver
                delete(workers, id)
                runners--
            }
            return
        }
    }
}
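
// A minimal sketch, not part of the original commit, of how a caller might
// drive runBasicLoader. It assumes the "server" side simply owns the Update
// channel: send a fresh *BasicLoader to reconfigure the live workers, and
// close the channel to shut the loader down.
func exampleRunBasicLoader() {
    update := make(chan interface{})
    go runBasicLoader(update, newBasicLoader())

    // Reconfigure on the fly; workers pick this up on their next sleep cycle.
    next := newBasicLoader()
    next.DesiredConnCount = 10
    next.RequestsPerSleep = 5
    update <- next

    // Closing the channel tells runBasicLoader to drain its workers and return.
    close(update)
}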

// TODO: use sync.Pool for Item/etc?
// pool.Put() items back before sleep.
// may also be able to cache mc's bufio's this way.
func basicWorker(id int, doneChan chan<- int, updateChan <-chan *BasicLoader, l *BasicLoader) {
    // TODO: server selector.
    host := l.Servers[0]
    mc := mct.NewClient(host)
    bundles := l.RequestBundlesPerConn

    rs := pcgr.New(time.Now().UnixNano(), 0)
    var zipRS *rand.Zipf
    randR := rand.New(&rs) // main randomizer, wrapped so we can use the rand.Rand interface.
    if l.UseZipf {
        zipRS = rand.NewZipf(randR, l.ZipfS, l.ZipfV, uint64(l.KeySpace))
        if zipRS == nil {
            fmt.Printf("bad arguments to zipf: S: %f V: %f\n", l.ZipfS, l.ZipfV)
            return
        }
    }

    subRS := pcgr.New(1, 0) // secondary randomizer, re-seeded to generate random key strings.
    var res int
    // TODO: struct with id, res, err?
    defer func() {
        doneChan <- id
        fmt.Printf("worker result: %d\n", res)
    }()

    // A bundle count of -1 is treated as "run indefinitely"; only positive
    // counts tick down.
    for bundles == -1 || bundles > 0 {
        if bundles > 0 {
            bundles--
        }
        for i := l.RequestsPerSleep; i > 0; i-- {
            // Generate a key.
            // TODO: Allow min/max length for keys.
            // The random key needs to stick with the random length, or we end
            // up with KeySpace * (max-min) unique keys.
            // Need to pull the randomizer exactly once (then just modulus for
            // a poor-but-probably-fine random value), then build the random
            // string from the rest.
            // Could also re-seed it twice: pull Intn once for the length,
            // re-seed, then pull again for the key space.
            // (The current fixed-length re-seed trick is sketched in
            // isolation in exampleKeyForSeed at the bottom of this file.)

            keyLen := l.KeyLength
            if l.UseZipf {
                subRS.Seed(int64(zipRS.Uint64()))
            } else {
                subRS.Seed(int64(randR.Intn(l.KeySpace)))
            }
            // TODO: might be nice to pass the prefix (by ref?) in here to
            // make use of strings.Builder.
            key := l.KeyPrefix + mct.RandString(&subRS, keyLen)
            // Chance we issue a delete instead (DeletePercent is out of 1000).
            if l.DeletePercent != 0 && randR.Intn(1000) < l.DeletePercent {
                _, err := mc.Delete(key)
                if err != nil {
                    fmt.Println(err)
                    res = -1
                    return
                }
            } else {
                // issue gets
                _, _, code, err := mc.Get(key)
                // validate responses
                if err != nil {
                    fmt.Println(err)
                    res = -1
                    return
                }
                // set missing values
                if code == mct.McMISS {
                    // TODO: random sizing
                    value := mct.RandBytes(&rs, int(l.ValueSize))
                    mc.Set(key, uint32(l.ClientFlags), uint32(l.KeyTTL), value)
                }
            }
        }
        select {
        case update := <-updateChan:
            // TODO: re-create the client if the server changed.
            l = update
        default:
            // nothing; just a fast, non-blocking check for updates.
        }
        time.Sleep(l.SleepPerBundle)
    }
}
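
// A standalone sketch of the key-generation trick used above; this helper is
// an assumed addition, not part of the original commit. Re-seeding a
// secondary PCG stream from a single draw (a Zipf sample or Intn over
// KeySpace) means the same seed always reproduces the same random-looking
// key of the requested length.
func exampleKeyForSeed(prefix string, seed int64, keyLen int) string {
    sub := pcgr.New(seed, 0)
    return prefix + mct.RandString(&sub, keyLen)
}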