Sunday, August 30, 2009

Improved single queue SimPy code

In an earlier blog entry I compared PDQ and SimPy for a single queue. The code I wrote was pretty cheesy and based on a very early example from Norm Matloff's Introduction to Discrete-Event Simulation and the SimPy Language.

After writing the SimPy code for the three-tier eBiz solution, I wanted to go back and correct some of my initial single-queue code.

Below is what I feel is better code than my original single queue solution.




#!/usr/bin/env python

import sys
import time
from random import Random

from SimPy.Simulation import *


class G:
    # Simulation-wide constants and counters.
    # Rnd = Random(time.mktime(time.localtime()))  # seed from the clock for varying runs
    Rnd = Random(12345)                            # fixed seed for repeatable runs
    MyQueue = Resource(1)                          # the single server
    QMon = Monitor()                               # samples the length of the wait queue
    ServiceTime = 0.50
    TotalCalls = 0L
    TotalResidence = 0L
    TotalWait = 0L
    TotalService = 0L


class WorkLoad(Process):
    """One call: queue for the server, get served, record the times."""

    def __init__(self):
        Process.__init__(self)
        self.StartUpTime = 0.0

    def Run(self):
        self.StartUpTime = now()
        yield request, self, G.MyQueue
        G.TotalWait += now() - self.StartUpTime
        yield hold, self, G.ServiceTime
        G.QMon.observe(len(G.MyQueue.waitQ))
        G.TotalResidence += now() - self.StartUpTime
        yield release, self, G.MyQueue
        G.TotalCalls += 1


class Generator(Process):
    """Open workload: Poisson arrivals at rate Lambda, one WorkLoad per arrival."""

    def __init__(self, Lambda):
        Process.__init__(self)
        self.Lambda = Lambda

    def execute(self):
        while 1:
            yield hold, self, G.Rnd.expovariate(self.Lambda)
            W = WorkLoad()
            activate(W, W.Run())


def main():
    Lambda = float(sys.argv[1])
    MaxSimTime = 10000.00

    G.TotalCalls = 0L
    G.TotalResidence = 0L
    G.TotalWait = 0L
    G.TotalService = 0L

    initialize()

    print >> sys.stderr, "MaxSimTime = ", MaxSimTime

    g = Generator(Lambda)
    activate(g, g.execute())

    simulate(until=MaxSimTime)

    # One CSV line to stdout for collecting results across runs.
    print Lambda, ",", MaxSimTime, ",", G.TotalCalls, ",", \
        G.TotalCalls / MaxSimTime, ",", G.TotalWait, ",", G.TotalResidence, ",", \
        G.TotalResidence / G.TotalCalls, ",", G.TotalWait / G.TotalCalls, ",", \
        (G.TotalResidence / G.TotalCalls) - (G.TotalWait / G.TotalCalls), ",", \
        ((G.ServiceTime * G.TotalCalls) / MaxSimTime) * 100, "%", ",", G.QMon.mean()

    # Human-readable summary to stderr.
    print >> sys.stderr, "Ideal Throughput     : ", Lambda
    print >> sys.stderr, "Simulated Seconds    : ", MaxSimTime
    print >> sys.stderr, "Number of calls      : ", G.TotalCalls
    print >> sys.stderr, "Throughput           : ", G.TotalCalls / MaxSimTime
    print >> sys.stderr, "Total Wait Time      : ", G.TotalWait
    print >> sys.stderr, "Total Residence Time : ", G.TotalResidence
    print >> sys.stderr, "Mean Residence Time  : ", G.TotalResidence / G.TotalCalls
    print >> sys.stderr, "Mean Wait Time       : ", G.TotalWait / G.TotalCalls
    print >> sys.stderr, "Mean Service Time    : ", (G.TotalResidence / G.TotalCalls) - (G.TotalWait / G.TotalCalls)
    print >> sys.stderr, "Total Utilization    : ", ((G.ServiceTime * G.TotalCalls) / MaxSimTime) * 100, " %"
    print >> sys.stderr, "Mean WaitQ           : ", G.QMon.mean()


if __name__ == '__main__':
    main()
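
As a sanity check on what the script prints, the analytic M/M/1 results (which is what PDQ computes for this model) follow directly from Lambda and the service time. The little helper below is just a sketch for that comparison, not part of the script above; note that QMon samples the wait queue at service completions, so its mean is only roughly comparable to the time-averaged queue length.

#!/usr/bin/env python
# Sketch only: analytic M/M/1 values to compare against the simulated output.
import sys

def mm1(Lambda, ServiceTime):
    Rho = Lambda * ServiceTime             # utilization
    assert Rho < 1.0, "unstable: arrivals faster than the server"
    Residence = ServiceTime / (1.0 - Rho)  # mean residence time (wait + service)
    Wait = Residence - ServiceTime         # mean wait time
    WaitQ = (Rho * Rho) / (1.0 - Rho)      # mean number waiting in the queue
    return Rho, Residence, Wait, WaitQ

if __name__ == '__main__':
    Rho, Residence, Wait, WaitQ = mm1(float(sys.argv[1]), 0.50)
    print >> sys.stderr, "Utilization         : ", Rho * 100, " %"
    print >> sys.stderr, "Mean Residence Time : ", Residence
    print >> sys.stderr, "Mean Wait Time      : ", Wait
    print >> sys.stderr, "Mean WaitQ          : ", WaitQ

For example, at Lambda = 1.5 against the 0.50 second service time this gives 75% utilization and a mean residence time of 2.0 seconds, which is what the simulated numbers should drift toward as MaxSimTime grows.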


The results are the same as before, but now, instead of creating a gajillion objects to activate, only one generator "thread" is created, which in turn makes the calls to the queue. Much faster than my original code, and laid out better.
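
To make that difference concrete, here is a rough sketch (not the original code) of the two ways of driving the arrivals with the same classic SimPy calls used above. The old version did roughly what the first function does, scheduling a pre-built WorkLoad object for every arrival before the run started; the script above does the second.

def preactivate_all(Lambda, MaxSimTime):
    # Roughly the old approach: build and schedule every arrival up front,
    # so every WorkLoad object exists before the clock starts.
    t = G.Rnd.expovariate(Lambda)
    while t < MaxSimTime:
        w = WorkLoad()
        activate(w, w.Run(), at=t)
        t += G.Rnd.expovariate(Lambda)

def generate_on_the_fly(Lambda):
    # The approach used above: one Generator process sleeps for each
    # interarrival time and only then creates the next WorkLoad.
    g = Generator(Lambda)
    activate(g, g.execute())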
