Compare commits
No commits in common. "actorbintree" and "pubsub" have entirely different histories.
actorbintree ... pubsub
@@ -25,7 +25,7 @@ grade:
   tags:
     - cs206
   image:
-    name: smarter3/moocs:reactive-actorbintree-2020-04-15
+    name: smarter3/moocs:concpar-pubsub-2020-03-15
     entrypoint: [""]
   allow_failure: true
   before_script:
27  build.sbt
@@ -1,24 +1,11 @@
-course := "reactive"
+course := "concpar"

-assignment := "actorbintree"
+assignment := "pubsub"

-testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v", "-s")
-parallelExecution in Test := false
-
-val akkaVersion = "2.6.0"
-
 scalaVersion := "0.23.0-bin-20200211-5b006fb-NIGHTLY"

-scalacOptions ++= Seq(
-  "-feature",
-  "-deprecation",
-  "-encoding", "UTF-8",
-  "-unchecked",
-  "-language:implicitConversions"
-)
+scalacOptions ++= Seq("-language:implicitConversions", "-deprecation")

-libraryDependencies ++= Seq(
-  "com.typesafe.akka" %% "akka-actor" % akkaVersion,
-  "com.typesafe.akka" %% "akka-testkit" % akkaVersion % Test,
-  "com.novocode" % "junit-interface" % "0.11" % Test
-).map(_.withDottyCompat(scalaVersion.value))
-testSuite := "actorbintree.BinaryTreeSuite"
+libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % Test
+
+testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v", "-s")
+
+testSuite := "pubsub.BoundedBufferSuite"
Binary file not shown.
@@ -1,189 +0,0 @@
/**
 * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
 */
package actorbintree

import akka.actor._
import scala.collection.immutable.Queue

object BinaryTreeSet {

  trait Operation {
    def requester: ActorRef
    def id: Int
    def elem: Int
  }

  trait OperationReply {
    def id: Int
  }

  /** Request with identifier `id` to insert an element `elem` into the tree.
    * The actor at reference `requester` should be notified when this operation
    * is completed.
    */
  case class Insert(requester: ActorRef, id: Int, elem: Int) extends Operation

  /** Request with identifier `id` to check whether an element `elem` is present
    * in the tree. The actor at reference `requester` should be notified when
    * this operation is completed.
    */
  case class Contains(requester: ActorRef, id: Int, elem: Int) extends Operation

  /** Request with identifier `id` to remove the element `elem` from the tree.
    * The actor at reference `requester` should be notified when this operation
    * is completed.
    */
  case class Remove(requester: ActorRef, id: Int, elem: Int) extends Operation

  /** Request to perform garbage collection */
  case object GC

  /** Holds the answer to the Contains request with identifier `id`.
    * `result` is true if and only if the element is present in the tree.
    */
  case class ContainsResult(id: Int, result: Boolean) extends OperationReply

  /** Message to signal successful completion of an insert or remove operation. */
  case class OperationFinished(id: Int) extends OperationReply

}


class BinaryTreeSet extends Actor {
  import BinaryTreeSet._
  import BinaryTreeNode._

  def createRoot: ActorRef = context.actorOf(BinaryTreeNode.props(0, initiallyRemoved = true))

  var root = createRoot

  // optional
  var pendingQueue = Queue.empty[Operation]

  // optional
  def receive = normal

  // optional
  /** Accepts `Operation` and `GC` messages. */
  val normal: Receive = {
    case op: Operation => root ! op
    case GC => {
      val newRoot = createRoot;
      root ! CopyTo(newRoot)
      context.become(garbageCollecting(newRoot))
    }
  }

  // optional
  /** Handles messages while garbage collection is performed.
    * `newRoot` is the root of the new binary tree where we want to copy
    * all non-removed elements into.
    */
  def garbageCollecting(newRoot: ActorRef): Receive = {
    case op: Operation => pendingQueue = pendingQueue.enqueue(op)
    case CopyFinished =>
      pendingQueue.foreach(newRoot ! _) // foreach preserves order of a queue (same as dequeueing)
      root ! PoisonPill // Will also stop all of its children
      pendingQueue = Queue.empty
      root = newRoot;
      context.become(normal)
    // Ignore GC messages here
  }
}

object BinaryTreeNode {
  trait Position

  case object Left extends Position
  case object Right extends Position

  case class CopyTo(treeNode: ActorRef)
  case object CopyFinished

  def props(elem: Int, initiallyRemoved: Boolean) = Props(classOf[BinaryTreeNode], elem, initiallyRemoved)
}

class BinaryTreeNode(val elem: Int, initiallyRemoved: Boolean) extends Actor {
  import BinaryTreeNode._
  import BinaryTreeSet._

  var subtrees = Map[Position, ActorRef]()
  var removed = initiallyRemoved

  // optional
  def receive = normal

  def goDownTo(elem: Int): Position = if (elem < this.elem) Left else Right
  // optional
  /** Handles `Operation` messages and `CopyTo` requests. */
  val normal: Receive = {
    case Insert(requester, id, elem) =>
      if (elem == this.elem && !removed) {
        requester ! OperationFinished(id)
      } else {
        val nextPos = goDownTo(elem)

        subtrees get nextPos match {
          case Some(node) => node ! Insert(requester, id, elem)
          case None => {
            val newActorSubtree = (nextPos, context.actorOf(BinaryTreeNode.props(elem, false)))
            subtrees = subtrees + newActorSubtree
            requester ! OperationFinished(id);
          }
        }
      }
    case Contains(requester, id, elem) =>
      if (elem == this.elem && !removed)
        requester ! ContainsResult(id, true)
      else {
        // Need to search subtrees
        subtrees get goDownTo(elem) match {
          case Some(node) => node ! Contains(requester, id, elem)
          case None => requester ! ContainsResult(id, false)
        }
      }

    case Remove(requester, id, elem) =>
      if (elem == this.elem && !removed) {
        removed = true
        requester ! OperationFinished(id)
      } else {
        subtrees get goDownTo(elem) match {
          case Some(node) => node ! Remove(requester, id, elem)
          case None => requester ! OperationFinished(id) // (elem isn't in the tree)
        }
      }

    case CopyTo(newRoot) =>
      // We are already done, nothing to do
      if (removed && subtrees.isEmpty) context.parent ! CopyFinished
      else {
        if (!removed) newRoot ! Insert(self, elem, elem)
        subtrees.values foreach (_ ! CopyTo(newRoot)) // Copy subtrees elems
        // val insertConfirmed = if(removed) true else false, hence we can simply pass removed
        context.become(copying(subtrees.values.toSet, removed))
      }
  }

  // optional
  /** `expected` is the set of ActorRefs whose replies we are waiting for,
    * `insertConfirmed` tracks whether the copy of this node to the new tree has been confirmed.
    */
  def copying(expected: Set[ActorRef], insertConfirmed: Boolean): Receive = {
    // To catch the insert of this node into the new tree being finished
    case OperationFinished(_) => {
      if (expected.isEmpty) context.parent ! CopyFinished
      else context.become(copying(expected, true))
    }

    case CopyFinished => {
      val newExp = expected - sender
      if (insertConfirmed && newExp.isEmpty) {
        context.parent ! CopyFinished
      } else {
        context.become(copying(newExp, insertConfirmed))
      }
    }
  }
}
34  src/main/scala/pubsub/Client.scala  Normal file
@@ -0,0 +1,34 @@
package pubsub

import java.net.Socket
import java.nio.charset.Charset

case class Client(socket: Socket, id: Int)
    (implicit charset: Charset = Charset.forName("UTF-8")) {
  private var name_ = "client_" + id
  val outStream = socket.getOutputStream()

  def name = name_
  def name_=(newName: String) = name_ = newName

  def isConnected: Boolean = socket.isConnected()

  def close(): Unit = socket.close()

  def send(message: String): Unit = {
    val payload = message.getBytes(charset)
    outStream.write(payload)
    outStream.flush()
  }

  def sendAck(ackType: String, message: String): Unit =
    send(s"${ackType}_ack $message\n")

  def sayHello(): Unit = sendAck("connection", name_)
  def sayGoodbye(): Unit = send(s"Bye Bye dear $name_!\n")

  def invalidPreviousCommand(): Unit = send("! previous command was invalid\n")

  def sendMessage(sender: String, topic: String, message: String): Unit =
    send(s"$sender@$topic $message\n")
}
51  src/main/scala/pubsub/Server.scala  Normal file
@@ -0,0 +1,51 @@
package pubsub

import java.net.ServerSocket
import java.net.Socket
import java.io.BufferedReader
import java.io.InputStreamReader
import java.net.URL
import java.util.concurrent.Executors

import scala.concurrent.JavaConversions._
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import pubsub.collection._
import pubsub.command._
import pubsub.network.TCPReader

object Server extends App {
  val port = 7676
  val maxWorkers = 12
  val bufferSize = 20
  val socket = new ServerSocket(port)
  try {
    val whatismyip = new URL("http://checkip.amazonaws.com")
    val in = new BufferedReader(new InputStreamReader(whatismyip.openStream()));
    val serverIP = in.readLine()
    println(s"Connect to $serverIP (or `localhost`), port $port with `telnet` to join this server")
  } catch {
    case e: Exception =>
      println("There is a problem with your internet connection, you can only access it via localhost")
  }

  val buffer = new BoundedBuffer[Command](20)
  val commandHandlers = for {
    i <- 0 until maxWorkers
  } yield {
    Future {
      new CommandHandler(buffer).handle()
    }
  }
  val threadPool = Executors.newFixedThreadPool(maxWorkers)

  var clientId = 0
  while (true) {
    val client = socket.accept();
    val cid = clientId
    clientId += 1
    Future {
      new TCPReader(clientId, client, buffer).read()
    }(ExecutionContext.fromExecutorService(threadPool))
  }
}
34  src/main/scala/pubsub/collection/AbstractBoundedBuffer.scala  Normal file
@@ -0,0 +1,34 @@
package pubsub.collection

import instrumentation.Monitor

trait InternalBuffer[T] {
  def update(index: Int, elem: T): Unit
  def apply(index: Int): T
  def delete(index: Int): Unit
  val size: Int
}


abstract class AbstractBoundedBuffer[T](bufferSize: Int) extends Monitor {
  require(bufferSize > 0)

  def put(element: T): Unit
  def take(): T

  val buffer: InternalBuffer[T] = new InternalBuffer[T] {
    private val buffer: Array[Option[T]] = new Array(bufferSize)
    def update(index: Int, elem: T): Unit = buffer(index) = Some(elem)
    def apply(index: Int): T = buffer(index).get
    def delete(index: Int): Unit = buffer(index) = None
    val size = bufferSize
  }

  def head: Int = _head
  def head_=(e: Int): Unit = _head = e
  def count: Int = _count
  def count_=(e: Int): Unit = _count = e

  private var _head = 0;
  private var _count = 0;
}
49  src/main/scala/pubsub/collection/BoundedBuffer.scala  Normal file
@@ -0,0 +1,49 @@
package pubsub.collection

class BoundedBuffer[T](size: Int) extends AbstractBoundedBuffer[T](size) {

  // You have at your disposition the following two variables:
  // - count : Int
  // - head : Int
  // In addition, you have access to an array-like internal buffer:
  // - buffer
  // You can access elements of this buffer using:
  // - buffer(i)
  // Similarly, you can set elements using:
  // - buffer(i) = e
  //
  // You do not need to create those variables yourself!
  // They are inherited from the AbstractBoundedBuffer class.

  override def put(e: T): Unit = synchronized {
    while (isFull) wait()
    buffer(head) = e
    head = nextHeadIndex;
    count = count + 1;
    notifyAll()
  }

  override def take(): T = synchronized {
    while (isEmpty) wait()

    val e = buffer(tailIndex);
    buffer.delete(tailIndex);
    count = count - 1;
    notifyAll()
    e
  }

  def isEmpty: Boolean = count == 0;
  def isFull: Boolean = count == size
  def tailIndex: Int = {
    val diff = head - count
    if (diff >= 0) diff
    else diff + size
  }
  def nextHeadIndex: Int = (head + 1) % size

  // You may want to add methods to:
  // - check whether the buffer is empty
  // - check whether the buffer is full
  // - get the index of tail
}
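The comment block in BoundedBuffer.scala documents the state inherited from AbstractBoundedBuffer (count, head and the internal buffer). As a quick illustration of how the blocking put/take pair above is meant to be used, here is a minimal, hypothetical producer/consumer sketch; the object name, thread bodies and element values are illustrative only and are not part of this commit.

// Hypothetical usage sketch (not part of this commit): one producer and one
// consumer sharing a BoundedBuffer of capacity 2. put() blocks while the
// buffer is full and take() blocks while it is empty.
object BoundedBufferDemo extends App {
  import pubsub.collection.BoundedBuffer

  val buffer = new BoundedBuffer[Int](2)

  val producer = new Thread(() => (1 to 5).foreach(buffer.put))
  val consumer = new Thread(() => (1 to 5).foreach(_ => println(buffer.take())))

  producer.start(); consumer.start()
  producer.join(); consumer.join()
}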
65  src/main/scala/pubsub/collection/ConcurrentMultiMap.scala  Normal file
@@ -0,0 +1,65 @@
package pubsub.collection

import scala.collection.mutable
import java.util.concurrent.locks.ReentrantReadWriteLock

class ConcurrentMultiMap[K, V] {

  private val lockRW = new ReentrantReadWriteLock()
  val map = mutable.HashMap[K, Set[V]]()

  def lock(): Unit = lockRW.writeLock().lock()

  def unlock(): Unit = lockRW.writeLock().unlock()

  def add(key: K, value: V): Unit = {
    try {
      lockRW.writeLock().lock()
      map.get(key) match {
        case Some(set) =>
          if (!set.contains(value)) {
            map += ((key, set + value))
          }
        case None =>
          map += ((key, Set(value)))
      }
    } finally {
      lockRW.writeLock().unlock()
    }
  }

  def get(key: K): Option[Set[V]] = {
    try {
      lockRW.readLock().lock()
      val v = map.get(key)
      v
    } finally {
      lockRW.readLock().unlock()
    }
  }

  def remove(key: K, value: V): Unit = {
    try {
      lockRW.writeLock().lock()
      map.get(key) match {
        case Some(set) =>
          if (set.contains(value)) {
            map += ((key, set - value))
          }
        case None =>
      }
    } finally {
      lockRW.writeLock().unlock()
    }
  }

  def removeValueFromAll(value: V): Unit = {
    try {
      lockRW.writeLock().lock()
      map.keys.foreach(remove(_, value))
    } finally {
      lockRW.writeLock().unlock()
    }
  }

}
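A short, hypothetical usage sketch of the multimap above (the object name, topic names and client ids are placeholders, not taken from the diff): every key maps to an immutable Set of values, reads take the read lock, and add/remove/removeValueFromAll take the exclusive write lock.

// Hypothetical usage sketch (not part of this commit).
object MultiMapDemo extends App {
  import pubsub.collection.ConcurrentMultiMap

  val subscriptions = new ConcurrentMultiMap[String, Int]()
  subscriptions.add("scala", 1)
  subscriptions.add("scala", 2)
  subscriptions.add("news", 1)

  println(subscriptions.get("scala")) // Some(Set(1, 2))

  subscriptions.remove("scala", 1)    // drop client 1 from one topic
  subscriptions.removeValueFromAll(2) // drop client 2 from every topic
  println(subscriptions.get("scala")) // Some(Set()) -- the key stays, the set is now empty
}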
43  src/main/scala/pubsub/command/CommandHandler.scala  Normal file
@@ -0,0 +1,43 @@
package pubsub.command

import pubsub.Client
import pubsub.collection._

class CommandHandler(buffer: BoundedBuffer[Command]) {
  import CommandHandler._

  def handle(): Unit = {
    val command = buffer.take()

    command match {
      case Subscribe(topic, client) =>
        multiMap.add(topic, client)
        client.sendAck("subscribe", topic)

      case Unsubscribe(topic, client) =>
        multiMap.remove(topic, client)
        client.sendAck("unsubscribe", topic)

      case Publish(topic, message, sender) =>
        for {
          subscribers <- multiMap.get(topic)
          client <- subscribers
        } client.sendMessage(sender.name, topic, message)

      case EndOfClient(client) =>
        multiMap.removeValueFromAll(client)

      case Rename(newName, client) =>
        client.name = newName
        client.sendAck("rename", newName)

      case _ =>
        // nothing should happen
    }
  }
}


object CommandHandler {
  val multiMap = new ConcurrentMultiMap[String, Client]()
}
45  src/main/scala/pubsub/command/CommandReader.scala  Normal file
@@ -0,0 +1,45 @@
package pubsub.command

import java.io.BufferedReader
import java.io.InputStreamReader
import java.io.InputStream

import pubsub.Client

class CommandReader(inStream: InputStream, client: Client) {
  val inputBuffer = new BufferedReader(new InputStreamReader(inStream))

  def fetchCommand(): Command = {
    val line = inputBuffer.readLine()

    if (line == null || line.startsWith("leave")) {
      EndOfClient(client)
    }
    else {
      val quoteIndex = line.indexOf('\'')
      val hasPayload = quoteIndex != -1
      val parts =
        if (!hasPayload) {
          line.split(" ").toList
        } else {
          val (command, payload) = line.splitAt(quoteIndex)
          command.split(" ").toList :+ payload
        }

      parts match {
        case "subscribe" :: topic :: Nil => Subscribe(topic, client)
        case "unsubscribe" :: topic :: Nil => Unsubscribe(topic, client)
        case "rename" :: newName :: Nil => Rename(newName, client)

        case "publish" :: topic :: msg :: Nil if hasPayload && msg != "\'" =>
          var message = msg
          while (!message.endsWith("\'")) {
            message += "\n" + inputBuffer.readLine()
          }
          Publish(topic, message, client)

        case _ => MalformedCommand(client)
      }
    }
  }
}
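For reference, a hedged summary of the textual protocol that fetchCommand() above accepts; the object name and sample topics are hypothetical and not part of this commit. A publish payload must be wrapped in single quotes, and a payload whose opening line does not yet end with a closing quote is continued on subsequent lines.

// Hypothetical sketch (not part of this commit): raw lines a telnet client
// could type, paired with the Command that fetchCommand() produces for each.
object ProtocolExamples {
  val examples: List[(String, String)] = List(
    "subscribe scala"             -> "Subscribe(topic = scala)",
    "unsubscribe scala"           -> "Unsubscribe(topic = scala)",
    "rename alice"                -> "Rename(newName = alice)",
    "publish scala 'hello world'" -> "Publish(topic = scala, message = 'hello world', quotes included)",
    "leave"                       -> "EndOfClient",
    "frobnicate now"              -> "MalformedCommand"
  )
}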
33  src/main/scala/pubsub/command/CommandType.scala  Normal file
@@ -0,0 +1,33 @@
package pubsub.command

import pubsub.Client

sealed trait Topic {
  val topic: String
}

sealed abstract class Command(from: Client) {
  def toString(): String
}
case class EndOfClient(from: Client) extends Command(from) {
  override def toString(): String = s"${from.name}: End of client"
}
case class MalformedCommand(from: Client) extends Command(from) {
  override def toString(): String = s"${from.name}: Invalid Command"
}
case class Subscribe(topic: String,
    from: Client) extends Command(from) with Topic {
  override def toString(): String = s"${from.name}: Subscribe @ $topic"
}
case class Unsubscribe(topic: String,
    from: Client) extends Command(from) with Topic {
  override def toString(): String = s"${from.name}: Unsubscribe @ $topic"
}
case class Publish(topic: String, message: String,
    from: Client) extends Command(from) with Topic {
  override def toString(): String = s"${from.name}: Publish @ $topic -> $message"
}

case class Rename(newName: String, from: Client) extends Command(from) {
  override def toString(): String = s"${from.name}: Renamed to $newName"
}
72  src/main/scala/pubsub/instrumentation/MockedMonitor.scala  Normal file
@@ -0,0 +1,72 @@
package instrumentation

trait MockedMonitor extends Monitor {
  def scheduler: Scheduler

  // Can be overridden.
  override def waitDefault() = {
    scheduler.log("wait")
    scheduler updateThreadState Wait(this, scheduler.threadLocks.tail)
  }
  override def synchronizedDefault[T](toExecute: =>T): T = {
    scheduler.log("synchronized check")
    val prevLocks = scheduler.threadLocks
    scheduler updateThreadState Sync(this, prevLocks) // If this belongs to prevLocks, should just continue.
    scheduler.log("synchronized -> enter")
    try {
      toExecute
    } finally {
      scheduler updateThreadState Running(prevLocks)
      scheduler.log("synchronized -> out")
    }
  }
  override def notifyDefault() = {
    scheduler mapOtherStates {
      state => state match {
        case Wait(lockToAquire, locks) if lockToAquire == this => SyncUnique(this, state.locks)
        case e => e
      }
    }
    scheduler.log("notify")
  }
  override def notifyAllDefault() = {
    scheduler mapOtherStates {
      state => state match {
        case Wait(lockToAquire, locks) if lockToAquire == this => Sync(this, state.locks)
        case SyncUnique(lockToAquire, locks) if lockToAquire == this => Sync(this, state.locks)
        case e => e
      }
    }
    scheduler.log("notifyAll")
  }
}

trait LockFreeMonitor extends Monitor {
  override def waitDefault() = {
    throw new Exception("Please use lock-free structures and do not use wait()")
  }
  override def synchronizedDefault[T](toExecute: =>T): T = {
    throw new Exception("Please use lock-free structures and do not use synchronized()")
  }
  override def notifyDefault() = {
    throw new Exception("Please use lock-free structures and do not use notify()")
  }
  override def notifyAllDefault() = {
    throw new Exception("Please use lock-free structures and do not use notifyAll()")
  }
}


abstract class ThreadState {
  def locks: Seq[AnyRef]
}
trait CanContinueIfAcquiresLock extends ThreadState {
  def lockToAquire: AnyRef
}
case object Start extends ThreadState { def locks: Seq[AnyRef] = Seq.empty }
case object End extends ThreadState { def locks: Seq[AnyRef] = Seq.empty }
case class Wait(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState
case class SyncUnique(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState with CanContinueIfAcquiresLock
case class Sync(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState with CanContinueIfAcquiresLock
case class Running(locks: Seq[AnyRef]) extends ThreadState
case class VariableReadWrite(locks: Seq[AnyRef]) extends ThreadState
23  src/main/scala/pubsub/instrumentation/Monitor.scala  Normal file
@@ -0,0 +1,23 @@
package instrumentation

class Dummy

trait Monitor {
  implicit val dummy: Dummy = new Dummy

  def wait()(implicit i: Dummy) = waitDefault()

  def synchronized[T](e: => T)(implicit i: Dummy) = synchronizedDefault(e)

  def notify()(implicit i: Dummy) = notifyDefault()

  def notifyAll()(implicit i: Dummy) = notifyAllDefault()

  private val lock = new AnyRef

  // Can be overridden.
  def waitDefault(): Unit = lock.wait()
  def synchronizedDefault[T](toExecute: =>T): T = lock.synchronized(toExecute)
  def notifyDefault(): Unit = lock.notify()
  def notifyAllDefault(): Unit = lock.notifyAll()
}
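Monitor is the hook that makes the instrumentation possible: wait, synchronized, notify and notifyAll take an implicit Dummy so that, inside a class extending the trait, these calls resolve to the trait's methods rather than the ones on AnyRef; by default they forward to a private lock, and MockedMonitor above reroutes them to the deterministic Scheduler. Below is a minimal, hypothetical sketch of a class written against this trait; the class name and its logic are illustrative and assume the same call-resolution trick that BoundedBuffer relies on.

// Hypothetical sketch (not part of this commit): a one-place cell built on
// Monitor. In production the calls below hit Monitor's private lock; under
// test, mixing in MockedMonitor routes them through the Scheduler instead.
class OnePlaceCell extends instrumentation.Monitor {
  private var value: Option[Int] = None

  def put(x: Int): Unit = synchronized {
    while (value.nonEmpty) wait() // block while the cell is occupied
    value = Some(x)
    notifyAll()
  }

  def get(): Int = synchronized {
    while (value.isEmpty) wait()  // block while the cell is empty
    val x = value.get
    value = None
    notifyAll()
    x
  }
}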
@@ -0,0 +1,63 @@
package instrumentation

import java.util.concurrent._;
import scala.concurrent.duration._
import scala.collection.mutable._
import Stats._

import java.util.concurrent.atomic.AtomicInteger

import pubsub.collection._

class SchedulableInternalBuffer[T](val size: Int, scheduler: Scheduler) extends InternalBuffer[T] {
  private val buffer = new Array[Option[T]](size)
  private val threadBuffer = new Array[Option[Int]](size) // Who last wrote in the array.

  def update(index: Int, elem: T): Unit = {
    scheduler.exec {
      buffer(index) = Some(elem)
      threadBuffer(index) = Some(scheduler.threadId)
    }(s"Write buffer($index) = $elem")
  }

  def apply(index: Int): T = scheduler.exec {
    buffer(index).fold {
      threadBuffer(index).fold {
        throw new Exception(s"buffer($index) was never set ! ")
      } { tid =>
        throw new Exception(s"buffer($index) was deleted by thread $tid ! ")
      }
    }(identity)
  }(s"Read buffer($index)")

  def delete(index: Int): Unit = {
    scheduler.exec {
      buffer(index) = None
      threadBuffer(index) = Some(scheduler.threadId)
    }(s"Delete buffer($index)")
  }
}

trait MockedInternals[T] { self: SchedulableBoundedBuffer[T] =>
  override val buffer = new SchedulableInternalBuffer[T](self.size, self.scheduler)

  var h: Int = 0
  var c: Int = 0

  override def head_=(i: Int) = scheduler.exec {
    h = i
  }(s"Write head = $i")
  override def head: Int = scheduler.exec { h }(s"Read head -> $h")

  override def count_=(i: Int) = scheduler.exec {
    c = i
  }(s"Write count = $i")

  override def count: Int = scheduler.exec { c }(s"Read count -> $c")
}

class SchedulableBoundedBuffer[T](val size: Int, val scheduler: Scheduler)
  extends BoundedBuffer[T](size) with MockedMonitor with MockedInternals[T] {

}
305  src/main/scala/pubsub/instrumentation/Scheduler.scala  Normal file
@@ -0,0 +1,305 @@
package instrumentation

import java.util.concurrent._;
import scala.concurrent.duration._
import scala.collection.mutable._
import Stats._

import java.util.concurrent.atomic.AtomicInteger

sealed abstract class Result
case class RetVal(rets: List[Any]) extends Result
case class Except(msg: String, stackTrace: Array[StackTraceElement]) extends Result
case class Timeout(msg: String) extends Result

/**
 * A class that maintains schedule and a set of thread ids.
 * The schedules are advanced after an operation of a SchedulableBuffer is performed.
 * Note: the real schedule that is executed may deviate from the input schedule
 * due to the adjustments that had to be made for locks
 */
class Scheduler(sched: List[Int]) {
  val maxOps = 500 // a limit on the maximum number of operations the code is allowed to perform

  private var schedule = sched
  private var numThreads = 0
  private val realToFakeThreadId = Map[Long, Int]()
  private val opLog = ListBuffer[String]() // a mutable list (used for efficient concat)
  private val threadStates = Map[Int, ThreadState]()

  /**
   * Runs a set of operations in parallel as per the schedule.
   * Each operation may consist of many primitive operations like reads or writes
   * to shared data structure each of which should be executed using the function `exec`.
   * @timeout in milliseconds
   * @return true - all threads completed on time, false - some tests timed out.
   */
  def runInParallel(timeout: Long, ops: List[() => Any]): Result = {
    numThreads = ops.length
    val threadRes = Array.fill(numThreads) { None: Any }
    var exception: Option[Except] = None
    val syncObject = new Object()
    var completed = new AtomicInteger(0)
    // create threads
    val threads = ops.zipWithIndex.map {
      case (op, i) =>
        new Thread(new Runnable() {
          def run(): Unit = {
            val fakeId = i + 1
            setThreadId(fakeId)
            try {
              updateThreadState(Start)
              val res = op()
              updateThreadState(End)
              threadRes(i) = res
              // notify the master thread if all threads have completed
              if (completed.incrementAndGet() == ops.length) {
                syncObject.synchronized { syncObject.notifyAll() }
              }
            } catch {
              case e: Throwable if exception != None => // do nothing here and silently fail
              case e: Throwable =>
                log(s"throw ${e.toString}")
                exception = Some(Except(s"Thread $fakeId crashed on the following schedule: \n" + opLog.mkString("\n"),
                  e.getStackTrace))
                syncObject.synchronized { syncObject.notifyAll() }
              //println(s"$fakeId: ${e.toString}")
              //Runtime.getRuntime().halt(0) //exit the JVM and all running threads (no other way to kill other threads)
            }
          }
        })
    }
    // start all threads
    threads.foreach(_.start())
    // wait for all threads to complete, or for an exception to be thrown, or for the time out to expire
    var remTime = timeout
    syncObject.synchronized {

      timed { if (completed.get() != ops.length) syncObject.wait(timeout) } { time => remTime -= time }
    }
    if (exception.isDefined) {
      exception.get
    } else if (remTime <= 1) { // timeout ? using 1 instead of zero to allow for some errors
      Timeout(opLog.mkString("\n"))
    } else {
      // everything executed normally
      RetVal(threadRes.toList)
    }
  }

  // Updates the state of the current thread
  def updateThreadState(state: ThreadState): Unit = {
    val tid = threadId
    synchronized {
      threadStates(tid) = state
    }
    state match {
      case Sync(lockToAquire, locks) =>
        if (locks.indexOf(lockToAquire) < 0) waitForTurn else {
          // Re-acquiring the same lock
          updateThreadState(Running(lockToAquire +: locks))
        }
      case Start => waitStart()
      case End => removeFromSchedule(tid)
      case Running(_) =>
      case _ => waitForTurn // Wait, SyncUnique, VariableReadWrite
    }
  }

  def waitStart(): Unit = {
    //while (threadStates.size < numThreads) {
    //Thread.sleep(1)
    //}
    synchronized {
      if (threadStates.size < numThreads) {
        wait()
      } else {
        notifyAll()
      }
    }
  }

  def threadLocks = {
    synchronized {
      threadStates(threadId).locks
    }
  }

  def threadState = {
    synchronized {
      threadStates(threadId)
    }
  }

  def mapOtherStates(f: ThreadState => ThreadState) = {
    val exception = threadId
    synchronized {
      for (k <- threadStates.keys if k != exception) {
        threadStates(k) = f(threadStates(k))
      }
    }
  }

  def log(str: String) = {
    if ((realToFakeThreadId contains Thread.currentThread().getId())) {
      val space = (" " * ((threadId - 1) * 2))
      val s = space + threadId + ":" + "\n".r.replaceAllIn(str, "\n" + space + " ")
      opLog += s
    }
  }

  /**
   * Executes a read or write operation to a global data structure as per the given schedule
   * @param msg a message corresponding to the operation that will be logged
   */
  def exec[T](primop: => T)(msg: => String, postMsg: => Option[T => String] = None): T = {
    if (!(realToFakeThreadId contains Thread.currentThread().getId())) {
      primop
    } else {
      updateThreadState(VariableReadWrite(threadLocks))
      val m = msg
      if (m != "") log(m)
      if (opLog.size > maxOps)
        throw new Exception(s"Total number of reads/writes performed by threads exceed $maxOps. A possible deadlock!")
      val res = primop
      postMsg match {
        case Some(m) => log(m(res))
        case None =>
      }
      res
    }
  }

  private def setThreadId(fakeId: Int) = synchronized {
    realToFakeThreadId(Thread.currentThread.getId) = fakeId
  }

  def threadId =
    try {
      realToFakeThreadId(Thread.currentThread().getId())
    } catch {
      case e: NoSuchElementException =>
        throw new Exception("You are accessing shared variables in the constructor. This is not allowed. The variables are already initialized!")
    }

  private def isTurn(tid: Int) = synchronized {
    (!schedule.isEmpty && schedule.head != tid)
  }

  def canProceed(): Boolean = {
    val tid = threadId
    canContinue match {
      case Some((i, state)) if i == tid =>
        //println(s"$tid: Runs ! Was in state $state")
        canContinue = None
        state match {
          case Sync(lockToAquire, locks) => updateThreadState(Running(lockToAquire +: locks))
          case SyncUnique(lockToAquire, locks) =>
            mapOtherStates {
              _ match {
                case SyncUnique(lockToAquire2, locks2) if lockToAquire2 == lockToAquire => Wait(lockToAquire2, locks2)
                case e => e
              }
            }
            updateThreadState(Running(lockToAquire +: locks))
          case VariableReadWrite(locks) => updateThreadState(Running(locks))
        }
        true
      case Some((i, state)) =>
        //println(s"$tid: not my turn but $i !")
        false
      case None =>
        false
    }
  }

  var threadPreference = 0 // In the case the schedule is over, which thread should have the preference to execute.

  /** returns true if the thread can continue to execute, and false otherwise */
  def decide(): Option[(Int, ThreadState)] = {
    if (!threadStates.isEmpty) { // The last thread who enters the decision loop takes the decision.
      //println(s"$threadId: I'm taking a decision")
      if (threadStates.values.forall { case e: Wait => true case _ => false }) {
        val waiting = threadStates.keys.map(_.toString).mkString(", ")
        val s = if (threadStates.size > 1) "s" else ""
        val are = if (threadStates.size > 1) "are" else "is"
        throw new Exception(s"Deadlock: Thread$s $waiting $are waiting but all others have ended and cannot notify them.")
      } else {
        // Threads can be in Wait, Sync, SyncUnique, and VariableReadWrite mode.
        // Let's determine which ones can continue.
        val notFree = threadStates.collect { case (id, state) => state.locks }.flatten.toSet
        val threadsNotBlocked = threadStates.toSeq.filter {
          case (id, v: VariableReadWrite) => true
          case (id, v: CanContinueIfAcquiresLock) => !notFree(v.lockToAquire) || (v.locks contains v.lockToAquire)
          case _ => false
        }
        if (threadsNotBlocked.isEmpty) {
          val waiting = threadStates.keys.map(_.toString).mkString(", ")
          val s = if (threadStates.size > 1) "s" else ""
          val are = if (threadStates.size > 1) "are" else "is"
          val whoHasLock = threadStates.toSeq.flatMap { case (id, state) => state.locks.map(lock => (lock, id)) }.toMap
          val reason = threadStates.collect {
            case (id, state: CanContinueIfAcquiresLock) if !notFree(state.lockToAquire) =>
              s"Thread $id is waiting on lock ${state.lockToAquire} held by thread ${whoHasLock(state.lockToAquire)}"
          }.mkString("\n")
          throw new Exception(s"Deadlock: Thread$s $waiting are interlocked. Indeed:\n$reason")
        } else if (threadsNotBlocked.size == 1) { // Do not consume the schedule if only one thread can execute.
          Some(threadsNotBlocked(0))
        } else {
          val next = schedule.indexWhere(t => threadsNotBlocked.exists { case (id, state) => id == t })
          if (next != -1) {
            //println(s"$threadId: schedule is $schedule, next chosen is ${schedule(next)}")
            val chosenOne = schedule(next) // TODO: Make schedule a mutable list.
            schedule = schedule.take(next) ++ schedule.drop(next + 1)
            Some((chosenOne, threadStates(chosenOne)))
          } else {
            threadPreference = (threadPreference + 1) % threadsNotBlocked.size
            val chosenOne = threadsNotBlocked(threadPreference) // Maybe another strategy
            Some(chosenOne)
            //threadsNotBlocked.indexOf(threadId) >= 0
            /*
            val tnb = threadsNotBlocked.map(_._1).mkString(",")
            val s = if (schedule.isEmpty) "empty" else schedule.mkString(",")
            val only = if (schedule.isEmpty) "" else " only"
            throw new Exception(s"The schedule is $s but$only threads ${tnb} can continue")*/
          }
        }
      }
    } else canContinue
  }

  /**
   * This will be called before a schedulable operation begins.
   * This should not use synchronized
   */
  var numThreadsWaiting = new AtomicInteger(0)
  //var waitingForDecision = Map[Int, Option[Int]]() // Mapping from thread ids to a number indicating who is going to make the choice.
  var canContinue: Option[(Int, ThreadState)] = None // The result of the decision thread Id of the thread authorized to continue.
  private def waitForTurn = {
    synchronized {
      if (numThreadsWaiting.incrementAndGet() == threadStates.size) {
        canContinue = decide()
        notifyAll()
      }
      //waitingForDecision(threadId) = Some(numThreadsWaiting)
      //println(s"$threadId Entering waiting with ticket number $numThreadsWaiting/${waitingForDecision.size}")
      while (!canProceed()) wait()
    }
    numThreadsWaiting.decrementAndGet()
  }

  /**
   * To be invoked when a thread is about to complete
   */
  private def removeFromSchedule(fakeid: Int) = synchronized {
    //println(s"$fakeid: I'm taking a decision because I finished")
    schedule = schedule.filterNot(_ == fakeid)
    threadStates -= fakeid
    if (numThreadsWaiting.get() == threadStates.size) {
      canContinue = decide()
      notifyAll()
    }
  }

  def getOperationLog() = opLog
}
23  src/main/scala/pubsub/instrumentation/Stats.scala  Normal file
@@ -0,0 +1,23 @@
/* Copyright 2009-2015 EPFL, Lausanne */
package instrumentation

import java.lang.management._

/**
 * A collection of methods that can be used to collect run-time statistics about Leon programs.
 * This is mostly used to test the resources properties of Leon programs
 */
object Stats {
  def timed[T](code: => T)(cont: Long => Unit): T = {
    var t1 = System.currentTimeMillis()
    val r = code
    cont((System.currentTimeMillis() - t1))
    r
  }

  def withTime[T](code: => T): (T, Long) = {
    var t1 = System.currentTimeMillis()
    val r = code
    (r, (System.currentTimeMillis() - t1))
  }
}
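A tiny, hypothetical example of the two helpers above (the object name and workload are made up): timed runs a block and passes the elapsed wall-clock milliseconds to a continuation, while withTime returns the block's result together with its duration.

// Hypothetical usage sketch (not part of this commit).
object StatsDemo extends App {
  import instrumentation.Stats._

  val sum = timed { (1 to 1000000).sum } { ms => println(s"summing took $ms ms") }

  val (result, elapsed) = withTime { Thread.sleep(50); 42 }
  println(s"sum = $sum, got $result after $elapsed ms")
}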
124  src/main/scala/pubsub/instrumentation/TestHelper.scala  Normal file
@@ -0,0 +1,124 @@
package instrumentation

import scala.util.Random
import scala.collection.mutable.{Map => MutableMap}

import Stats._

object TestHelper {
  val noOfSchedules = 10000 // set this to 100k during deployment
  val readWritesPerThread = 20 // maximum number of read/writes possible in one thread
  val contextSwitchBound = 10
  val testTimeout = 240 // the total time out for a test in seconds
  val schedTimeout = 15 // the total time out for execution of a schedule in secs

  // Helpers
  /*def testManySchedules(op1: => Any): Unit = testManySchedules(List(() => op1))
  def testManySchedules(op1: => Any, op2: => Any): Unit = testManySchedules(List(() => op1, () => op2))
  def testManySchedules(op1: => Any, op2: => Any, op3: => Any): Unit = testManySchedules(List(() => op1, () => op2, () => op3))
  def testManySchedules(op1: => Any, op2: => Any, op3: => Any, op4: => Any): Unit = testManySchedules(List(() => op1, () => op2, () => op3, () => op4))*/

  def testSequential[T](ops: Scheduler => Any)(assertions: T => (Boolean, String)) =
    testManySchedules(1,
      (sched: Scheduler) => {
        (List(() => ops(sched)),
          (res: List[Any]) => assertions(res.head.asInstanceOf[T]))
      })

  /**
   * @numThreads number of threads
   * @ops operations to be executed, one per thread
   * @assertion a condition that will be executed after all threads have completed (without exceptions);
   * the arguments are the results of the threads
   */
  def testManySchedules(numThreads: Int,
                        ops: Scheduler =>
                          (List[() => Any], // Threads
                           List[Any] => (Boolean, String)) // Assertion
                        ) = {
    var timeout = testTimeout * 1000L
    val threadIds = (1 to numThreads)
    //(1 to scheduleLength).flatMap(_ => threadIds).toList.permutations.take(noOfSchedules).foreach {
    val schedules = (new ScheduleGenerator(numThreads)).schedules()
    var schedsExplored = 0
    schedules.takeWhile(_ => schedsExplored <= noOfSchedules && timeout > 0).foreach {
      //case _ if timeout <= 0 => // break
      case schedule =>
        schedsExplored += 1
        val schedr = new Scheduler(schedule)
        //println("Exploring Sched: "+schedule)
        val (threadOps, assertion) = ops(schedr)
        if (threadOps.size != numThreads)
          throw new IllegalStateException(s"Number of threads: $numThreads, do not match operations of threads: $threadOps")
        timed { schedr.runInParallel(schedTimeout * 1000, threadOps) } { t => timeout -= t } match {
          case Timeout(msg) =>
            throw new java.lang.AssertionError("assertion failed\n" + "The schedule took too long to complete. A possible deadlock! \n" + msg)
          case Except(msg, stkTrace) =>
            val traceStr = "Thread Stack trace: \n" + stkTrace.map(" at " + _.toString).mkString("\n")
            throw new java.lang.AssertionError("assertion failed\n" + msg + "\n" + traceStr)
          case RetVal(threadRes) =>
            // check the assertion
            val (success, custom_msg) = assertion(threadRes)
            if (!success) {
              val msg = "The following schedule resulted in wrong results: \n" + custom_msg + "\n" + schedr.getOperationLog().mkString("\n")
              throw new java.lang.AssertionError("Assertion failed: " + msg)
            }
        }
    }
    if (timeout <= 0) {
      throw new java.lang.AssertionError("Test took too long to complete! Cannot check all schedules as your code is too slow!")
    }
  }

  /**
   * A schedule generator that is based on the context bound
   */
  class ScheduleGenerator(numThreads: Int) {
    val scheduleLength = readWritesPerThread * numThreads
    val rands = (1 to scheduleLength).map(i => new Random(0xcafe * i)) // random numbers for choosing a thread at each position
    def schedules(): LazyList[List[Int]] = {
      var contextSwitches = 0
      var contexts = List[Int]() // a stack of thread ids in the order of context-switches
      val remainingOps = MutableMap[Int, Int]()
      remainingOps ++= (1 to numThreads).map(i => (i, readWritesPerThread)) // num ops remaining in each thread
      val liveThreads = (1 to numThreads).toSeq.toBuffer

      /**
       * Updates remainingOps and liveThreads once a thread is chosen for a position in the schedule
       */
      def updateState(tid: Int): Unit = {
        val remOps = remainingOps(tid)
        if (remOps == 0) {
          liveThreads -= tid
        } else {
          remainingOps += (tid -> (remOps - 1))
        }
      }
      val schedule = rands.foldLeft(List[Int]()) {
        case (acc, r) if contextSwitches < contextSwitchBound =>
          val tid = liveThreads(r.nextInt(liveThreads.size))
          contexts match {
            case prev :: tail if prev != tid => // we have a new context switch here
              contexts +:= tid
              contextSwitches += 1
            case prev :: tail =>
            case _ => // init case
              contexts +:= tid
          }
          updateState(tid)
          acc :+ tid
        case (acc, _) => // here context-bound has been reached so complete the schedule without any more context switches
          if (!contexts.isEmpty) {
            contexts = contexts.dropWhile(remainingOps(_) == 0)
          }
          val tid = contexts match {
            case top :: tail => top
            case _ => liveThreads(0) // here, there has to be threads that have not even started
          }
          updateState(tid)
          acc :+ tid
      }
      schedule #:: schedules()
    }
  }
}
19  src/main/scala/pubsub/instrumentation/TestUtils.scala  Normal file
@@ -0,0 +1,19 @@
package instrumentation

import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object TestUtils {
  def failsOrTimesOut[T](action: => T): Boolean = {
    val asyncAction = Future {
      action
    }
    try {
      Await.result(asyncAction, 2000.millisecond)
    } catch {
      case _: Throwable => return true
    }
    return false
  }
}
32  src/main/scala/pubsub/network/TCPReader.scala  Normal file
@@ -0,0 +1,32 @@
package pubsub.network

import java.net.Socket

import pubsub.Client
import pubsub.collection.BoundedBuffer
import pubsub.command._

class TCPReader(id: Int, socket: Socket, buffer: BoundedBuffer[Command]) {
  val client = new Client(socket, id)
  val reader = new CommandReader(socket.getInputStream(), client)

  def read(): Unit = {
    client.sayHello()
    println(s"New client: ${client.name}")
    while (client.isConnected) {

      reader.fetchCommand() match {
        case c: EndOfClient =>
          buffer.put(c)
          println(c)
          client.sayGoodbye()
          client.close()
        case _: MalformedCommand =>
          client.invalidPreviousCommand()
        case command =>
          println(command)
          buffer.put(command)
      }
    }
  }
}
@@ -1,126 +0,0 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
 */
package actorbintree

import akka.actor.{ActorRef, ActorSystem, Props, actorRef2Scala, scala2ActorRef}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.junit.Test
import org.junit.Assert._

import scala.util.Random
import scala.concurrent.duration._

class BinaryTreeSuite extends TestKit(ActorSystem("BinaryTreeSuite")) with ImplicitSender {

  import actorbintree.BinaryTreeSet._

  def receiveN(requester: TestProbe, ops: Seq[Operation], expectedReplies: Seq[OperationReply]): Unit =
    requester.within(5.seconds) {
      val repliesUnsorted = for (i <- 1 to ops.size) yield try {
        requester.expectMsgType[OperationReply]
      } catch {
        case ex: Throwable if ops.size > 10 => sys.error(s"failure to receive confirmation $i/${ops.size}\n$ex")
        case ex: Throwable => sys.error(s"failure to receive confirmation $i/${ops.size}\nRequests:" + ops.mkString("\n ", "\n ", "") + s"\n$ex")
      }
      val replies = repliesUnsorted.sortBy(_.id)
      if (replies != expectedReplies) {
        val pairs = (replies zip expectedReplies).zipWithIndex filter (x => x._1._1 != x._1._2)
        fail("unexpected replies:" + pairs.map(x => s"at index ${x._2}: got ${x._1._1}, expected ${x._1._2}").mkString("\n ", "\n ", ""))
      }
    }

  def verify(probe: TestProbe, ops: Seq[Operation], expected: Seq[OperationReply]): Unit = {
    val topNode = system.actorOf(Props[BinaryTreeSet])

    ops foreach { op =>
      topNode ! op
    }

    receiveN(probe, ops, expected)
    // the grader also verifies that enough actors are created
  }

  @Test def `proper inserts and lookups (5pts)`(): Unit = {
    val topNode = system.actorOf(Props[BinaryTreeSet])

    topNode ! Contains(testActor, id = 1, 1)
    expectMsg(ContainsResult(1, false))

    topNode ! Insert(testActor, id = 2, 1)
    topNode ! Contains(testActor, id = 3, 1)

    expectMsg(OperationFinished(2))
    expectMsg(ContainsResult(3, true))
    ()
  }

  @Test def `instruction example (5pts)`(): Unit = {
    val requester = TestProbe()
    val requesterRef = requester.ref
    val ops = List(
      Insert(requesterRef, id=100, 1),
      Contains(requesterRef, id=50, 2),
      Remove(requesterRef, id=10, 1),
      Insert(requesterRef, id=20, 2),
      Contains(requesterRef, id=80, 1),
      Contains(requesterRef, id=70, 2)
    )

    val expectedReplies = List(
      OperationFinished(id=10),
      OperationFinished(id=20),
      ContainsResult(id=50, false),
      ContainsResult(id=70, true),
      ContainsResult(id=80, false),
      OperationFinished(id=100)
    )

    verify(requester, ops, expectedReplies)
  }


  @Test def `behave identically to built-in set (includes GC) (40pts)`(): Unit = {
    val rnd = new Random()
    def randomOperations(requester: ActorRef, count: Int): Seq[Operation] = {
      def randomElement: Int = rnd.nextInt(100)
      def randomOperation(requester: ActorRef, id: Int): Operation = rnd.nextInt(4) match {
        case 0 => Insert(requester, id, randomElement)
        case 1 => Insert(requester, id, randomElement)
        case 2 => Contains(requester, id, randomElement)
        case 3 => Remove(requester, id, randomElement)
      }

      for (seq <- 0 until count) yield randomOperation(requester, seq)
    }

    def referenceReplies(operations: Seq[Operation]): Seq[OperationReply] = {
      var referenceSet = Set.empty[Int]
      def replyFor(op: Operation): OperationReply = op match {
        case Insert(_, seq, elem) =>
          referenceSet = referenceSet + elem
          OperationFinished(seq)
        case Remove(_, seq, elem) =>
          referenceSet = referenceSet - elem
          OperationFinished(seq)
        case Contains(_, seq, elem) =>
          ContainsResult(seq, referenceSet(elem))
      }

      for (op <- operations) yield replyFor(op)
    }

    val requester = TestProbe()
    val topNode = system.actorOf(Props[BinaryTreeSet])
    val count = 1000

    val ops = randomOperations(requester.ref, count)
    val expectedReplies = referenceReplies(ops)

    ops foreach { op =>
      topNode ! op
      if (rnd.nextDouble() < 0.1) topNode ! GC
    }
    receiveN(requester, ops, expectedReplies)
  }
}
43  src/test/scala/pubsub/BoundedBufferSuite.scala  Normal file
@@ -0,0 +1,43 @@
package pubsub

import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.mutable.HashMap
import pubsub.collection._
import org.junit._
import org.junit.Assert.assertEquals

import instrumentation._
import instrumentation.Stats._
import TestHelper._
import TestUtils._

class BoundedBufferSuite {
  @Test def `Should work in a sequential setting`: Unit = {
    testSequential[(Int, Int, Int, Int)] { sched =>
      val buffer = new SchedulableBoundedBuffer[Int](4, sched)
      buffer.put(1)
      buffer.put(2)
      buffer.put(3)
      buffer.put(4)
      (buffer.take(),
       buffer.take(),
       buffer.take(),
       buffer.take())
    } { tuple =>
      (tuple == (1, 2, 3, 4), s"Expected (1, 2, 3, 4) got $tuple")
    }
  }

  @Test def `Should work when Thread 1: 'put(1)', Thread 2: 'take' and a buffer of size 1`: Unit = {
    testManySchedules(2, sched => {
      val prodCons = new SchedulableBoundedBuffer[Int](1, sched)
      (List(() => prodCons.put(1), () => prodCons.take()),
       args => (args(1) == 1, s"expected 1 your 'take' implementation returned ${args(1)}"))
    })
  }


  @Rule def individualTestTimeout = new org.junit.rules.Timeout(400 * 1000)
}
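The two tests above show the intended use of testSequential and testManySchedules from TestHelper. As a hedged illustration of how further interleavings could be covered, here is a hypothetical extra test that could be added to BoundedBufferSuite (the test name, thread count and assertion are illustrative and not part of this commit): two producers against one consumer on a buffer of size 1.

// Hypothetical additional test sketch (not part of this commit); intended to
// live inside BoundedBufferSuite next to the tests above.
@Test def `Two producers and one consumer on a buffer of size 1 (sketch)`: Unit = {
  testManySchedules(3, sched => {
    val prodCons = new SchedulableBoundedBuffer[Int](1, sched)
    (List(() => prodCons.put(1), () => prodCons.put(2), () => prodCons.take()),
     args => {
       val taken = args(2)
       (taken == 1 || taken == 2, s"take() returned $taken, expected 1 or 2")
     })
  })
}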