commit 58b079621fa545c33a5b6c958767a39c628ae875 Author: Guillaume Martres Date: Tue Feb 19 20:44:23 2019 +0100 Add barneshut assignment diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..349d2e8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,16 @@ +# General +*.DS_Store +*.swp +*~ + +# Dotty +*.class +*.tasty +*.hasTasty + +# sbt +target/ + +# Dotty IDE +/.dotty-ide-artifact +/.dotty-ide.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..4dec89f --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,36 @@ +# DO NOT EDIT THIS FILE + +stages: + - build + - grade + +compile: + stage: build + image: lampepfl/moocs:dotty-2020-02-12 + except: + - tags + tags: + - cs206 + script: + - sbt packageSubmission + artifacts: + expire_in: 1 day + paths: + - submission.jar + +grade: + stage: grade + except: + - tags + tags: + - cs206 + image: + name: smarter3/moocs:concpar-pubsub-2020-03-15 + entrypoint: [""] + allow_failure: true + before_script: + - mkdir -p /shared/submission/ + - cp submission.jar /shared/submission/submission.jar + script: + - cd /grader + - /grader/grade | /grader/feedback-printer diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..a35362b --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "dotty": { + "trace": { + "remoteTracingUrl": "wss://lamppc36.epfl.ch/dotty-remote-tracer/upload/lsp.log", + "server": { "format": "JSON", "verbosity": "verbose" } + } + } +} diff --git a/assignment.sbt b/assignment.sbt new file mode 100644 index 0000000..c6705a8 --- /dev/null +++ b/assignment.sbt @@ -0,0 +1,4 @@ +// Student tasks (i.e. submit, packageSubmission) +enablePlugins(StudentTasks) + + diff --git a/build.sbt b/build.sbt new file mode 100644 index 0000000..e27eb6e --- /dev/null +++ b/build.sbt @@ -0,0 +1,11 @@ +course := "concpar" +assignment := "pubsub" + +scalaVersion := "0.23.0-bin-20200211-5b006fb-NIGHTLY" + +scalacOptions ++= Seq("-language:implicitConversions", "-deprecation") + +libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % Test + +testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v", "-s") +testSuite := "pubsub.BoundedBufferSuite" diff --git a/grading-tests.jar b/grading-tests.jar new file mode 100644 index 0000000..3bc9c02 Binary files /dev/null and b/grading-tests.jar differ diff --git a/project/MOOCSettings.scala b/project/MOOCSettings.scala new file mode 100644 index 0000000..171244f --- /dev/null +++ b/project/MOOCSettings.scala @@ -0,0 +1,46 @@ +package ch.epfl.lamp + +import sbt._ +import sbt.Keys._ + +/** + * Coursera uses two versions of each assignment. They both have the same assignment key and part id but have + * different item ids. + * + * @param key Assignment key + * @param partId Assignment partId + * @param itemId Item id of the non premium version + * @param premiumItemId Item id of the premium version (`None` if the assignment is optional) + */ +case class CourseraId(key: String, partId: String, itemId: String, premiumItemId: Option[String]) + +/** + * Settings shared by all assignments, reused in various tasks. 
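+ * Declares the `course`, `assignment`, `testSuite`, `options` and `courseraId` keys,
+ * plus shared test settings and the `<course>-<assignment>` project name.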
+ */ +object MOOCSettings extends AutoPlugin { + + object autoImport { + val course = SettingKey[String]("course") + val assignment = SettingKey[String]("assignment") + val options = SettingKey[Map[String, Map[String, String]]]("options") + val courseraId = settingKey[CourseraId]("Coursera-specific information identifying the assignment") + val testSuite = settingKey[String]("Fully qualified name of the test suite of this assignment") + // Convenient alias + type CourseraId = ch.epfl.lamp.CourseraId + val CourseraId = ch.epfl.lamp.CourseraId + } + + import autoImport._ + + override val globalSettings: Seq[Def.Setting[_]] = Seq( + // supershell is verbose, buggy and useless. + useSuperShell := false + ) + + override val projectSettings: Seq[Def.Setting[_]] = Seq( + parallelExecution in Test := false, + // Report test result after each test instead of waiting for every test to finish + logBuffered in Test := false, + name := s"${course.value}-${assignment.value}" + ) +} diff --git a/project/StudentTasks.scala b/project/StudentTasks.scala new file mode 100644 index 0000000..7604830 --- /dev/null +++ b/project/StudentTasks.scala @@ -0,0 +1,318 @@ +package ch.epfl.lamp + +import sbt._ +import Keys._ + +// import scalaj.http._ +import java.io.{File, FileInputStream, IOException} +import org.apache.commons.codec.binary.Base64 +// import play.api.libs.json.{Json, JsObject, JsPath} +import scala.util.{Failure, Success, Try} + +/** + * Provides tasks for submitting the assignment + */ +object StudentTasks extends AutoPlugin { + + override def requires = super.requires && MOOCSettings + + object autoImport { + val packageSourcesOnly = TaskKey[File]("packageSourcesOnly", "Package the sources of the project") + val packageBinWithoutResources = TaskKey[File]("packageBinWithoutResources", "Like packageBin, but without the resources") + val packageSubmissionZip = TaskKey[File]("packageSubmissionZip") + val packageSubmission = inputKey[Unit]("package solution as an archive file") + val runGradingTests = taskKey[Unit]("run black-box tests used for final grading") + } + + + import autoImport._ + import MOOCSettings.autoImport._ + + override lazy val projectSettings = Seq( + packageSubmissionSetting, + // submitSetting, + runGradingTestsSettings, + + fork := true, + connectInput in run := true, + outputStrategy := Some(StdoutOutput), + ) ++ packageSubmissionZipSettings + + lazy val runGradingTestsSettings = runGradingTests := { + val testSuiteJar = "grading-tests.jar" + if (!new File(testSuiteJar).exists) { + throw new MessageOnlyException(s"Could not find tests JarFile: $testSuiteJar") + } + + val classPath = s"${(Test / dependencyClasspath).value.map(_.data).mkString(File.pathSeparator)}${File.pathSeparator}$testSuiteJar" + val junitProcess = + Fork.java.fork( + ForkOptions(), + "-cp" :: classPath :: + "org.junit.runner.JUnitCore" :: + (Test / testSuite).value :: + Nil + ) + + // Wait for tests to complete. 
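+    // (exitValue() blocks until the forked JUnit JVM terminates; the exit code itself is not
+    // inspected here, the forked process prints the test report.)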
+ junitProcess.exitValue() + } + + + /** ********************************************************** + * SUBMITTING A SOLUTION TO COURSERA + */ + + val packageSubmissionZipSettings = Seq( + packageSubmissionZip := { + val submission = crossTarget.value / "submission.zip" + val sources = (packageSourcesOnly in Compile).value + val binaries = (packageBinWithoutResources in Compile).value + IO.zip(Seq(sources -> "sources.zip", binaries -> "binaries.jar"), submission) + submission + }, + artifactClassifier in packageSourcesOnly := Some("sources"), + artifact in (Compile, packageBinWithoutResources) ~= (art => art.withName(art.name + "-without-resources")) + ) ++ + inConfig(Compile)( + Defaults.packageTaskSettings(packageSourcesOnly, Defaults.sourceMappings) ++ + Defaults.packageTaskSettings(packageBinWithoutResources, Def.task { + val relativePaths = + (unmanagedResources in Compile).value.flatMap(Path.relativeTo((unmanagedResourceDirectories in Compile).value)(_)) + (mappings in (Compile, packageBin)).value.filterNot { case (_, path) => relativePaths.contains(path) } + }) + ) + + val maxSubmitFileSize = { + val mb = 1024 * 1024 + 10 * mb + } + + /** Check that the jar exists, isn't empty, isn't crazy big, and can be read + * If so, encode jar as base64 so we can send it to Coursera + */ + def prepareJar(jar: File, s: TaskStreams): String = { + val errPrefix = "Error submitting assignment jar: " + val fileLength = jar.length() + if (!jar.exists()) { + s.log.error(errPrefix + "jar archive does not exist\n" + jar.getAbsolutePath) + failSubmit() + } else if (fileLength == 0L) { + s.log.error(errPrefix + "jar archive is empty\n" + jar.getAbsolutePath) + failSubmit() + } else if (fileLength > maxSubmitFileSize) { + s.log.error(errPrefix + "jar archive is too big. Allowed size: " + + maxSubmitFileSize + " bytes, found " + fileLength + " bytes.\n" + + jar.getAbsolutePath) + failSubmit() + } else { + val bytes = new Array[Byte](fileLength.toInt) + val sizeRead = try { + val is = new FileInputStream(jar) + val read = is.read(bytes) + is.close() + read + } catch { + case ex: IOException => + s.log.error(errPrefix + "failed to read sources jar archive\n" + ex.toString) + failSubmit() + } + if (sizeRead != bytes.length) { + s.log.error(errPrefix + "failed to read the sources jar archive, size read: " + sizeRead) + failSubmit() + } else encodeBase64(bytes) + } + } + + /** Task to package solution to a given file path */ + lazy val packageSubmissionSetting = packageSubmission := { + val args: Seq[String] = Def.spaceDelimited("[path]").parsed + val s: TaskStreams = streams.value // for logging + val jar = (packageSubmissionZip in Compile).value + + val base64Jar = prepareJar(jar, s) + + val path = args.headOption.getOrElse((baseDirectory.value / "submission.jar").absolutePath) + scala.tools.nsc.io.File(path).writeAll(base64Jar) + } + +/* + /** Task to submit a solution to coursera */ + val submit = inputKey[Unit]("submit solution to Coursera") + lazy val submitSetting = submit := { + // Fail if scalafix linting does not pass. 
+ scalafixLinting.value + + val args: Seq[String] = Def.spaceDelimited("").parsed + val s: TaskStreams = streams.value // for logging + val jar = (packageSubmissionZip in Compile).value + + val assignmentDetails = + courseraId.?.value.getOrElse(throw new MessageOnlyException("This assignment can not be submitted to Coursera because the `courseraId` setting is undefined")) + val assignmentKey = assignmentDetails.key + val courseName = + course.value match { + case "capstone" => "scala-capstone" + case "bigdata" => "scala-spark-big-data" + case other => other + } + + val partId = assignmentDetails.partId + val itemId = assignmentDetails.itemId + val premiumItemId = assignmentDetails.premiumItemId + + val (email, secret) = args match { + case email :: secret :: Nil => + (email, secret) + case _ => + val inputErr = + s"""|Invalid input to `submit`. The required syntax for `submit` is: + |submit + | + |The submit token is NOT YOUR LOGIN PASSWORD. + |It can be obtained from the assignment page: + |https://www.coursera.org/learn/$courseName/programming/$itemId + |${ + premiumItemId.fold("") { id => + s"""or (for premium learners): + |https://www.coursera.org/learn/$courseName/programming/$id + """.stripMargin + } + } + """.stripMargin + s.log.error(inputErr) + failSubmit() + } + + val base64Jar = prepareJar(jar, s) + val json = + s"""|{ + | "assignmentKey":"$assignmentKey", + | "submitterEmail":"$email", + | "secret":"$secret", + | "parts":{ + | "$partId":{ + | "output":"$base64Jar" + | } + | } + |}""".stripMargin + + def postSubmission[T](data: String): Try[HttpResponse[String]] = { + val http = Http("https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1") + val hs = List( + ("Cache-Control", "no-cache"), + ("Content-Type", "application/json") + ) + s.log.info("Connecting to Coursera...") + val response = Try(http.postData(data) + .headers(hs) + .option(HttpOptions.connTimeout(10000)) // scalaj default timeout is only 100ms, changing that to 10s + .asString) // kick off HTTP POST + response + } + + val connectMsg = + s"""|Attempting to submit "${assignment.value}" assignment in "$courseName" course + |Using: + |- email: $email + |- submit token: $secret""".stripMargin + s.log.info(connectMsg) + + def reportCourseraResponse(response: HttpResponse[String]): Unit = { + val code = response.code + val respBody = response.body + + /* Sample JSON response from Coursera + { + "message": "Invalid email or token.", + "details": { + "learnerMessage": "Invalid email or token." + } + } + */ + + // Success, Coursera responds with 2xx HTTP status code + if (response.is2xx) { + val successfulSubmitMsg = + s"""|Successfully connected to Coursera. (Status $code) + | + |Assignment submitted successfully! 
+ | + |You can see how you scored by going to: + |https://www.coursera.org/learn/$courseName/programming/$itemId/ + |${ + premiumItemId.fold("") { id => + s"""or (for premium learners): + |https://www.coursera.org/learn/$courseName/programming/$id + """.stripMargin + } + } + |and clicking on "My Submission".""".stripMargin + s.log.info(successfulSubmitMsg) + } + + // Failure, Coursera responds with 4xx HTTP status code (client-side failure) + else if (response.is4xx) { + val result = Try(Json.parse(respBody)).toOption + val learnerMsg = result match { + case Some(resp: JsObject) => + (JsPath \ "details" \ "learnerMessage").read[String].reads(resp).get + case Some(x) => // shouldn't happen + "Could not parse Coursera's response:\n" + x + case None => + "Could not parse Coursera's response:\n" + respBody + } + val failedSubmitMsg = + s"""|Submission failed. + |There was something wrong while attempting to submit. + |Coursera says: + |$learnerMsg (Status $code)""".stripMargin + s.log.error(failedSubmitMsg) + } + + // Failure, Coursera responds with 5xx HTTP status code (server-side failure) + else if (response.is5xx) { + val failedSubmitMsg = + s"""|Submission failed. + |Coursera seems to be unavailable at the moment (Status $code) + |Check https://status.coursera.org/ and try again in a few minutes. + """.stripMargin + s.log.error(failedSubmitMsg) + } + + // Failure, Coursera repsonds with an unexpected status code + else { + val failedSubmitMsg = + s"""|Submission failed. + |Coursera replied with an unexpected code (Status $code) + """.stripMargin + s.log.error(failedSubmitMsg) + } + } + + // kick it all off, actually make request + postSubmission(json) match { + case Success(resp) => reportCourseraResponse(resp) + case Failure(e) => + val failedConnectMsg = + s"""|Connection to Coursera failed. + |There was something wrong while attempting to connect to Coursera. + |Check your internet connection. 
+ |${e.toString}""".stripMargin + s.log.error(failedConnectMsg) + } + + } +*/ + + def failSubmit(): Nothing = { + sys.error("Submission failed") + } + + /** + * ***************** + * DEALING WITH JARS + */ + def encodeBase64(bytes: Array[Byte]): String = + new String(Base64.encodeBase64(bytes)) +} diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 0000000..a919a9b --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=1.3.8 diff --git a/project/buildSettings.sbt b/project/buildSettings.sbt new file mode 100644 index 0000000..8fac702 --- /dev/null +++ b/project/buildSettings.sbt @@ -0,0 +1,5 @@ +// Used for Coursera submission (StudentPlugin) +// libraryDependencies += "org.scalaj" %% "scalaj-http" % "2.4.2" +// libraryDependencies += "com.typesafe.play" %% "play-json" % "2.7.4" +// Used for Base64 (StudentPlugin) +libraryDependencies += "commons-codec" % "commons-codec" % "1.10" diff --git a/project/plugins.sbt b/project/plugins.sbt new file mode 100644 index 0000000..017735d --- /dev/null +++ b/project/plugins.sbt @@ -0,0 +1,2 @@ +addSbtPlugin("org.scala-js" % "sbt-scalajs" % "0.6.28") +addSbtPlugin("ch.epfl.lamp" % "sbt-dotty" % "0.4.0") diff --git a/src/main/scala/pubsub/Client.scala b/src/main/scala/pubsub/Client.scala new file mode 100644 index 0000000..dec6784 --- /dev/null +++ b/src/main/scala/pubsub/Client.scala @@ -0,0 +1,34 @@ +package pubsub + +import java.net.Socket +import java.nio.charset.Charset + +case class Client(socket: Socket, id: Int) + (implicit charset: Charset = Charset.forName("UTF-8")) { + private var name_ = "client_" + id + val outStream = socket.getOutputStream() + + def name = name_ + def name_=(newName: String) = name_ = newName + + def isConnected: Boolean = socket.isConnected() + + def close(): Unit = socket.close() + + def send(message: String): Unit = { + val payload = message.getBytes(charset) + outStream.write(payload) + outStream.flush() + } + + def sendAck(ackType: String, message: String): Unit = + send(s"${ackType}_ack $message\n") + + def sayHello(): Unit = sendAck("connection", name_) + def sayGoodbye(): Unit = send(s"Bye Bye dear $name_!\n") + + def invalidPreviousCommand(): Unit = send("! 
previous command was invalid\n") + + def sendMessage(sender: String, topic: String, message: String): Unit = + send(s"$sender@$topic $message\n") +} diff --git a/src/main/scala/pubsub/Server.scala b/src/main/scala/pubsub/Server.scala new file mode 100644 index 0000000..eb04961 --- /dev/null +++ b/src/main/scala/pubsub/Server.scala @@ -0,0 +1,51 @@ +package pubsub + +import java.net.ServerSocket +import java.net.Socket +import java.io.BufferedReader +import java.io.InputStreamReader +import java.net.URL +import java.util.concurrent.Executors + +import scala.concurrent.JavaConversions._ +import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.ExecutionContext.Implicits.global +import pubsub.collection._ +import pubsub.command._ +import pubsub.network.TCPReader + +object Server extends App { + val port = 7676 + val maxWorkers = 12 + val bufferSize = 20 + val socket = new ServerSocket(port) + try { + val whatismyip = new URL("http://checkip.amazonaws.com") + val in = new BufferedReader(new InputStreamReader(whatismyip.openStream())); + val serverIP = in.readLine() + println(s"Connect to $serverIP (or `localhost`), port $port with `telnet` to join this server") + } catch { + case e: Exception => + println("There is a problem with your internet connection, you can only access it via localhost") + } + + val buffer = new BoundedBuffer[Command](20) + val commandHandlers = for{ + i <- 0 until maxWorkers + } yield { + Future { + new CommandHandler(buffer).handle() + } + } + val threadPool = Executors.newFixedThreadPool(maxWorkers) + + var clientId = 0 + while(true) { + val client = socket.accept(); + val cid = clientId + clientId += 1 + Future{ + new TCPReader(clientId, client, buffer).read() + }(ExecutionContext.fromExecutorService(threadPool)) + } +} diff --git a/src/main/scala/pubsub/collection/AbstractBoundedBuffer.scala b/src/main/scala/pubsub/collection/AbstractBoundedBuffer.scala new file mode 100644 index 0000000..64c077e --- /dev/null +++ b/src/main/scala/pubsub/collection/AbstractBoundedBuffer.scala @@ -0,0 +1,34 @@ +package pubsub.collection + +import instrumentation.Monitor + +trait InternalBuffer[T] { + def update(index: Int, elem: T): Unit + def apply(index: Int): T + def delete(index: Int): Unit + val size: Int +} + + +abstract class AbstractBoundedBuffer[T](bufferSize: Int) extends Monitor { + require(bufferSize > 0) + + def put(element: T): Unit + def take(): T + + val buffer: InternalBuffer[T] = new InternalBuffer[T] { + private val buffer: Array[Option[T]] = new Array(bufferSize) + def update(index: Int, elem: T): Unit = buffer(index) = Some(elem) + def apply(index: Int): T = buffer(index).get + def delete(index: Int): Unit = buffer(index) = None + val size = bufferSize + } + + def head: Int = _head + def head_=(e: Int): Unit = _head = e + def count: Int = _count + def count_=(e: Int): Unit = _count = e + + private var _head = 0; + private var _count = 0; +} diff --git a/src/main/scala/pubsub/collection/BoundedBuffer.scala b/src/main/scala/pubsub/collection/BoundedBuffer.scala new file mode 100644 index 0000000..26b69be --- /dev/null +++ b/src/main/scala/pubsub/collection/BoundedBuffer.scala @@ -0,0 +1,26 @@ +package pubsub.collection + +class BoundedBuffer[T](size: Int) extends AbstractBoundedBuffer[T](size) { + + // You have at your disposition the following two variables: + // - count : Int + // - head : Int + // In addition, you have access to an array-like internal buffer: + // - buffer + // You can access elements of this buffer using: + // - buffer(i) + 
// Similarly, you can set elements using: + // - buffer(i) = e + // + // You do not need to create those variables yourself! + // They are inherited from the AbstractBoundedBuffer class. + + override def put(e: T): Unit = ??? + + override def take(): T = ??? + + // You may want to add methods to: + // - check whether the buffer is empty + // - check whether the buffer is full + // - get the index of tail +} diff --git a/src/main/scala/pubsub/collection/ConcurrentMultiMap.scala b/src/main/scala/pubsub/collection/ConcurrentMultiMap.scala new file mode 100644 index 0000000..f23d50a --- /dev/null +++ b/src/main/scala/pubsub/collection/ConcurrentMultiMap.scala @@ -0,0 +1,65 @@ +package pubsub.collection + +import scala.collection.mutable +import java.util.concurrent.locks.ReentrantReadWriteLock + +class ConcurrentMultiMap[K,V] { + + private val lockRW = new ReentrantReadWriteLock() + val map = mutable.HashMap[K, Set[V]]() + + def lock(): Unit = lockRW.writeLock().lock() + + def unlock(): Unit = lockRW.writeLock().unlock() + + def add(key: K, value: V): Unit = { + try { + lockRW.writeLock().lock() + map.get(key) match { + case Some(set) => + if (! set.contains(value)) { + map += ((key, set + value)) + } + case None => + map += ((key, Set(value))) + } + } finally { + lockRW.writeLock().unlock() + } + } + + def get(key: K): Option[Set[V]] = { + try { + lockRW.readLock().lock() + val v = map.get(key) + v + } finally { + lockRW.readLock().unlock() + } + } + + def remove(key: K, value: V): Unit = { + try { + lockRW.writeLock().lock() + map.get(key) match { + case Some(set) => + if (set.contains(value)) { + map += ((key, set - value)) + } + case None => + } + } finally { + lockRW.writeLock().unlock() + } + } + + def removeValueFromAll(value: V): Unit = { + try { + lockRW.writeLock().lock() + map.keys.foreach(remove(_, value)) + } finally { + lockRW.writeLock().unlock() + } + } + +} diff --git a/src/main/scala/pubsub/command/CommandHandler.scala b/src/main/scala/pubsub/command/CommandHandler.scala new file mode 100644 index 0000000..c6b3afb --- /dev/null +++ b/src/main/scala/pubsub/command/CommandHandler.scala @@ -0,0 +1,43 @@ +package pubsub.command + +import pubsub.Client +import pubsub.collection._ + +class CommandHandler(buffer: BoundedBuffer[Command]) { + import CommandHandler._ + + def handle(): Unit = { + val command = buffer.take() + + command match { + case Subscribe(topic, client) => + multiMap.add(topic, client) + client.sendAck("subscribe", topic) + + case Unsubscribe(topic, client) => + multiMap.remove(topic, client) + client.sendAck("unsubscribe", topic) + + case Publish(topic, message, sender) => + for { + subscribers <- multiMap.get(topic) + client <- subscribers + } client.sendMessage(sender.name, topic, message) + + case EndOfClient(client) => + multiMap.removeValueFromAll(client) + + case Rename(newName,client) => + client.name = newName + client.sendAck("rename", newName) + + case _ => + // nothing should happen + } + } +} + + +object CommandHandler { + val multiMap = new ConcurrentMultiMap[String, Client]() +} diff --git a/src/main/scala/pubsub/command/CommandReader.scala b/src/main/scala/pubsub/command/CommandReader.scala new file mode 100644 index 0000000..580715d --- /dev/null +++ b/src/main/scala/pubsub/command/CommandReader.scala @@ -0,0 +1,45 @@ +package pubsub.command + +import java.io.BufferedReader +import java.io.InputStreamReader +import java.io.InputStream + +import pubsub.Client + +class CommandReader(inStream: InputStream, client: Client) { + val inputBuffer = new 
BufferedReader(new InputStreamReader(inStream)) + + def fetchCommand(): Command = { + val line = inputBuffer.readLine() + + if (line == null || line.startsWith("leave")) { + EndOfClient(client) + } + else { + val quoteIndex = line.indexOf('\'') + val hasPayload = quoteIndex != -1 + val parts = + if(!hasPayload) { + line.split(" ").toList + } else { + val (command, payload) = line.splitAt(quoteIndex) + command.split(" ").toList :+ payload + } + + parts match { + case "subscribe" :: topic :: Nil => Subscribe(topic, client) + case "unsubscribe" :: topic :: Nil => Unsubscribe(topic, client) + case "rename" :: newName :: Nil => Rename(newName, client) + + case "publish" :: topic :: msg :: Nil if hasPayload && msg != "\'" => + var message = msg + while(!message.endsWith("\'")) { + message += "\n" + inputBuffer.readLine() + } + Publish(topic, message, client) + + case _ => MalformedCommand(client) + } + } + } +} diff --git a/src/main/scala/pubsub/command/CommandType.scala b/src/main/scala/pubsub/command/CommandType.scala new file mode 100644 index 0000000..cfd35c6 --- /dev/null +++ b/src/main/scala/pubsub/command/CommandType.scala @@ -0,0 +1,33 @@ +package pubsub.command + +import pubsub.Client + +sealed trait Topic { + val topic: String +} + +sealed abstract class Command(from: Client) { + def toString(): String +} +case class EndOfClient(from: Client) extends Command(from) { + override def toString(): String = s"${from.name}: End of client" +} +case class MalformedCommand(from: Client) extends Command(from) { + override def toString(): String = s"${from.name}: Invalid Command" +} +case class Subscribe(topic: String, + from: Client) extends Command(from) with Topic { + override def toString(): String = s"${from.name}: Subscribe @ $topic" +} +case class Unsubscribe(topic: String, + from: Client) extends Command(from) with Topic { + override def toString(): String = s"${from.name}: Unsubscribe @ $topic" +} +case class Publish(topic: String, message: String, + from: Client) extends Command(from) with Topic { + override def toString(): String = s"${from.name}: Publish @ $topic -> $message" +} + +case class Rename(newName: String, from: Client) extends Command(from) { + override def toString(): String = s"${from.name}: Renamed to $newName" +} diff --git a/src/main/scala/pubsub/instrumentation/MockedMonitor.scala b/src/main/scala/pubsub/instrumentation/MockedMonitor.scala new file mode 100644 index 0000000..a93723c --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/MockedMonitor.scala @@ -0,0 +1,72 @@ +package instrumentation + +trait MockedMonitor extends Monitor { + def scheduler: Scheduler + + // Can be overriden. + override def waitDefault() = { + scheduler.log("wait") + scheduler updateThreadState Wait(this, scheduler.threadLocks.tail) + } + override def synchronizedDefault[T](toExecute: =>T): T = { + scheduler.log("synchronized check") + val prevLocks = scheduler.threadLocks + scheduler updateThreadState Sync(this, prevLocks) // If this belongs to prevLocks, should just continue. 
+ scheduler.log("synchronized -> enter") + try { + toExecute + } finally { + scheduler updateThreadState Running(prevLocks) + scheduler.log("synchronized -> out") + } + } + override def notifyDefault() = { + scheduler mapOtherStates { + state => state match { + case Wait(lockToAquire, locks) if lockToAquire == this => SyncUnique(this, state.locks) + case e => e + } + } + scheduler.log("notify") + } + override def notifyAllDefault() = { + scheduler mapOtherStates { + state => state match { + case Wait(lockToAquire, locks) if lockToAquire == this => Sync(this, state.locks) + case SyncUnique(lockToAquire, locks) if lockToAquire == this => Sync(this, state.locks) + case e => e + } + } + scheduler.log("notifyAll") + } +} + +trait LockFreeMonitor extends Monitor { + override def waitDefault() = { + throw new Exception("Please use lock-free structures and do not use wait()") + } + override def synchronizedDefault[T](toExecute: =>T): T = { + throw new Exception("Please use lock-free structures and do not use synchronized()") + } + override def notifyDefault() = { + throw new Exception("Please use lock-free structures and do not use notify()") + } + override def notifyAllDefault() = { + throw new Exception("Please use lock-free structures and do not use notifyAll()") + } +} + + +abstract class ThreadState { + def locks: Seq[AnyRef] +} +trait CanContinueIfAcquiresLock extends ThreadState { + def lockToAquire: AnyRef +} +case object Start extends ThreadState { def locks: Seq[AnyRef] = Seq.empty } +case object End extends ThreadState { def locks: Seq[AnyRef] = Seq.empty } +case class Wait(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState +case class SyncUnique(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState with CanContinueIfAcquiresLock +case class Sync(lockToAquire: AnyRef, locks: Seq[AnyRef]) extends ThreadState with CanContinueIfAcquiresLock +case class Running(locks: Seq[AnyRef]) extends ThreadState +case class VariableReadWrite(locks: Seq[AnyRef]) extends ThreadState diff --git a/src/main/scala/pubsub/instrumentation/Monitor.scala b/src/main/scala/pubsub/instrumentation/Monitor.scala new file mode 100644 index 0000000..82c6cf2 --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/Monitor.scala @@ -0,0 +1,23 @@ +package instrumentation + +class Dummy + +trait Monitor { + implicit val dummy: Dummy = new Dummy + + def wait()(implicit i: Dummy) = waitDefault() + + def synchronized[T](e: => T)(implicit i: Dummy) = synchronizedDefault(e) + + def notify()(implicit i: Dummy) = notifyDefault() + + def notifyAll()(implicit i: Dummy) = notifyAllDefault() + + private val lock = new AnyRef + + // Can be overriden. 
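+  // In the tests these four defaults are overridden by MockedMonitor so that wait/notify and
+  // synchronized are routed through the Scheduler instead of this private lock.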
+ def waitDefault(): Unit = lock.wait() + def synchronizedDefault[T](toExecute: =>T): T = lock.synchronized(toExecute) + def notifyDefault(): Unit = lock.notify() + def notifyAllDefault(): Unit = lock.notifyAll() +} diff --git a/src/main/scala/pubsub/instrumentation/SchedulableInternalBuffer.scala b/src/main/scala/pubsub/instrumentation/SchedulableInternalBuffer.scala new file mode 100644 index 0000000..4c12d7e --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/SchedulableInternalBuffer.scala @@ -0,0 +1,63 @@ +package instrumentation + +import java.util.concurrent._; +import scala.concurrent.duration._ +import scala.collection.mutable._ +import Stats._ + +import java.util.concurrent.atomic.AtomicInteger + +import pubsub.collection._ + +class SchedulableInternalBuffer[T](val size: Int, scheduler: Scheduler) extends InternalBuffer[T] { + private val buffer = new Array[Option[T]](size) + private val threadBuffer = new Array[Option[Int]](size) // Who last wrote in the array. + + def update(index: Int, elem: T): Unit = { + scheduler.exec { + buffer(index) = Some(elem) + threadBuffer(index) = Some(scheduler.threadId) + }(s"Write buffer($index) = $elem") + } + + def apply(index: Int): T = scheduler.exec { + buffer(index).fold { + threadBuffer(index).fold { + throw new Exception(s"buffer($index) was never set ! ") + } { tid => + throw new Exception(s"buffer($index) was deleted by thread $tid ! ") + } + }(identity) + }(s"Read buffer($index)") + + def delete(index: Int): Unit = { + scheduler.exec { + buffer(index) = None + threadBuffer(index) = Some(scheduler.threadId) + }(s"Delete buffer($index)") + } +} + +trait MockedInternals[T] { self: SchedulableBoundedBuffer[T] => + override val buffer = new SchedulableInternalBuffer[T](self.size, self.scheduler) + + var h: Int = 0 + var c: Int = 0 + + override def head_=(i: Int) = scheduler.exec { + h = i + }(s"Write head = $i") + override def head: Int = scheduler.exec { h }(s"Read head -> $h") + + override def count_=(i: Int) = scheduler.exec { + c = i + }(s"Write count = $i") + + override def count: Int = scheduler.exec { c }(s"Read count -> $c") +} + +class SchedulableBoundedBuffer[T](val size: Int, val scheduler: Scheduler) + extends BoundedBuffer[T](size) with MockedMonitor with MockedInternals[T] { + +} + diff --git a/src/main/scala/pubsub/instrumentation/Scheduler.scala b/src/main/scala/pubsub/instrumentation/Scheduler.scala new file mode 100644 index 0000000..67d4aea --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/Scheduler.scala @@ -0,0 +1,305 @@ +package instrumentation + +import java.util.concurrent._; +import scala.concurrent.duration._ +import scala.collection.mutable._ +import Stats._ + +import java.util.concurrent.atomic.AtomicInteger + +sealed abstract class Result +case class RetVal(rets: List[Any]) extends Result +case class Except(msg: String, stackTrace: Array[StackTraceElement]) extends Result +case class Timeout(msg: String) extends Result + +/** + * A class that maintains schedule and a set of thread ids. + * The schedules are advanced after an operation of a SchedulableBuffer is performed. 
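+ * Thread ids in a schedule start at 1 and follow the order of the operations passed to `runInParallel`.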
+ * Note: the real schedule that is executed may deviate from the input schedule + * due to the adjustments that had to be made for locks + */ +class Scheduler(sched: List[Int]) { + val maxOps = 500 // a limit on the maximum number of operations the code is allowed to perform + + private var schedule = sched + private var numThreads = 0 + private val realToFakeThreadId = Map[Long, Int]() + private val opLog = ListBuffer[String]() // a mutable list (used for efficient concat) + private val threadStates = Map[Int, ThreadState]() + + /** + * Runs a set of operations in parallel as per the schedule. + * Each operation may consist of many primitive operations like reads or writes + * to shared data structure each of which should be executed using the function `exec`. + * @timeout in milliseconds + * @return true - all threads completed on time, false -some tests timed out. + */ + def runInParallel(timeout: Long, ops: List[() => Any]): Result = { + numThreads = ops.length + val threadRes = Array.fill(numThreads) { None: Any } + var exception: Option[Except] = None + val syncObject = new Object() + var completed = new AtomicInteger(0) + // create threads + val threads = ops.zipWithIndex.map { + case (op, i) => + new Thread(new Runnable() { + def run(): Unit = { + val fakeId = i + 1 + setThreadId(fakeId) + try { + updateThreadState(Start) + val res = op() + updateThreadState(End) + threadRes(i) = res + // notify the master thread if all threads have completed + if (completed.incrementAndGet() == ops.length) { + syncObject.synchronized { syncObject.notifyAll() } + } + } catch { + case e: Throwable if exception != None => // do nothing here and silently fail + case e: Throwable => + log(s"throw ${e.toString}") + exception = Some(Except(s"Thread $fakeId crashed on the following schedule: \n" + opLog.mkString("\n"), + e.getStackTrace)) + syncObject.synchronized { syncObject.notifyAll() } + //println(s"$fakeId: ${e.toString}") + //Runtime.getRuntime().halt(0) //exit the JVM and all running threads (no other way to kill other threads) + } + } + }) + } + // start all threads + threads.foreach(_.start()) + // wait for all threads to complete, or for an exception to be thrown, or for the time out to expire + var remTime = timeout + syncObject.synchronized { + + timed { if(completed.get() != ops.length) syncObject.wait(timeout) } { time => remTime -= time } + } + if (exception.isDefined) { + exception.get + } else if (remTime <= 1) { // timeout ? 
using 1 instead of zero to allow for some errors + Timeout(opLog.mkString("\n")) + } else { + // every thing executed normally + RetVal(threadRes.toList) + } + } + + // Updates the state of the current thread + def updateThreadState(state: ThreadState): Unit = { + val tid = threadId + synchronized { + threadStates(tid) = state + } + state match { + case Sync(lockToAquire, locks) => + if (locks.indexOf(lockToAquire) < 0) waitForTurn else { + // Re-aqcuiring the same lock + updateThreadState(Running(lockToAquire +: locks)) + } + case Start => waitStart() + case End => removeFromSchedule(tid) + case Running(_) => + case _ => waitForTurn // Wait, SyncUnique, VariableReadWrite + } + } + + def waitStart(): Unit = { + //while (threadStates.size < numThreads) { + //Thread.sleep(1) + //} + synchronized { + if (threadStates.size < numThreads) { + wait() + } else { + notifyAll() + } + } + } + + def threadLocks = { + synchronized { + threadStates(threadId).locks + } + } + + def threadState = { + synchronized { + threadStates(threadId) + } + } + + def mapOtherStates(f: ThreadState => ThreadState) = { + val exception = threadId + synchronized { + for (k <- threadStates.keys if k != exception) { + threadStates(k) = f(threadStates(k)) + } + } + } + + def log(str: String) = { + if((realToFakeThreadId contains Thread.currentThread().getId())) { + val space = (" " * ((threadId - 1) * 2)) + val s = space + threadId + ":" + "\n".r.replaceAllIn(str, "\n" + space + " ") + opLog += s + } + } + + /** + * Executes a read or write operation to a global data structure as per the given schedule + * @param msg a message corresponding to the operation that will be logged + */ + def exec[T](primop: => T)(msg: => String, postMsg: => Option[T => String] = None): T = { + if(! (realToFakeThreadId contains Thread.currentThread().getId())) { + primop + } else { + updateThreadState(VariableReadWrite(threadLocks)) + val m = msg + if(m != "") log(m) + if (opLog.size > maxOps) + throw new Exception(s"Total number of reads/writes performed by threads exceed $maxOps. A possible deadlock!") + val res = primop + postMsg match { + case Some(m) => log(m(res)) + case None => + } + res + } + } + + private def setThreadId(fakeId: Int) = synchronized { + realToFakeThreadId(Thread.currentThread.getId) = fakeId + } + + def threadId = + try { + realToFakeThreadId(Thread.currentThread().getId()) + } catch { + case e: NoSuchElementException => + throw new Exception("You are accessing shared variables in the constructor. This is not allowed. The variables are already initialized!") + } + + private def isTurn(tid: Int) = synchronized { + (!schedule.isEmpty && schedule.head != tid) + } + + def canProceed(): Boolean = { + val tid = threadId + canContinue match { + case Some((i, state)) if i == tid => + //println(s"$tid: Runs ! Was in state $state") + canContinue = None + state match { + case Sync(lockToAquire, locks) => updateThreadState(Running(lockToAquire +: locks)) + case SyncUnique(lockToAquire, locks) => + mapOtherStates { + _ match { + case SyncUnique(lockToAquire2, locks2) if lockToAquire2 == lockToAquire => Wait(lockToAquire2, locks2) + case e => e + } + } + updateThreadState(Running(lockToAquire +: locks)) + case VariableReadWrite(locks) => updateThreadState(Running(locks)) + } + true + case Some((i, state)) => + //println(s"$tid: not my turn but $i !") + false + case None => + false + } + } + + var threadPreference = 0 // In the case the schedule is over, which thread should have the preference to execute. 
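+  // decide() picks the next thread allowed to run (or None): it throws on deadlock, runs the only
+  // runnable thread if there is just one, otherwise consumes the next matching entry of `schedule`,
+  // falling back to the round-robin `threadPreference` above once the schedule is exhausted.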
+ + /** returns true if the thread can continue to execute, and false otherwise */ + def decide(): Option[(Int, ThreadState)] = { + if (!threadStates.isEmpty) { // The last thread who enters the decision loop takes the decision. + //println(s"$threadId: I'm taking a decision") + if (threadStates.values.forall { case e: Wait => true case _ => false }) { + val waiting = threadStates.keys.map(_.toString).mkString(", ") + val s = if (threadStates.size > 1) "s" else "" + val are = if (threadStates.size > 1) "are" else "is" + throw new Exception(s"Deadlock: Thread$s $waiting $are waiting but all others have ended and cannot notify them.") + } else { + // Threads can be in Wait, Sync, SyncUnique, and VariableReadWrite mode. + // Let's determine which ones can continue. + val notFree = threadStates.collect { case (id, state) => state.locks }.flatten.toSet + val threadsNotBlocked = threadStates.toSeq.filter { + case (id, v: VariableReadWrite) => true + case (id, v: CanContinueIfAcquiresLock) => !notFree(v.lockToAquire) || (v.locks contains v.lockToAquire) + case _ => false + } + if (threadsNotBlocked.isEmpty) { + val waiting = threadStates.keys.map(_.toString).mkString(", ") + val s = if (threadStates.size > 1) "s" else "" + val are = if (threadStates.size > 1) "are" else "is" + val whoHasLock = threadStates.toSeq.flatMap { case (id, state) => state.locks.map(lock => (lock, id)) }.toMap + val reason = threadStates.collect { + case (id, state: CanContinueIfAcquiresLock) if !notFree(state.lockToAquire) => + s"Thread $id is waiting on lock ${state.lockToAquire} held by thread ${whoHasLock(state.lockToAquire)}" + }.mkString("\n") + throw new Exception(s"Deadlock: Thread$s $waiting are interlocked. Indeed:\n$reason") + } else if (threadsNotBlocked.size == 1) { // Do not consume the schedule if only one thread can execute. + Some(threadsNotBlocked(0)) + } else { + val next = schedule.indexWhere(t => threadsNotBlocked.exists { case (id, state) => id == t }) + if (next != -1) { + //println(s"$threadId: schedule is $schedule, next chosen is ${schedule(next)}") + val chosenOne = schedule(next) // TODO: Make schedule a mutable list. + schedule = schedule.take(next) ++ schedule.drop(next + 1) + Some((chosenOne, threadStates(chosenOne))) + } else { + threadPreference = (threadPreference + 1) % threadsNotBlocked.size + val chosenOne = threadsNotBlocked(threadPreference) // Maybe another strategy + Some(chosenOne) + //threadsNotBlocked.indexOf(threadId) >= 0 + /* + val tnb = threadsNotBlocked.map(_._1).mkString(",") + val s = if (schedule.isEmpty) "empty" else schedule.mkString(",") + val only = if (schedule.isEmpty) "" else " only" + throw new Exception(s"The schedule is $s but$only threads ${tnb} can continue")*/ + } + } + } + } else canContinue + } + + /** + * This will be called before a schedulable operation begins. + * This should not use synchronized + */ + var numThreadsWaiting = new AtomicInteger(0) + //var waitingForDecision = Map[Int, Option[Int]]() // Mapping from thread ids to a number indicating who is going to make the choice. + var canContinue: Option[(Int, ThreadState)] = None // The result of the decision thread Id of the thread authorized to continue. 
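+  // waitForTurn parks the calling thread: the last thread to arrive calls decide() and notifyAll(),
+  // and each thread then loops in wait() until canProceed() grants it the turn.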
+ private def waitForTurn = { + synchronized { + if (numThreadsWaiting.incrementAndGet() == threadStates.size) { + canContinue = decide() + notifyAll() + } + //waitingForDecision(threadId) = Some(numThreadsWaiting) + //println(s"$threadId Entering waiting with ticket number $numThreadsWaiting/${waitingForDecision.size}") + while (!canProceed()) wait() + } + numThreadsWaiting.decrementAndGet() + } + + /** + * To be invoked when a thread is about to complete + */ + private def removeFromSchedule(fakeid: Int) = synchronized { + //println(s"$fakeid: I'm taking a decision because I finished") + schedule = schedule.filterNot(_ == fakeid) + threadStates -= fakeid + if (numThreadsWaiting.get() == threadStates.size) { + canContinue = decide() + notifyAll() + } + } + + def getOperationLog() = opLog +} diff --git a/src/main/scala/pubsub/instrumentation/Stats.scala b/src/main/scala/pubsub/instrumentation/Stats.scala new file mode 100644 index 0000000..ab221bf --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/Stats.scala @@ -0,0 +1,23 @@ +/* Copyright 2009-2015 EPFL, Lausanne */ +package instrumentation + +import java.lang.management._ + +/** + * A collection of methods that can be used to collect run-time statistics about Leon programs. + * This is mostly used to test the resources properties of Leon programs + */ +object Stats { + def timed[T](code: => T)(cont: Long => Unit): T = { + var t1 = System.currentTimeMillis() + val r = code + cont((System.currentTimeMillis() - t1)) + r + } + + def withTime[T](code: => T): (T, Long) = { + var t1 = System.currentTimeMillis() + val r = code + (r, (System.currentTimeMillis() - t1)) + } +} diff --git a/src/main/scala/pubsub/instrumentation/TestHelper.scala b/src/main/scala/pubsub/instrumentation/TestHelper.scala new file mode 100644 index 0000000..57b7215 --- /dev/null +++ b/src/main/scala/pubsub/instrumentation/TestHelper.scala @@ -0,0 +1,124 @@ +package instrumentation + +import scala.util.Random +import scala.collection.mutable.{Map => MutableMap} + +import Stats._ + +object TestHelper { + val noOfSchedules = 10000 // set this to 100k during deployment + val readWritesPerThread = 20 // maximum number of read/writes possible in one thread + val contextSwitchBound = 10 + val testTimeout = 240 // the total time out for a test in seconds + val schedTimeout = 15 // the total time out for execution of a schedule in secs + + // Helpers + /*def testManySchedules(op1: => Any): Unit = testManySchedules(List(() => op1)) + def testManySchedules(op1: => Any, op2: => Any): Unit = testManySchedules(List(() => op1, () => op2)) + def testManySchedules(op1: => Any, op2: => Any, op3: => Any): Unit = testManySchedules(List(() => op1, () => op2, () => op3)) + def testManySchedules(op1: => Any, op2: => Any, op3: => Any, op4: => Any): Unit = testManySchedules(List(() => op1, () => op2, () => op3, () => op4))*/ + + def testSequential[T](ops: Scheduler => Any)(assertions: T => (Boolean, String)) = + testManySchedules(1, + (sched: Scheduler) => { + (List(() => ops(sched)), + (res: List[Any]) => assertions(res.head.asInstanceOf[T])) + }) + + /** + * @numThreads number of threads + * @ops operations to be executed, one per thread + * @assertion as condition that will executed after all threads have completed (without exceptions) + * the arguments are the results of the threads + */ + def testManySchedules(numThreads: Int, + ops: Scheduler => + (List[() => Any], // Threads + List[Any] => (Boolean, String)) // Assertion + ) = { + var timeout = testTimeout * 1000L + val threadIds = 
(1 to numThreads) + //(1 to scheduleLength).flatMap(_ => threadIds).toList.permutations.take(noOfSchedules).foreach { + val schedules = (new ScheduleGenerator(numThreads)).schedules() + var schedsExplored = 0 + schedules.takeWhile(_ => schedsExplored <= noOfSchedules && timeout > 0).foreach { + //case _ if timeout <= 0 => // break + case schedule => + schedsExplored += 1 + val schedr = new Scheduler(schedule) + //println("Exploring Sched: "+schedule) + val (threadOps, assertion) = ops(schedr) + if (threadOps.size != numThreads) + throw new IllegalStateException(s"Number of threads: $numThreads, do not match operations of threads: $threadOps") + timed { schedr.runInParallel(schedTimeout * 1000, threadOps) } { t => timeout -= t } match { + case Timeout(msg) => + throw new java.lang.AssertionError("assertion failed\n"+"The schedule took too long to complete. A possible deadlock! \n"+msg) + case Except(msg, stkTrace) => + val traceStr = "Thread Stack trace: \n"+stkTrace.map(" at "+_.toString).mkString("\n") + throw new java.lang.AssertionError("assertion failed\n"+msg+"\n"+traceStr) + case RetVal(threadRes) => + // check the assertion + val (success, custom_msg) = assertion(threadRes) + if(!success) { + val msg = "The following schedule resulted in wrong results: \n" + custom_msg + "\n" + schedr.getOperationLog().mkString("\n") + throw new java.lang.AssertionError("Assertion failed: "+msg) + } + } + } + if (timeout <= 0) { + throw new java.lang.AssertionError("Test took too long to complete! Cannot check all schedules as your code is too slow!") + } + } + + /** + * A schedule generator that is based on the context bound + */ + class ScheduleGenerator(numThreads: Int) { + val scheduleLength = readWritesPerThread * numThreads + val rands = (1 to scheduleLength).map(i => new Random(0xcafe * i)) // random numbers for choosing a thread at each position + def schedules(): LazyList[List[Int]] = { + var contextSwitches = 0 + var contexts = List[Int]() // a stack of thread ids in the order of context-switches + val remainingOps = MutableMap[Int, Int]() + remainingOps ++= (1 to numThreads).map(i => (i, readWritesPerThread)) // num ops remaining in each thread + val liveThreads = (1 to numThreads).toSeq.toBuffer + + /** + * Updates remainingOps and liveThreads once a thread is chosen for a position in the schedule + */ + def updateState(tid: Int): Unit = { + val remOps = remainingOps(tid) + if (remOps == 0) { + liveThreads -= tid + } else { + remainingOps += (tid -> (remOps - 1)) + } + } + val schedule = rands.foldLeft(List[Int]()){ + case (acc, r) if contextSwitches < contextSwitchBound => + val tid = liveThreads(r.nextInt(liveThreads.size)) + contexts match { + case prev :: tail if prev != tid => // we have a new context switch here + contexts +:= tid + contextSwitches += 1 + case prev :: tail => + case _ => // init case + contexts +:= tid + } + updateState(tid) + acc :+ tid + case (acc, _) => // here context-bound has been reached so complete the schedule without any more context switches + if(!contexts.isEmpty) { + contexts = contexts.dropWhile(remainingOps(_) == 0) + } + val tid = contexts match { + case top :: tail => top + case _ => liveThreads(0) // here, there has to be threads that have not even started + } + updateState(tid) + acc :+ tid + } + schedule #:: schedules() + } + } +} diff --git a/src/main/scala/pubsub/instrumentation/TestUtils.scala b/src/main/scala/pubsub/instrumentation/TestUtils.scala new file mode 100644 index 0000000..3ab651a --- /dev/null +++ 
b/src/main/scala/pubsub/instrumentation/TestUtils.scala @@ -0,0 +1,19 @@ +package instrumentation + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext.Implicits.global + +object TestUtils { + def failsOrTimesOut[T](action: => T): Boolean = { + val asyncAction = Future { + action + } + try { + Await.result(asyncAction, 2000.millisecond) + } catch { + case _: Throwable => return true + } + return false + } +} diff --git a/src/main/scala/pubsub/network/TCPReader.scala b/src/main/scala/pubsub/network/TCPReader.scala new file mode 100644 index 0000000..b7528dd --- /dev/null +++ b/src/main/scala/pubsub/network/TCPReader.scala @@ -0,0 +1,32 @@ +package pubsub.network + +import java.net.Socket + +import pubsub.Client +import pubsub.collection.BoundedBuffer +import pubsub.command._ + +class TCPReader(id: Int, socket: Socket, buffer: BoundedBuffer[Command]) { + val client = new Client(socket, id) + val reader = new CommandReader(socket.getInputStream(), client) + + def read(): Unit = { + client.sayHello() + println(s"New client: ${client.name}") + while(client.isConnected) { + + reader.fetchCommand() match { + case c: EndOfClient => + buffer.put(c) + println(c) + client.sayGoodbye() + client.close() + case _: MalformedCommand => + client.invalidPreviousCommand() + case command => + println(command) + buffer.put(command) + } + } + } +} diff --git a/src/test/scala/pubsub/BoundedBufferSuite.scala b/src/test/scala/pubsub/BoundedBufferSuite.scala new file mode 100644 index 0000000..ebc93fd --- /dev/null +++ b/src/test/scala/pubsub/BoundedBufferSuite.scala @@ -0,0 +1,43 @@ +package pubsub + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext.Implicits.global +import scala.collection.mutable.HashMap +import pubsub.collection._ +import org.junit._ +import org.junit.Assert.assertEquals + +import instrumentation._ +import instrumentation.Stats._ +import TestHelper._ +import TestUtils._ + +class BoundedBufferSuite { + @Test def `Should work in a sequential setting`: Unit = { + testSequential[(Int, Int, Int, Int)]{ sched => + val buffer = new SchedulableBoundedBuffer[Int](4, sched) + buffer.put(1) + buffer.put(2) + buffer.put(3) + buffer.put(4) + (buffer.take(), + buffer.take(), + buffer.take(), + buffer.take()) + }{ tuple => + (tuple == (1, 2, 3, 4), s"Expected (1, 2, 3, 4) got $tuple") + } + } + + @Test def `Should work when Thread 1: 'put(1)', Thread 2: 'take' and a buffer of size 1`: Unit = { + testManySchedules(2, sched => { + val prodCons = new SchedulableBoundedBuffer[Int](1, sched) + (List(() => prodCons.put(1), () => prodCons.take()), + args => (args(1) == 1, s"expected 1 your 'take' implementation returned ${args(1)}")) + }) + } + + + @Rule def individualTestTimeout = new org.junit.rules.Timeout(400 * 1000) +}
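
For reference, one possible (unofficial) way to complete the `???` bodies of
`pubsub.collection.BoundedBuffer` is sketched below. It treats the inherited `buffer`, `head` and
`count` as a circular queue and relies on the `Monitor` primitives (`synchronized`, `wait()`,
`notifyAll()`), which the `SchedulableBoundedBuffer` used by the test suite above intercepts. This
is a sketch under those assumptions, not the graded reference solution.

package pubsub.collection

class BoundedBuffer[T](size: Int) extends AbstractBoundedBuffer[T](size) {
  // Sketch only: one possible implementation, not the official solution.

  private def isEmpty: Boolean = count == 0
  private def isFull: Boolean = count == buffer.size
  private def tail: Int = (head + count) % buffer.size

  override def put(e: T): Unit = synchronized {
    while (isFull) wait()   // block while no slot is free
    buffer(tail) = e        // write at the logical end of the queue
    count += 1
    notifyAll()             // wake consumers blocked in take()
  }

  override def take(): T = synchronized {
    while (isEmpty) wait()  // block while nothing is available
    val e = buffer(head)
    buffer.delete(head)     // clear the slot so the instrumentation records the removal
    head = (head + 1) % buffer.size
    count -= 1
    notifyAll()             // wake producers blocked in put()
    e
  }
}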