diff --git a/LICENSE.txt b/LICENSE similarity index 93% rename from LICENSE.txt rename to LICENSE index e674b8dd..261eeb9e 100644 --- a/LICENSE.txt +++ b/LICENSE @@ -1,22 +1,3 @@ - - Scala-Gopher. - Copyright 2013-2016 Ruslan Shevchenko - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this project except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ------------------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -194,4 +175,27 @@ END OF TERMS AND CONDITIONS + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..ab7bab67 --- /dev/null +++ b/NOTICE @@ -0,0 +1,5 @@ + + Scala-Gopher. + Copyright 2013-2020 Ruslan Shevchenko + + diff --git a/README.md b/README.md index e5fc8207..bcf41272 100644 --- a/README.md +++ b/README.md @@ -1,164 +1,145 @@ - -## Gopher: asynchronous implementation of go-like channels/selectors in scala +# 🇺🇦 HELP UKRAINE + +I'm the creator of this project. +My country, Ukraine, [is being invaded by the Russian Federation, right now](https://war.ukraine.ua). If you want to help my country to fight, consider donating to [charity supporting Ukrainian army](https://www.comebackalive.in.ua/). More options is described on [support ukraine](https://supportukrainenow.org/) site. + +# Gopher: asynchronous implementation of go-like channels/selectors in scala ======= ### Dependences: - * scala 2.12.1 or 2.11.8 - * akka 2.4.14 + - * scala-async 0.9.6 +For scala 3.1.1+: + + libraryDependencies += "com.github.rssh" %% "scala-gopher" % "4.0.2" + +For scala 3 and 3.1.0: + + libraryDependencies += "com.github.rssh" %% "scala-gopher" % "2.1.0" -#### Download: +Note, that 3.0.x have no new functionality agains 2.1.0 but need to be a next major release because of binary incompability caused by difference between dotty-cps-async-0.9.5 and 0.9.7. 
- libraryDependencies += "com.github.rssh" %% "scala-gopher" % "0.99.8" +For scala2: -(or `0.99.9-SNAPSHOT` for development version). + libraryDependencies += "com.github.rssh" %% "scala-gopher" % "0.99.15" + +(For 0.99.x documentation look at README at 0.99x branch: https://github.com/rssh/scala-gopher/tree/0.99x) +The main differences between 0.99 and 2.0.0 is described in https://github.com/rssh/scala-gopher/blob/master/docs/changes-2.0.0.md Scala-gopher is open source (license is Apache2); binaries are available from the maven-central repository. ## Overview - Scala-gopher is a scala library, build on top of Akka and SIP-22 async, which provide an implementation of - CSP [Communicate Sequential Processes] primitives, known as 'Go-like channels.' Also, analogs of go/defer/recover control-flow constructions are provided. + Scala-gopher is a scala library, build on top of dotty-cps-async, which provide an implementation of + CSP [Communicate Sequential Processes] primitives, known as 'Go-like channels.' Note, which this is not an emulation of go language structures in Scala, but rather a reimplementation of the main ideas in 'scala-like' manner. - ### Initialization - You need an instance of gopherApi for creating channels and selectors. The easiest way is to use one as Akka extension: + You need a given of gopherApi for creating channels and selectors. - import akka.actors._ import gopher._ + import cps._ + import cps.monads.FutureAsyncMonad ...... - val actorSystem = ActorSystem.create("system") - val gopherApi = Gopher(actorSystem) - - In akka.conf we can place config values in 'gopher' entry. + given Gopher[Future]() -## Control flow constructions: + type parameter can be any monad, which should implement CpsSchedulingMonad typeclass. -### goScope - - `goScope[T](body: =>T)` is expression, which allows to use inside `body` go-like 'defer' and 'recover' expression. - Typical usage: -~~~ scala -import gopher._ -import java.io._ - -object CopyFile { - - def main(args: Array[String]): Unit = { - if (args.length != 3) { - System.err.println("usage: copy in out"); - } - copy(new File(args(1)), new File(args(2))) - } - - def copy(inf: File, outf: File): Long = - goScope { - val in = new FileInputStream(inf) - defer { - in.close() - } - val out = new FileOutputStream(outf); - defer { - out.close() - } - out.getChannel() transferFrom(in.getChannel(), 0, Long.MaxValue) - } - -} -~~~ - Here statements inside defer block executed at the end of goScope block in reverse order. - - Inside goScope we can use two pseudo functions: - -* `defer(body: =>Unit):Unit` - defer execution of `body` until the end of `go` or `goScope` block and previous defered blocks. -* `recover[T](f:PartialFunction[Throwable,T]):Boolean` -- can be used only within `defer` block with next semantics: -* * if exception was raised inside `go` or `goScope` than `recover` try to apply `f` to this exception and -* * * if `f` is applicable - set `f(e)` as return value of the block and return true -* * * otherwise - do nothing and return false -* * during normal exit - return false. - -You can look on `defer` as on stackable finally clauses, and on `defer` with `recover` inside as on `catch` clause. Small example: - -~~~ scala -val s = goScope{ - defer{ recover{ - case ex: Throwable => "CCC" - } } - throw new Exception("") - "QQQ" - } -~~~ - - will set `s` to "CCC". - - - -### go - - `go[T](body: =>T)(implicit ex:ExecutionContext):Future[T]` starts asynchronous execution of `body` in provided execution context. 
Inside go we can use `defer`/`recover` clauses and blocked read/write channel operations.
-
- Go implemented on top of [SIP-22](http://docs.scala-lang.org/sips/pending/async.html) async and share the same limitations. In addition to async/await transfoirm `go` provide lifting up asynchronous expressions inside some well-known hight-order functions (i.e. it is possible to use async operations inside for loops). Details are available in the tech report: https://arxiv.org/abs/1611.00602 .

 ## Channels

 Channels are used for asynchronous communication between execution flows.
-
-When using channel inside *go* block, you can look at one as on classic blocked queue with fixed size with methods read and write:
+When using a channel inside an async block, you can think of it as a classic blocking queue of fixed size with read and write methods:

-    val channel = gopherApi.makeChannel[Int];
+    val channel = makeChannel[Int];

-    go {
+    async {
       channel.write(a)
     }
     ......
-    go {
-      val i = channel.read
+    async {
+      val i = channel.read()
     }

-
 * `channel.write(x)` - send x to channel and wait until one will be sent (it is possible us as synonyms `channel<~x` and `channel!x` if you prefer short syntax)
 * `channel.read` or `(channel ?)` - blocking read

-Blocking operations can be used only inside `go` or `Async.await` blocks.
+Blocking operations can be used only inside `async` blocks (i.e. where `await` is available).

 Outside we can use asynchronous version:

 * `channel.awrite(x)` will write `x` and return to us `Future[Unit]` which will be executed after x will send
-* `channel.aread` will return future to the value, which will be read.
+* `channel.aread()` will return a future of the value that will be read.

-Also, channels can be closed. After this attempt to write will cause throwing 'ClosedChannelException.' Reading will be still possible up to 'last written value', after this attempt to read will cause the same exception.
-Note, closing channels is not mandatory; unreachable channels are garbage-collected regardless of they are closed or not.
+Channels can be closed. After that, any attempt to write will throw a `ChannelClosedException`. Reading remains possible up to the last written value; after that, an attempt to read will throw the same exception. Also, each channel provides a `done` input that fires close events.
+Closing a channel is not mandatory; unreachable channels are garbage-collected whether they are closed or not.

 Channels can be buffered and unbuffered. In a unbuffered channel, write return control to the caller after another side actually will start processing; buffered channel force provider to wait only if internal channel buffer is full.

-Also, you can use only `Input` or `Output` interfaces, where appropriative read/write operations are defined.
-For `Input`, exists usual collection functions, like `map`, `zip`, `takeN`, `fold` ... etc. Scala Iterable can be represented as `channels.Input` via method `gopherApi.iterableInput`. Also, we can use Scala futures as channels, which produce one value and then closes. For obtaining such input use `gopherApi.futureInput`.
+Also, you can restrict a channel to the `ReadChannel` or `WriteChannel` interface, where the appropriate read/write operations are defined.
+For `ReadChannel`, the usual stream functions, like `map`, `zip`, `takeN`, `fold`, etc., are available.

-`|` (i.e. or) operator used for merged inputs, i.e. `(x|y).read` will read a value from channel x or y when one will be available.
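Putting the operations above together, here is a minimal end-to-end sketch (assuming the `given Gopher[Future]` initialization shown earlier; the names `ch`, `producer` and `consumer` are illustrative, not part of the API):

~~~ scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future]()

val ch = makeChannel[Int](10)          // buffered channel with capacity 10

// asynchronous write, usable outside an async block: returns Future[Unit]
val producer: Future[Unit] = ch.awrite(42)

// direct-style read is available only inside an async block
val consumer: Future[Int] = async[Future] {
  ch.read() + 1
}
~~~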
+For example, here is direct translation of golang code: +~~~ scala + +val channel = gopher.makeChannel[Int](100) + +val producer = channel.awrite(1 to 1000) + +@volatile var sum = 0; +val consumer = async { + var done = false + while(!done) + val i = channel.read() + sum = sum + i + if i==1000 then + done = true +} + +~~~ + +last loop can be repharased in more scala way as: -For each input and output you can create a facility with tracked timeout, i.e. if `in` is input, then ~~~ scala - val (inReady, inTimeouts) = in.withInputTimeouts(10 seconds) +val sum = (channel.take(1000)).fold(0)((s,i) => s+i) ~~~ -will return two inputs, where reading from `inReady` will return the same as reading from `in`. And if waiting for reading takes longer than 10 seconds then the value of timeout will be available in `inTimeouts`. Analogically we can create output with timeouts: + + + Here is filtered channel, wich produce prime numbers: + ~~~ scala - val (outReady, outTimeouts) = out.withOutputTimeouts(10 seconds) + def filter0(in:Channel[Future,Int,Int]):ReadChannel[Future,Int] = + val filtered = makeChannel[Int]() + var proxy: ReadChannel[Future, Int] = in; + async { + while(true) { + val prime = proxy.read() + proxy = proxy.filter(_ % prime != 0) + filtered.write(prime) + } + } + filtered ~~~ + +(less imperative way to do the same, described later in `select.fold`). -Also, note that you can provide own Input and Output implementations by implementing callback `cbread` and `cbwrite` methods. +Any Iterable can be represented as `ReadChannel` via extension method `asReadChannel`. + +Also, we can use Scala futures as channels, which produce one value and then closes. For obtaining such input use `gopherApi.futureInput`. + +`|` (i.e. or) operator used for merged inputs, i.e. `(x|y).read` will read a value from channel x or y when one will be available. + +Also, note that you can provide own ReadChannel and WriteChannel implementations by implementing ```addReader/addWriter``` methods. ## Select loops and folds @@ -167,90 +148,88 @@ Also, note that you can provide own Input and Output implementations by implemen from a set of blocking operations select one who is ready to input/output and run it. The usual pattern of channel processing in go language is to wrap select operation into an endless loop. - - Gopher provides similar functionality: ~~~ scala -go{ - for( s <- gopherApi.select.forever) - s match { +async[Future]{ + while(!done) + select { case i:channelA.read => ..do-something-with-i case ch:channelB.read .. do-something-with-b - } + } } ~~~ Here we read in the loop from channelA or channelB. - Body of select loop must consist only of one `match` statement where - left parts in `case` clauses must have the following form + select accepts partial functions syntax, left parts in `case` clauses must have the following form * `v:channel.read` (for reading from channel) - * `v:Tye if (v==read(ch))` (for reading from channel or future) * `v:channel.write if (v==expr)` (for writing `expr` into channel). - * `v:Type if (v==write(ch,expr))` (for writing `expr` into channel). - * `_` - for 'idle' action. + * `v:Time.after if (v==expr)` (for timeouts). + Inside case actions, we can use blocking read/writes and await operations. - For endless loop inside `go` we can use the shortcut with syntax of partial function: - ~~~ scala - gopherApi.select.forever{ - case i:channelA.read => ... do-something-with-i - case ch:channelB.read ... 
do-something-with-b - } +async{ + var done = false + while(!done) { + select { + case x: ch.read => + sum = sum+x + if (x > 100) { + done = true + } + } + } +} ~~~ - - - Inside case actions, we can use blocking read/writes and await operations. Call of doExit in the implicit instance of `FlowTermination[T]` (for a forever loop this is `FlowTermination[Unit]`) can be used for exiting from the loop. `select.exit` and `select.shutdown` macroses can be used as shortcuts. - - Example: + + select.loop can be used for less imperative code organization: ~~~ scala -val channel = gopherApi.makeChannel[Int](100) - -val producer = channel.awrite(1 to 1000) - -@volatile var sum = 0; -val consumer = gopherApi.select.forever{ - case i: channerl.read => - sum = sum + i - if (i==1000) { - select.shutdown() - } +async{ + select.loop{ + case x: ch.read => + sum = sum+x + (x <= 100) + } } - -Await.ready(consumer, 5.second) ~~~ - A combination of variable and select loop better modeled with help 'fold over select' construction: + Here, the branch inside select should return true or false. If true -- loop will be continued, if false - finished. + + + select.fold (or afold - as variant which is alredy wrapped in async) provide an abstraction for iterating over set of + events by applying function to state: ~~~ scala -val sum = gopherApi.select.afold(0) { (state, selector) => - selector match { - case i: channel.read => - val nstate = state + i - if (i==1000) { - select.exit(nstate) - } - nstate +def filter1(in:Channel[Future,Int,Int]):ReadChannel[Future,Int] = + val q = makeChannel[Int]() + val filtered = makeChannel[Int]() + select.afold(in){ ch => + select{ + case prime: ch.read => + filtered.write(prime) + ch.filter(_ % prime != 0) + } } -} + filtered ~~~ + The argument to the fold function is state. + Function should or produce next state (as in `filter1` example) or produce special value SelectFold.Done(x): - More than one variables in state can be modelled with partial function case syntax: ~~~ scala -val fib = select.afold((0,1)) { case ((x,y), s) => - s match { +val fib = select.fold((0,1)) { case (x,y) => + select{ case x:channel.write => (y,y+x) - case q:quit.read => select.exit((x,y)) + case q:quit.read => SelectFold.Done((x,y)) } } ~~~ - Also, we can use 'map over select' to represent results of handling of different events as input side of channel: + Also, we can use 'map over select' to represent results of handling of different events as input side of a channel: ~~~ scala val multiplexed = select amap { @@ -260,261 +239,41 @@ val multiplexed = select amap { ~~~ - For using select operation not enclosed in a loop, scala-gopher provide - *select.once* syntax: - -~~~ scala -gopherApi.select.once{ - case i: channelA.read => s"Readed(${i})" - case x:channelB.write if (x==1) => s"Written(${x})" -} -~~~ - - - Such form can be called from any environment and will return `Future[String]`. Inside `go` you can wrap this in await of use 'for' syntax as with `forever`. - -~~~ scala -go { - ..... - val s = for(s <-gopherApi.select.once) - s match { - case i: channelA.read => s"Readed(${i})" - case x: channelB.write if (x==1) => s"Written(${x})" - } - -} -~~~ - +## Done signals. - and afold become fold: + Sometimes it is useful to receive a message when some `ReadChannel` becomes closed. Exists a way to receive close notification in selector using `done` pseudo-channel, which is available for each 'normal' channel. When the channel is closed, all readers of done channels receive notifications. 
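A small sketch (assuming, as the API above suggests, that `done` can be read like an ordinary channel of `Unit`, and the same imports and `given Gopher[Future]` setup as before; names are illustrative) shows that the close event can also be awaited outside of a select:

~~~ scala
val ch = makeChannel[Int]()

// sketch: obtain a Future that completes when the channel is closed
val closed: Future[Unit] = ch.done.aread()

closed.foreach(_ => Console.println("channel was closed"))
~~~

Inside a select, the same notification is handled with a `done` branch: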
~~~ scala -go { - ... - val sum = select.fold(0) { (n,s) => - s match { - case x: channelA.read => n+x - case q: quit.read => select.exit(n) - } - } -} -~~~ - - amap - map - -~~~ scala -val multiplexed = for(s <- select) yield - s match { - case x:ch1.read => (s1,x) - case y:ch2.read => (s2,y) - } -~~~ - - -## Effected{Input,Output,Channel} - - One useful programming pattern, often used in CSP-style programming: have a channel from wich we read (or to where we write) as a part of a state. In Go language, this is usually modelled as a mutable variable, changed inside the same select statement, where one is read/written. - - In scala-gopher, we have the ability to use a technique of 'EffectedChannel', which can be seen as an entity, which holds channel, can be used in read/write and can be changed only via effect (operation, which accepts previous state and return next). - -Let's look on example: - -~~~ scala - def generate(n:Int, quit:Promise[Boolean]):Channel[Int] = - { - val channel = makeChannel[Int]() - channel.awriteAll(2 to n) andThen (_ => quit success true) - channel - } - - def filter(in:Channel[Int]):Input[Int] = - { - val filtered = makeChannel[Int]() - val sieve = makeEffectedInput(in) - sieve.aforeach { prime => - sieve <<= (_.filter(_ % prime != 0)) - filtered <~ prime - } - filtered - } -~~~ - -Here in 'filter', we generate a set of prime numbers, and make a sieve of Eratosthenes by sequentially applying 'filter' effect to state of sieve EffectedInput. - - -## Transputers - - The logic of data transformation between channels can be encapsulated in special `Transputer` concept. (Word 'transputer' was chosen - as a reminder about INMOS processor, for which one of the first CSP languages, Occam, was developed). You can view on transputer as - representation of restartable process that consists from: - - * Set of named input and output ports. - * Logic for propagating information from the input to the output ports. - * Possible state - * Logic of error recovering. - -I.e. we saw that Transputer is similar to Actor with the following difference: - When Actor provides reaction to incoming messages from the mailbox and sending signals to other actors, Transputers provide processing of incoming messages from input ports and sending outcoming messages to output ports. When operations inside Actor must not be blocked, operations inside Transputer can wait. - -Transformers are build hierarchically with help of 3 operations: - - * select (logic is execution of a select statement ) - * parallel combination (logic is parallel execution of parts) - * replication (logic is parallel execution of a set of identical transformers.) - -### Select transputer - - Let's look at a simple example: transputer with two input ports and one output. -When the same number has come from `inA` and `inB`, then -transputer prints `Bingo` on console and output this number to `out`: - -~~~ scala - trait BingoTransputer extends SelectTransputer - { - val inA = InPort[Int] - val inB = InPort[Int] - val out = OutPort[Boolean] - - loop { - case x:inA.read => - y = inB.read - out.write(x==y) - if (x==y) { - Console.println(s"Bingo: ${x}") - } - } - - } -~~~ - - A select loop is described in `loop` statement. - - To create transputer we can use `gopherApi.makeTransputer` call: -~~~ scala -val bing = gopherApi.makeTransputer[BingoTransputer] -~~~ - after the creation of transputer, we can create channels, connect one to ports and start transformer. 
- -~~~ scala -val inA = makeChannel[Int]() -bingo.inA.connect(inA) -val inB = makeChannel[Int]() -bingo.inB.connect(inB) -val out = makeChannel[Int]() -bingo.out.connect(out) - -val shutdownFuture = bingo.start() -~~~ - - - Then after we will write to `inA` and `inB` values `(1,1)` then `true` will become available for reading from `out`. - -#### Error recovery - - On an exception from a loop statement, transputer will be restarted with ports, connected to the same channels. Such behaviour is default; we can configure one by setting recovery policy: - -~~~ scala -val t = makeTransputer[MyType].recover { - case ex: MyException => SupervisorStrategy.Escalate - } -~~~ - - Recovery policy is a partial function from throwable to akka `SupervisorStrategy.Direction`. Escalated exceptions are passed to parent transputers or to TransputerSupervisor actor, which handle failures according to akka default supervisor strategy. - - How many times transputer can be restarted within given period can be configured via failureLimit call: - -~~~ scala - t.failureLimit(maxFailures = 20, windowDuration = 10 seconds) -~~~ - - This setting means that if 20 failures will occur during 10 seconds, then exception Transputer.TooManyFailures will be escalated to parent. - -### Par transputers. - - 'Par' is a group of transputers running in parallel. Par transputer can be created with the help of plus operator: - -~~~ scala -val par = (t1 + t1 + t3) -par.start() -~~~ - - When one from `t1`, `t2`, ... is stopped or failed, then all other members of `par` are stopped. After this `par` can be restarted according to current recovery policy. - - -### Replication - - Replicated transputer is a set of identical transputers t_{i}, running in parallel. It cam be created with `gopherApi.replicate` call. Next code fragment: - -~~~ scala -val r = gopherApi.replicate[MyTransputer](10) -~~~ - - will produce ten copies of MyTransputer (`r` will be a container transputer for them). Ports of all replicated internal transputers will be shared with ports of the container. (I.e. if we will write something to input port then it will be read by one of the replicas; if one of the replicas will write something to out port, this will be visible in out port of container.) - - Mapping from a container to replica port can be changed from sharing to other approaches, like duplicating or distributing, via applying port transformations. - - For example, next code fragment: - -~~~ scala -r.inA.duplicate() - .inB.distribute( _.hashCode ) + while(!done) + select{ + case x:ch.read => Console.println(s"received: ${x}") + case _:ch.done.read => Console.println(s"done") + done = true + } ~~~ - - will set port `inA` be duplicated in replicas (i.e. message, send to container port `inA` will be received by each instance) and messages from `inB` will be distributed by hashcode: i.e. messages with the same hashcode will be directed to the same replica. Such behaviour is useful when we keep in replicated transputer some state information about messages. - - Stopping and recovering of replicated transformer is the same as in `par` (i.e. stopping/failing of one instance will cause stopping/failing of container) - - Also note, that we can receive a sequence of replicated instances with the help of `ReplicateTransformer.replicated` method. -## Unsugared interfaces - - It is worth to know that exists gopher API without macro-based syntax sugar. 
- -~~~ scala -( - new ForeverSelectorBuilder(gopherApi) - .reading(ch1){ x => something-x } - .writing(ch2,y){ y => something-y } - .idle(something idle).go -) -~~~ - - can be used instead of appropriative macro-based call. - - Moreover, for tricky things exists even low-level interface, which can combine computations by adding to functional interfaces, similar to continuations: - -~~~ scala -{ - val selector = new Selector[Unit](gopherApi) - selector.addReader(ch1, cont=>Some{ in => something-x - Future successful cont - } - ) - selector.addWriter(ch2, cont=>Some{(y,{something y; - Future successful cont - })}) - selector.addIdle(cont => {..do-something-when-idle; Future successful cont}) -} -~~~ - - Please, consult with source code for details. + Note, that if we query some channel and it's done channel in the same select, and done channel is not aliased in some vairable, then done handler will be called first after channel close. -## Additional Informatiom - ---------------------- +# References: +---------------------- -* API reference: http://rssh.github.io/scala-gopher/api/index.html#package +## 2.0.x implementation * source code: https://github.com/rssh/scala-gopher +* scaladoc: https://rssh.github.io/scala-gopher/api/jvm/index.html + +## [0.99.x] implementation: +* source code: https://github.com/rssh/scala-gopher/tree/0.99x * presentations: * Odessa Java/Scala Labs; Kiev Scala Meetup: Oct. 2014: http://www.slideshare.net/rssh1/scala-gopher2014 * Wix R&D meetup. Mart 2016: http://www.slideshare.net/rssh1/csp-scala-wixmeetup2016 - * Scala Symposium. Oct. 2016. Amsterdam. http://http://www.slideshare.net/rssh1/scalagopher-cspstyle-programming-techniques-with-idiomatic-scala -* techreport: http://www.slideshare.net/rssh1/scalagopher-cspstyle-programming-techniques-with-idiomatic-scala - + * Scala Symposium. Oct. 2016. Amsterdam. 
http://www.slideshare.net/rssh1/scalagopher-cspstyle-programming-techniques-with-idiomatic-scala +* techreport: https://arxiv.org/abs/1611.00602 - Some related links: +## CSP-Related links: * [Communicating Sequential Processes book by Tony Hoare](http://www.usingcsp.com) * [brief history of CSP in Bell-labs](http://swtch.com/~rsc/thread/) -* [introduction article about go defer/recover](http://blog.golang.org/defer-panic-and-recover) + + diff --git a/build.sbt b/build.sbt index 3e2f747e..ce5add4a 100644 --- a/build.sbt +++ b/build.sbt @@ -1,75 +1,63 @@ - -name:="scala-gopher" - -organization:="com.github.rssh" - -scalaVersion := "2.12.1" -crossScalaVersions := Seq("2.11.8", "2.12.1") - -resolvers += Resolver.sonatypeRepo("snapshots") - -resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/" - -scalacOptions ++= Seq("-unchecked","-deprecation", "-feature" - /* , "-Ymacro-debug-lite" */ - /* , "-Ydebug" , "-Ylog:lambdalift" */ - ) - -libraryDependencies <+= scalaVersion( "org.scala-lang" % "scala-reflect" % _ ) - -libraryDependencies += "org.scala-lang.modules" %% "scala-async" % "0.9.6" -//libraryDependencies += "org.scala-lang.modules" %% "scala-async" % "0.9.6-SNAPSHOT" - -libraryDependencies += "org.scalatest" %% "scalatest" % "3.0.0" % "test" - -libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.4.14" - -//TODO: enable after 1.0 -//libraryDependencies += "com.typesafe.akka" %% "akka-stream-experimental" % "0.9" - -//testOptions in Test += Tests.Argument(TestFrameworks.ScalaTest, "-n", "Now") -//fork in Test := true -//javaOptions in Test += s"""-javaagent:${System.getProperty("user.home")}/.ivy2/local/com.github.rssh/trackedfuture_2.11/0.3/jars/trackedfuture_2.11-assembly.jar""" - -version:="0.99.8" - - - -publishMavenStyle := true - -publishTo <<= version { (v: String) => - val nexus = "https://oss.sonatype.org/" - if (v.trim.endsWith("SNAPSHOT")) - Some("snapshots" at nexus + "content/repositories/snapshots") - else - Some("releases" at nexus + "service/local/staging/deploy/maven2") -} - - -publishArtifact in Test := false - -pomIncludeRepository := { _ => false } - -pomExtra := ( - http://rssh.github.com/scala-gopher - - - Apache 2 - http://www.apache.org/licenses/LICENSE-2.0 - repo - - - - git@github.com:rssh/scala-gopher.git - scm:git:git@github.com:rssh/scala-gopher.git - - - - rssh - Ruslan Shevchenko - rssh.github.com - - +//val dottyVersion = "3.0.0-RC2-bin-SNAPSHOT" +val dottyVersion = "3.3.5" +//val dottyVersion = "3.1.3-RC1-bin-SNAPSHOT" +//val dottyVersion = dottyLatestNightlyBuild.get + +ThisBuild/version := "4.0.7" +ThisBuild/versionScheme := Some("semver-spec") + +val sharedSettings = Seq( + organization := "com.github.rssh", + scalaVersion := dottyVersion, + name := "scala-gopher", + //resolvers += "Local Ivy Repository" at "file://"+Path.userHome.absolutePath+"/.ivy2/local", + libraryDependencies += "io.github.dotty-cps-async" %%% "dotty-cps-async" % "1.0.2", + libraryDependencies += "org.scalameta" %%% "munit" % "1.0.4" % Test, ) - +lazy val root = project + .in(file(".")) + .aggregate(gopher.js, gopher.jvm) + .settings( + git.remoteRepo := "git@github.com:rssh/scala-gopher.git", + SiteScaladocPlugin.scaladocSettings(GopherJVM, gopher.jvm / Compile / packageDoc / mappings, "api/jvm"), + SiteScaladocPlugin.scaladocSettings(GopherJS, gopher.js / Compile / packageDoc / mappings, "api/js"), + siteDirectory := baseDirectory.value / "target" / "site", + publishArtifact := false, + ).enablePlugins(GhpagesPlugin, SiteScaladocPlugin) 
+ + +// for scala-native support we need munit +lazy val gopher = crossProject(JSPlatform, JVMPlatform) + .in(file(".")) + .settings(sharedSettings) + .disablePlugins(SitePlugin) + .disablePlugins(SitePreviewPlugin) + .jvmSettings( + //scalacOptions ++= Seq( "-unchecked", "-Xcheck-macros", "-Ycheck:macro", "-uniqid", "-Xprint:types", "-explain" ), + // Error in dotty + scalacOptions ++= Seq( "-unchecked", "-Xprint:types" ), + fork := true, + /* + javaOptions ++= Seq( + "--add-opens", + "java.base/java.lang=ALL-UNNAMED", + s"-javaagent:${System.getProperty("user.home")}/.ivy2/local/com.github.rssh/trackedfuture_3/0.5.0/jars/trackedfuture_3-assembly.jar" + ) + */ + Compile / doc / scalacOptions := Seq("-groups", + "-source-links:shared=github://rssh/scala-gopher/master#shared", + "-source-links:jvm=github://rssh/scala-gopher/master#jvm"), + mimaPreviousArtifacts := Set( "com.github.rssh" %% "scala-gopher" % "4.0.5" ) + ).jsSettings( + libraryDependencies += ("org.scala-js" %%% "scalajs-java-logging" % "1.0.0").cross(CrossVersion.for3Use2_13), + // TODO: switch to ModuleES ? + scalaJSLinkerConfig ~= { _.withModuleKind(ModuleKind.CommonJSModule) }, + scalaJSUseMainModuleInitializer := true, + Compile / doc / scalacOptions := Seq("-groups", + "-source-links:shared=github://rssh/scala-gopher/master#shared", + "-source-links:js=github://rssh/scala-gopher/master#js"), + ) + +lazy val GopherJVM = config("gopher.jvm") +lazy val GopherJS = config("gopher.js") diff --git a/docs/changes-2.0.0.md b/docs/changes-2.0.0.md new file mode 100644 index 00000000..3da8346c --- /dev/null +++ b/docs/changes-2.0.0.md @@ -0,0 +1,19 @@ + +Gopher 2.0 rewritten from scratch for Scala3. + + This rewrite allows me to review some design decision [from 2013 ]: + +- In gopher-0.99.x, the primary internal entity was 'flow,’ which maintains a set of channels and selectors and coordinates the call of the next asynchronous callback in a program. Such a scheme provides a correct algorithm for context switching but has quite limited scalability. For example, if we have few channels connected inside one flow and want to filter one of them, we can't just ignore filtered-out values. Instead, we need to propagate an empty callback to allow context switching to be transferred via our flow. In gopher-2.0, we change this: channels and selectors directly work with program control-flow without using intermediate entities. + +- In gopher-0.99.x, channel primitives and scheduler were built on top of Akka. In gopher-2.0, we implemented primitives from scratch. + +- Gopher-0.99.x provides a go statement as a wrapper around scala-async with the addition of 'go-like' error handling. +Gopher-2.0 uses monadic async/await from dotty-cps-async. Since dotty-cps-async fully supports try/catch, additional error handling constructions are now not needed. Also, translation of high-order function calls moved to dotty-cps-async. + +- In gopher-0.99x select statement is represented as a match statement, which should be inside go-expression. Gopher-2.0 select statements have the form of passing a partial function to the `select` object. + +- Gopher-0.99.x transputers were an exciting proof of concept, but support and maintenance were a significant part of scala-gopher support when transputers’ usage was relatively low. So, we exclude this part from the package. If you use this feature in production, you can create your port on top of scala-gopher-2.0.0 or hire me to do the same. 
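To make the select-syntax and error-handling points above concrete, here is a small sketch (assuming a `given Gopher[Future]` in scope; the channel and function names are illustrative, not part of the library): a branch is just a `case` of the partial function passed to the `select` object, and a closed channel surfaces as a `ChannelClosedException` that can be caught with a plain try/catch inside `async`.

~~~ scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future]()

val ch = makeChannel[Int]()

// consume values until the channel is closed; no special go/defer/recover constructs are needed
def consume(): Future[Unit] = async[Future] {
  try {
    while (true) {
      select {
        case x: ch.read => Console.println(s"received: $x")
      }
    }
  } catch {
    case _: ChannelClosedException => Console.println("channel closed")
  }
}
~~~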
+ + +Overall, the approach from 2013 can be described as 'Rich DSL' and a special syntax for each feature, when an approach from 2021 -- minimal DSL in favor of reusing existing language constructions and minimizing the learning curve. + diff --git a/notes/0.99.1.markdown b/docs/history/notes/0.99.1.markdown similarity index 100% rename from notes/0.99.1.markdown rename to docs/history/notes/0.99.1.markdown diff --git a/docs/history/notes/0.99.12.markdown b/docs/history/notes/0.99.12.markdown new file mode 100644 index 00000000..44028781 --- /dev/null +++ b/docs/history/notes/0.99.12.markdown @@ -0,0 +1 @@ +- scala 2.13.0 diff --git a/notes/0.99.2.markdown b/docs/history/notes/0.99.2.markdown similarity index 100% rename from notes/0.99.2.markdown rename to docs/history/notes/0.99.2.markdown diff --git a/notes/0.99.3.markdown b/docs/history/notes/0.99.3.markdown similarity index 100% rename from notes/0.99.3.markdown rename to docs/history/notes/0.99.3.markdown diff --git a/notes/0.99.4.markdown b/docs/history/notes/0.99.4.markdown similarity index 100% rename from notes/0.99.4.markdown rename to docs/history/notes/0.99.4.markdown diff --git a/notes/0.99.5.markdown b/docs/history/notes/0.99.5.markdown similarity index 100% rename from notes/0.99.5.markdown rename to docs/history/notes/0.99.5.markdown diff --git a/notes/0.99.6.markdown b/docs/history/notes/0.99.6.markdown similarity index 100% rename from notes/0.99.6.markdown rename to docs/history/notes/0.99.6.markdown diff --git a/notes/0.99.7.markdown b/docs/history/notes/0.99.7.markdown similarity index 100% rename from notes/0.99.7.markdown rename to docs/history/notes/0.99.7.markdown diff --git a/docs/history/notes/0.99.8.markdown b/docs/history/notes/0.99.8.markdown new file mode 100644 index 00000000..58a6a6e2 --- /dev/null +++ b/docs/history/notes/0.99.8.markdown @@ -0,0 +1,9 @@ +- added support for select.timeout construct. +- added support for lifting-up await in hight-order functions. + ie in simplicified explanation: + for(x <- 1 to n) { s += read(x) } + is translated to `(1 to n).foreachAsync { async(s += await(aread(x))) }` + Details can be found in techreport: [https://arxiv.org/abs/1611.00602](https://arxiv.org/abs/1611.00602) + +- added support for select.fold construct. +- scala 2.12 diff --git a/docs/history/notes/0.99.9.markdown b/docs/history/notes/0.99.9.markdown new file mode 100644 index 00000000..c0195cc7 --- /dev/null +++ b/docs/history/notes/0.99.9.markdown @@ -0,0 +1,5 @@ +- akka 2.14.16 +- FoldSelected reimplemented without synthetic runtime channels. 
+- added channel.done event as select source +- added Input.closeless +- added error handling in select.error branch diff --git a/notes/about.markdown b/docs/history/notes/about.markdown similarity index 100% rename from notes/about.markdown rename to docs/history/notes/about.markdown diff --git a/notes/papers.markdown b/docs/history/notes/papers.markdown similarity index 100% rename from notes/papers.markdown rename to docs/history/notes/papers.markdown diff --git a/notes/techreport.pdf b/docs/history/notes/techreport.pdf similarity index 100% rename from notes/techreport.pdf rename to docs/history/notes/techreport.pdf diff --git a/notes/techreport.bib b/docs/history/techreport.bib similarity index 100% rename from notes/techreport.bib rename to docs/history/techreport.bib diff --git a/notes/techreport.tex b/docs/history/techreport.tex similarity index 100% rename from notes/techreport.tex rename to docs/history/techreport.tex diff --git a/js/src/main/scala/gopher/DeadlockDetected.scala b/js/src/main/scala/gopher/DeadlockDetected.scala new file mode 100644 index 00000000..5bd8a805 --- /dev/null +++ b/js/src/main/scala/gopher/DeadlockDetected.scala @@ -0,0 +1,4 @@ +package gopher + +class DeadlockDetected extends RuntimeException("Deadlock detected") + \ No newline at end of file diff --git a/js/src/main/scala/gopher/JSGopher.scala b/js/src/main/scala/gopher/JSGopher.scala new file mode 100644 index 00000000..e0db6454 --- /dev/null +++ b/js/src/main/scala/gopher/JSGopher.scala @@ -0,0 +1,55 @@ +package gopher + +import cps.* +import java.util.Timer +import java.util.logging.* +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.* +import scala.scalajs.concurrent.* + +class JSGopher[F[_]:CpsSchedulingMonad](cfg: JSGopherConfig) extends Gopher[F]: + + + def makeChannel[A](bufSize:Int = 0, autoClose: Boolean = false) = + if (!autoClose) then + if (bufSize == 0) then + impl.UnbufferedChannel[F,A](this) + else + impl.BufferedChannel[F,A](this,bufSize) + else + impl.PromiseChannel[F,A](this) + + + val time = new impl.JSTime(this) + + def setLogFun(logFun:(Level, String, Throwable|Null) => Unit): ((Level, String, Throwable|Null) => Unit) = + val r = currentLogFun + currentLogFun = logFun + r + + def log(level: Level, message: String, ex: Throwable| Null): Unit = + currentLogFun.apply(level,message,ex) + + def taskExecutionContext: ExecutionContext = JSExecutionContext.queue + + private var currentLogFun: (Level, String, Throwable|Null )=> Unit = { (level,message,ex) => + System.err.println(s"${level}:${message}"); + if !(ex eq null) then + ex.nn.printStackTrace() + } + + +object JSGopher extends GopherAPI: + + def apply[F[_]:CpsSchedulingMonad](cfg: GopherConfig):Gopher[F] = + val jsConfig = cfg match + case DefaultGopherConfig => JSGopherConfig("default") + case jcfg:JSGopherConfig => jcfg + new JSGopher[F](jsConfig) + + val timer = new Timer("gopher") + + + +val Gopher = JSGopher + diff --git a/js/src/main/scala/gopher/JSGopherConfig.scala b/js/src/main/scala/gopher/JSGopherConfig.scala new file mode 100644 index 00000000..741810c3 --- /dev/null +++ b/js/src/main/scala/gopher/JSGopherConfig.scala @@ -0,0 +1,3 @@ +package gopher + +case class JSGopherConfig(flawor: String = "default") extends GopherConfig \ No newline at end of file diff --git a/js/src/main/scala/gopher/Platform.scala b/js/src/main/scala/gopher/Platform.scala new file mode 100644 index 00000000..b2d2be95 --- /dev/null +++ b/js/src/main/scala/gopher/Platform.scala @@ -0,0 +1,6 @@ +package gopher + +object 
Platform: + + def initShared(): Unit = + SharedGopherAPI.setApi(JSGopher) \ No newline at end of file diff --git a/js/src/main/scala/gopher/impl/BaseChannel.scala b/js/src/main/scala/gopher/impl/BaseChannel.scala new file mode 100644 index 00000000..6f96d6ff --- /dev/null +++ b/js/src/main/scala/gopher/impl/BaseChannel.scala @@ -0,0 +1,112 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.collection.mutable.Queue +import scala.scalajs.concurrent.JSExecutionContext +import scala.util._ +import scala.util.control.NonFatal +import java.util.logging.Level + + +abstract class BaseChannel[F[_],A](override val gopherApi: JSGopher[F]) extends Channel[F,A,A]: + + protected val readers: Queue[Reader[A]] = Queue.empty + protected val writers: Queue[Writer[A]] = Queue.empty + protected val doneReaders: Queue[Reader[Unit]] = Queue.empty + protected var closed: Boolean = false + + override def close(): Unit = + closed = true + processClose() + + override def isClosed: Boolean = + closed + + protected def submitTask(f: ()=>Unit ): Unit = + JSExecutionContext.queue.execute{ () => + try + f() + catch + case NonFatal(ex) => + if (true) then + gopherApi.log(Level.WARNING, "impossible: exception in channel callback", ex) + ex.printStackTrace() + if (false) then + JSExecutionContext.queue.execute( ()=> throw ex ) + } + + def addReader(reader: Reader[A]): Unit = + readers.enqueue(reader) + process() + + def addWriter(writer: Writer[A]): Unit = + if (closed) { + writer.capture() match + case Expirable.Capture.Ready((a,f)) => + writer.markUsed() + submitTask( () => + f(Failure(new ChannelClosedException())) + ) + case _ => + } else { + writers.enqueue(writer) + process() + } + + def addDoneReader(reader: Reader[Unit]): Unit = + if (closed && isEmpty) { + reader.capture() match + case Expirable.Capture.Ready(f) => + reader.markUsed() + submitTask( () => f(Success(()))) + case Expirable.Capture.WaitChangeComplete => + // mb is blocked and will be evaluated in + doneReaders.enqueue(reader) + process() + case Expirable.Capture.Expired => + } else { + doneReaders.enqueue(reader) + process() + } + + protected def processClose(): Unit = + if (isEmpty) then + processCloseDone() + submitTask(processCloseReaders) + submitTask(processCloseWriters) + + protected def exhauseQueue[T <: Expirable[A],A](queue: Queue[T], action: A => Unit): Unit = + while(!queue.isEmpty) { + val v = queue.dequeue() + if (!v.isExpired) then + v.capture() match + case Expirable.Capture.Ready(a) => + v.markUsed() + action(a) + case _ => + // do nothing. + // exists case, when this is possible: wheb we close channel from + // select-group callback, which is evaluated now. + // in this case we will see one as evaluating. 
+ } + + protected def processCloseDone(): Unit = + val success = Success(()) + exhauseQueue(doneReaders, f => f(success)) + + + protected def processCloseReaders(): Unit = + val channelClosed = Failure(ChannelClosedException()) + exhauseQueue(readers, f => f(channelClosed)) + + protected def processCloseWriters(): Unit = + val channelClosed = Failure(ChannelClosedException()) + exhauseQueue(writers, { case (a,f) => f(channelClosed) }) + + protected def isEmpty: Boolean + + protected def process(): Unit + + + diff --git a/js/src/main/scala/gopher/impl/BufferedChannel.scala b/js/src/main/scala/gopher/impl/BufferedChannel.scala new file mode 100644 index 00000000..497c34b8 --- /dev/null +++ b/js/src/main/scala/gopher/impl/BufferedChannel.scala @@ -0,0 +1,93 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.collection.mutable.Queue +import scalajs.js +import scalajs.concurrent.JSExecutionContext +import scala.util._ +import scala.util.control.NonFatal + +class BufferedChannel[F[_]:CpsAsyncMonad, A](gopherApi: JSGopher[F], bufSize: Int) extends BaseChannel[F,A](gopherApi): + + val ringBuffer: js.Array[A] = new js.Array[A](bufSize) + var start: Int = 0 + var size: Int = 0 + + // [1] [2] [3] + // ˆ ˆ + + def isEmpty = (size == 0) + + def nElements = size + + def isFull = (size == bufSize) + + protected def internalDequeuePeek(): Option[A] = + if isEmpty then None else Some(ringBuffer(start)) + + protected def internalDequeueFinish(): Unit = + require(size > 0) + start = (start + 1) % bufSize + size = size - 1 + + protected def internalEnqueue(a:A): Boolean = + if size < bufSize then + val end = (start + size) % bufSize + ringBuffer(end) = a + size = size + 1 + true + else + false + + + protected def process(): Unit = + var progress = true + while(progress) + progress = false + internalDequeuePeek() match + case Some(a) => + progress |= processReaders(a) + case None => + // nothing. 
+ progress |= processWriters() + if (closed) then + processClose() + + protected def processReaders(a:A): Boolean = + var progress = false + if (!readers.isEmpty && !isEmpty) then + val reader = readers.dequeue() + progress = true + reader.capture().foreach{ f => + internalDequeueFinish() + reader.markUsed() + submitTask( () => f(Success(a)) ) + } + progress + + protected def processWriters(): Boolean = + var progress = false + if (!writers.isEmpty && !isFull) then + val writer = writers.dequeue() + writer.capture() match + case Expirable.Capture.Ready((a,f)) => + internalEnqueue(a) + writer.markUsed() + submitTask( () => f(Success(())) ) + progress = true + case Expirable.Capture.WaitChangeComplete => + // impossible, we have no parallel execution + throw DeadlockDetected() + case Expirable.Capture.Expired => + progress + + + + + + + + + + diff --git a/js/src/main/scala/gopher/impl/JSTime.scala b/js/src/main/scala/gopher/impl/JSTime.scala new file mode 100644 index 00000000..9b288d39 --- /dev/null +++ b/js/src/main/scala/gopher/impl/JSTime.scala @@ -0,0 +1,61 @@ +package gopher.impl + +import gopher._ +import scala.concurrent.duration._ +import scala.collection.immutable.Queue +import scala.util._ + +import java.util.TimerTask + +class JSTime[F[_]](gopherAPI: JSGopher[F]) extends Time[F](gopherAPI): + + def schedule(fun:()=>Unit, delay: FiniteDuration): Time.Scheduled = + + var listeners: Queue[Try[Boolean]=>Unit] = Queue.empty + var canceled = false + + def notifyListeners(value: Try[Boolean]): Unit = + listeners.foreach{ f=> + try + f(value) + catch + case ex: Throwable => + ex.printStackTrace() + } + listeners = Queue.empty + + val task = new TimerTask { + override def run(): Unit = { + // TODO: log exception (?) + if (!canceled) then + try + fun() + catch + case ex: Throwable => + notifyListeners(Failure(ex)) + notifyListeners(Success(!canceled)) + } + } + + JSGopher.timer.schedule(task,delay.toMillis) + + + new Time.Scheduled { + + def cancel(): Boolean = + val r = task.cancel() + if (r) + notifyListeners(Success(false)) + r + + def onDone(f: Try[Boolean] => Unit) = + listeners = listeners.appended(f) + + } + + + + + + + diff --git a/js/src/main/scala/gopher/impl/PromiseChannel.scala b/js/src/main/scala/gopher/impl/PromiseChannel.scala new file mode 100644 index 00000000..83bd539d --- /dev/null +++ b/js/src/main/scala/gopher/impl/PromiseChannel.scala @@ -0,0 +1,59 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.collection.mutable.Queue +import scala.scalajs.concurrent.JSExecutionContext +import scala.util._ +import scala.util.control.NonFatal + +class PromiseChannel[F[_]:CpsAsyncMonad, A](gopherApi: JSGopher[F]) extends BaseChannel[F,A](gopherApi): + + private var value: Option[A] = None + private var readed = false + + protected def isEmpty: Boolean = value.isEmpty || readed + + //override def addDoneReader(reader: Reader[Unit]): Unit = + + protected def process(): Unit = + var done = false + // we have only one writer. + while (!writers.isEmpty && value.isEmpty) { + val w = writers.dequeue() + w.capture() match + case Expirable.Capture.Ready((a,f)) => + w.markUsed() + submitTask(()=>f(Success(()))) + value = Some(a) + closed = true + // we can't havw more than one unexpired + case Expirable.Capture.WaitChangeComplete => + // impossible in js, + // (mb processNextTick()?) 
+ throw new DeadlockDetected() + case Expirable.Capture.Expired => + } + if (!readers.isEmpty && value.isDefined) { + while(!readers.isEmpty && !readed) { + val r = readers.dequeue() + r.capture() match + case Expirable.Capture.Ready(f) => + r.markUsed() + submitTask(()=>f(Success(value.get))) + readed = true + case Expirable.Capture.WaitChangeComplete => + throw new DeadlockDetected() + case Expirable.Capture.Expired => + } + //if (readed) { + // processCloseDone() + //} + } + if (closed) then + processClose() + + + + + \ No newline at end of file diff --git a/js/src/main/scala/gopher/impl/UnbufferedChannel.scala b/js/src/main/scala/gopher/impl/UnbufferedChannel.scala new file mode 100644 index 00000000..a73dc134 --- /dev/null +++ b/js/src/main/scala/gopher/impl/UnbufferedChannel.scala @@ -0,0 +1,76 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.collection.mutable.Queue +import scala.scalajs.concurrent.JSExecutionContext +import scala.util._ +import scala.util.control.NonFatal + + +class UnbufferedChannel[F[_]:CpsAsyncMonad, A](gopherApi: JSGopher[F]) extends BaseChannel[F,A](gopherApi): + + + protected def isEmpty: Boolean = + writers.isEmpty + + protected def process(): Unit = + var progress = true + while(progress) { + progress = false + var done = false + while(!done && !readers.isEmpty && !writers.isEmpty) { + findReader() match + case Some(reader) => + findWriter() match + case Some(writer) => + reader.capture() match + case Expirable.Capture.Ready(readFun) => + writer.capture() match + case Expirable.Capture.Ready((a,writeFun)) => + submitTask( () => readFun(Success(a))) + submitTask( () => writeFun(Success(())) ) + progress = true + done = true + writer.markUsed() + reader.markUsed() + case _ => + // impossible, because in js we have-no interleavinf, bug anyway + // let's fallback + reader.markFree() + readers.prepend(reader) + case Expirable.Capture.WaitChangeComplete => + // impossible, but let's fallback + // TODO: prepend reader and skip event + writers.prepend(writer) + case Expirable.Capture.Expired => + // impossible, but let's fallback + writers.prepend(writer) + case None => + done = true + case None => + done = true + } + } + if (closed) { + processClose() + } + + + private def findUnexpired[T <: Expirable[?]](q: Queue[T]): Option[T] = + var retval: Option[T] = None + while(retval.isEmpty && ! 
q.isEmpty) { + val c = q.dequeue; + if (!c.isExpired) { + retval = Some(c) + } + } + retval + + private def findReader(): Option[Reader[A]] = + findUnexpired(readers) + + private def findWriter(): Option[Writer[A]] = + findUnexpired(writers) + + \ No newline at end of file diff --git a/js/src/test/scala/gopher/impl/RingBufferTest.scala b/js/src/test/scala/gopher/impl/RingBufferTest.scala new file mode 100644 index 00000000..0f85b5a1 --- /dev/null +++ b/js/src/test/scala/gopher/impl/RingBufferTest.scala @@ -0,0 +1,38 @@ +package gopher.impl + +import cps._ +import cps.monads.FutureAsyncMonad +import gopher._ +import scala.concurrent._ +import scalajs.concurrent.JSExecutionContext +import scalajs.concurrent.JSExecutionContext.Implicits.queue + + +import munit._ + +class RingBufferTests extends munit.FunSuite{ + + test("ring buffer ") { + + val gopher = JSGopher[Future](JSGopherConfig()) + val ch = gopher.makeChannel[Int](3) + + var x = 0 + + async[Future] { + ch.write(1) + ch.write(2) + ch.write(3) + // we should be blocked before sending next + JSExecutionContext.queue.execute{ () => + x = 1 + ch.aread() + } + ch.write(4) + assert(x != 0) + } + + } + + +} \ No newline at end of file diff --git a/js/src/test/scala/gopher/util/Debug.scala b/js/src/test/scala/gopher/util/Debug.scala new file mode 100644 index 00000000..9569d0dc --- /dev/null +++ b/js/src/test/scala/gopher/util/Debug.scala @@ -0,0 +1,41 @@ +package gopher.util + +import java.util.logging.{Level => LogLevel} + +object Debug { + + type InMemoryLog = java.util.concurrent.ConcurrentLinkedQueue[(Long, String, Throwable)] + + def inMemoryLogFun(inMemoryLog: InMemoryLog): (LogLevel, String, Throwable|Null) => Unit = + (level,msg, ex) => inMemoryLog.add((Thread.currentThread().getId(), msg,ex)) + + def showInMemoryLog(inMemoryLog: InMemoryLog): Unit = { + while(!inMemoryLog.isEmpty) { + val r = inMemoryLog.poll() + if (r != null) { + println(r) + } + } + } + + + def showTraces(maxTracesToShow: Int): Unit = { + val traces = Thread.getAllStackTraces(); + val it = traces.entrySet().iterator() + while(it.hasNext()) { + val e = it.next(); + println(e.getKey()); + val elements = e.getValue() + var sti = 0 + var wasPark = false + while(sti < elements.length && sti < maxTracesToShow && !wasPark) { + val st = elements(sti) + println(" "*10 + st) + sti = sti + 1; + wasPark = (st.getMethodName == "park") + } + } + } + + +} \ No newline at end of file diff --git a/jvm/src/main/scala/gopher/JVMGopher.scala b/jvm/src/main/scala/gopher/JVMGopher.scala new file mode 100644 index 00000000..178bfb77 --- /dev/null +++ b/jvm/src/main/scala/gopher/JVMGopher.scala @@ -0,0 +1,80 @@ +package gopher + +import cps._ +import gopher.impl._ + +import java.util.concurrent.Executors +import java.util.concurrent.ExecutorService +import java.util.concurrent.ForkJoinPool +import java.util.concurrent.atomic.AtomicReference +import java.util.Timer +import java.util.logging._ +import scala.concurrent.ExecutionContext +import scala.concurrent.duration._ + + + +class JVMGopher[F[_]:CpsSchedulingMonad](cfg: JVMGopherConfig) extends Gopher[F]: + + + def makeChannel[A](bufSize:Int = 0, autoClose: Boolean = false) = + if autoClose then + PromiseChannel[F,A](this, cfg.taskExecutor) + else + if (bufSize == 0) + GuardedSPSCUnbufferedChannel[F,A](this, cfg.controlExecutor,cfg.taskExecutor) + else + GuardedSPSCBufferedChannel[F,A](this, bufSize, cfg.controlExecutor,cfg.taskExecutor) + + + + val time = new JVMTime(this) + + def setLogFun(logFun:(Level, String, Throwable|Null) => 
Unit): ((Level, String, Throwable|Null) => Unit) = + currentLogFun.getAndSet(logFun) + + def log(level: Level, message: String, ex: Throwable| Null): Unit = + currentLogFun.get().apply(level,message,ex) + + lazy val taskExecutionContext = ExecutionContext.fromExecutor(cfg.taskExecutor) + + def scheduledExecutor = JVMGopher.scheduledExecutor + + + private val currentLogFun: AtomicReference[(Level,String,Throwable|Null)=>Unit]=new AtomicReference(JVMGopher.defaultLogFun) + + + + +object JVMGopher extends GopherAPI: + + def apply[F[_]:CpsSchedulingMonad](cfg: GopherConfig):Gopher[F] = + val jvmConfig = cfg match + case DefaultGopherConfig => defaultConfig + case jcfg:JVMGopherConfig => jcfg + new JVMGopher[F](jvmConfig) + + lazy val scheduledExecutor = Executors.newScheduledThreadPool(1) + + lazy val defaultConfig=JVMGopherConfig( + controlExecutor=Executors.newFixedThreadPool(2), + taskExecutor=ForkJoinPool.commonPool(), + ) + + // need for binary compability + @deprecated("use summon[Gopher].time instead") + lazy val timer = new Timer("gopher") + + val logger = Logger.getLogger("JVMGopher") + + def defaultLogFun(level: Level, message:String, ex: Throwable|Null): Unit = + if (ex eq null) { + logger.log(level, message) + } else { + logger.log(level, message, ex) + } + + + final val MAX_SPINS = 400 + +val Gopher = JVMGopher diff --git a/jvm/src/main/scala/gopher/JVMGopherConfig.scala b/jvm/src/main/scala/gopher/JVMGopherConfig.scala new file mode 100644 index 00000000..0e01323e --- /dev/null +++ b/jvm/src/main/scala/gopher/JVMGopherConfig.scala @@ -0,0 +1,10 @@ +package gopher + +import java.util.concurrent.ExecutorService +import java.util.concurrent.ScheduledExecutorService + + +case class JVMGopherConfig( + controlExecutor: ExecutorService, + taskExecutor: ExecutorService +) extends GopherConfig diff --git a/jvm/src/main/scala/gopher/JVMTime.scala b/jvm/src/main/scala/gopher/JVMTime.scala new file mode 100644 index 00000000..9b6d48fa --- /dev/null +++ b/jvm/src/main/scala/gopher/JVMTime.scala @@ -0,0 +1,64 @@ +package gopher + +import scala.concurrent.duration._ +import scala.util._ +import java.util.concurrent.TimeUnit +import java.util.concurrent.ScheduledFuture +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.atomic.AtomicBoolean + +class JVMTime[F[_]](gopherAPI: JVMGopher[F]) extends Time[F](gopherAPI) { + + def schedule(fun: () => Unit, delay: FiniteDuration): Time.Scheduled = + new JVMScheduled(fun,delay) + + class JVMScheduled(fun: ()=>Unit, delay: FiniteDuration) extends Time.Scheduled { + + val listeners = new ConcurrentLinkedQueue[Try[Boolean]=>Unit] + val cancelled = new AtomicBoolean(false) + + var wrapper = new Runnable() { + override def run(): Unit = + val doRun = !cancelled.get() + try { + if (doRun) { + fun() + } + } catch { + case ex: Throwable => + // TODO: set log. + notifyListeners(Failure(ex)) + } + notifyListeners(Success(doRun)) + } + + val jf = gopherAPI.scheduledExecutor.schedule(wrapper, delay.toMillis, TimeUnit.MILLISECONDS) + + def notifyListeners(value: Try[Boolean]): Unit = + while(! listeners.isEmpty) + val l = listeners.poll() + if (! 
(l eq null)) then + try + l.apply(value) + catch + case ex: Throwable => + // TODO: configure logging + ex.printStackTrace() + + + def cancel(): Boolean = + cancelled.set(true) + val r = jf.cancel(false) + if (r) then + notifyListeners(Success(false)) + r + + def onDone(listener: Try[Boolean] => Unit): Unit = + listeners.offer(listener) + + + + } + +} + diff --git a/jvm/src/main/scala/gopher/Platform.scala b/jvm/src/main/scala/gopher/Platform.scala new file mode 100644 index 00000000..82db07cd --- /dev/null +++ b/jvm/src/main/scala/gopher/Platform.scala @@ -0,0 +1,7 @@ +package gopher + + +object Platform: + + def initShared(): Unit = + SharedGopherAPI.setApi(JVMGopher) \ No newline at end of file diff --git a/jvm/src/main/scala/gopher/impl/GuardedSPSCBaseChannel.scala b/jvm/src/main/scala/gopher/impl/GuardedSPSCBaseChannel.scala new file mode 100644 index 00000000..dc6ed404 --- /dev/null +++ b/jvm/src/main/scala/gopher/impl/GuardedSPSCBaseChannel.scala @@ -0,0 +1,236 @@ +package gopher.impl + +import cps._ +import gopher._ +import java.util.concurrent.Executor +import java.util.concurrent.ExecutorService +import java.util.concurrent.ConcurrentLinkedDeque +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicReference +import scala.util.Try +import scala.util.Success +import scala.util.Failure + +import java.util.logging.{Level => LogLevel} + + +/** + * Guarded channel work in the next way: + * reader and writer asynchronically added to readers and writers and force evaluation of internal step function + * or ensure that currently running step function will see the chanes in readers/writers. + * Step functions is executed in some thread loop, and in the same time, only one instance of step function is running. 
+ * (which is ensured by guard) + **/ +abstract class GuardedSPSCBaseChannel[F[_]:CpsAsyncMonad,A](override val gopherApi: JVMGopher[F], controlExecutor: ExecutorService, taskExecutor: ExecutorService) extends Channel[F,A,A]: + + import GuardedSPSCBaseChannel._ + + protected val readers = new ConcurrentLinkedDeque[Reader[A]]() + protected val writers = new ConcurrentLinkedDeque[Writer[A]]() + protected val doneReaders = new ConcurrentLinkedDeque[Reader[Unit]]() + + protected val publishedClosed = new AtomicBoolean(false) + + protected val stepGuard = new AtomicInteger(STEP_FREE) + + protected val stepRunnable: Runnable = (()=>entryStep()) + + def addReader(reader: Reader[A]): Unit = + if (reader.canExpire) then + readers.removeIf( _.isExpired ) + // if (publishedClosed.get()) then + // tryClosedRead() + // else + readers.add(reader) + controlExecutor.submit(stepRunnable) + + def addWriter(writer: Writer[A]): Unit = + if (writer.canExpire) then + writers.removeIf( _.isExpired ) + if (publishedClosed.get()) then + closeWriter(writer) + else + writers.add(writer) + controlExecutor.submit(stepRunnable) + + def addDoneReader(reader: Reader[Unit]): Unit = + if (reader.canExpire) + doneReaders.removeIf( _.isExpired ) + if (publishedClosed.get()) then + closeDoneReader(reader) + else + doneReaders.add(reader) + controlExecutor.submit(stepRunnable) + + def close(): Unit = + publishedClosed.set(true) + controlExecutor.submit(stepRunnable) + + def isClosed: Boolean = + publishedClosed.get() + + protected def step(): Unit + + + protected def entryStep(): Unit = + var done = false + var nSpins = 0 + while(!done) { + if (stepGuard.compareAndSet(STEP_FREE,STEP_BUSY)) { + done = true + step() + } else if (stepGuard.compareAndSet(STEP_BUSY, STEP_UPDATED)) { + done = true + } else if (stepGuard.get() == STEP_UPDATED) { + // merge with othwer changes + done = true + } else { + // other set updates, we should spinLock + nSpins = nSpins + 1 + Thread.onSpinWait() + } + } + + /** + * if truw - we can leave step, otherwise better run yet one step. + */ + protected def checkLeaveStep(): Boolean = + if (stepGuard.compareAndSet(STEP_BUSY,STEP_FREE)) then + true + else if (stepGuard.compareAndSet(STEP_UPDATED, STEP_BUSY)) then + false + else + // impossible, let'a r + false + + // precondition: writers are empty + protected def processReadClose(): Boolean = + require(writers.isEmpty) + var progress = false + while(!readers.isEmpty) { + val r = readers.poll() + if (!(r eq null) && !r.isExpired) then + r.capture() match + case Expirable.Capture.Ready(f) => + progress = true + //println("sending signal in processReadClose"); + //val prevEx = new RuntimeException("prev") + taskExecutor.execute(() => { + //try + //println(s"calling $f, channel = ${GuardedSPSCBaseChannel.this}") + // prevEx.printStackTrace() + val debugInfo = s"channel=${this}, writersEmpty=${writers.isEmpty}, readersEmpty=${readers.isEmpty}, r=$r, f=$f" + f(Failure(new ChannelClosedException(debugInfo))) + //catch + // case ex: Exception => + // println(s"exception in close-reader, channel=${GuardedSPSCBaseChannel.this}, f=$f, r=$r") + // throw ex + }) + r.markUsed() + case Expirable.Capture.WaitChangeComplete => + progressWaitReader(r) + case Expirable.Capture.Expired => + progress = true + } + progress + + // TODO: remove. 
If we have writers in queue, + protected def processWriteClose(): Boolean = + var progress = false + while(!writers.isEmpty) { + val w = writers.poll() + if !(w eq null) && !w.isExpired then + w.capture() match + case Expirable.Capture.Ready((a,f)) => + progress = true + taskExecutor.execute(() => f(Failure(new ChannelClosedException)) ) + w.markUsed() + case Expirable.Capture.WaitChangeComplete => + progressWaitWriter(w) + case Expirable.Capture.Expired => + progress = true + } + progress + + + protected def processDoneClose(): Boolean = { + var progress = false + while(!doneReaders.isEmpty) { + val r = doneReaders.poll() + if !(r eq null) && !r.isExpired then + r.capture() match + case Expirable.Capture.Ready(f) => + progress = true + taskExecutor.execute(() => f(Success(()))) + r.markUsed() + case Expirable.Capture.WaitChangeComplete => + progressWaitDoneReader(r) + case Expirable.Capture.Expired => + progress = true + } + progress + } + + protected def closeDoneReader(r: Reader[Unit]): Unit = { + while + r.capture() match + case Expirable.Capture.Ready(f) => + taskExecutor.execute(()=>f(Success(()))) + r.markUsed() + false + case Expirable.Capture.WaitChangeComplete => + progressWaitDoneReader(r) + true + case Expirable.Capture.Expired => + false + do () + } + + protected def closeWriter(w: Writer[A]): Unit = { + var done = false + while (!done && !w.isExpired) + w.capture() match + case Expirable.Capture.Ready((a,f)) => + taskExecutor.execute(() => f(Failure(new ChannelClosedException)) ) + w.markUsed() + done = true + case Expirable.Capture.WaitChangeComplete => + Thread.onSpinWait() + case Expirable.Capture.Expired => + done = true + } + + + + // precondition: r.capture() == None + protected def progressWaitReader(r: Reader[A]): Unit = + progressWait(r,readers) + + // precondition: w.capture() == None + protected def progressWaitWriter(w: Writer[A]): Unit = + progressWait(w,writers) + + protected def progressWaitDoneReader(r: Reader[Unit]): Unit = + progressWait(r,doneReaders) + + protected def progressWait[T <: Expirable[_]](v:T, queue: ConcurrentLinkedDeque[T]): Unit = + if (!v.isExpired) + if (queue.isEmpty) + Thread.onSpinWait() + // if (nSpins > JVMGopher.MAX_SPINS) + // Thread.`yield`() + queue.addLast(v) + + + +object GuardedSPSCBaseChannel: + + final val STEP_FREE = 0 + + final val STEP_BUSY = 1 + + final val STEP_UPDATED = 2 + + + diff --git a/jvm/src/main/scala/gopher/impl/GuardedSPSCBufferedChannel.scala b/jvm/src/main/scala/gopher/impl/GuardedSPSCBufferedChannel.scala new file mode 100644 index 00000000..bc4d94c7 --- /dev/null +++ b/jvm/src/main/scala/gopher/impl/GuardedSPSCBufferedChannel.scala @@ -0,0 +1,173 @@ +package gopher.impl + +import cps.* +import gopher.* +import java.util.concurrent.ExecutorService +import java.util.concurrent.atomic.AtomicReferenceArray +import java.util.concurrent.atomic.AtomicInteger +import scala.util.Try +import scala.util.Success +import scala.util.Failure + +import java.util.logging.{Level => LogLevel} + +class GuardedSPSCBufferedChannel[F[_]:CpsAsyncMonad,A](gopherApi: JVMGopher[F], bufSize: Int, +controlExecutor: ExecutorService, +taskExecutor: ExecutorService) extends GuardedSPSCBaseChannel[F,A](gopherApi,controlExecutor, taskExecutor): + + import GuardedSPSCBaseChannel._ + + class RingBuffer extends SPSCBuffer[A] { + + val refs: AtomicReferenceArray[AnyRef | Null] = new AtomicReferenceArray(bufSize); + val publishedStart: AtomicInteger = new AtomicInteger(0) + val publishedSize: AtomicInteger = new AtomicInteger(0) + + var start: 
Int = 0 + var size: Int = 0 + + override def local(): Unit = { + start = publishedStart.get() + size = publishedSize.get() + } + + override def publish(): Unit = { + publishedStart.set(start) + publishedSize.set(size) + } + + override def isEmpty(): Boolean = (size == 0) + override def isFull(): Boolean = (size == bufSize) + + override def startRead(): A = { + val aRef = refs.get(start) + //TODO: enable debug mode + //if (aRef eq null) { + // throw new IllegalStateException("read null item") + //} + aRef.nn.asInstanceOf[A] + } + + override def finishRead(): Boolean = { + if (size > 0) then + start = (start + 1) % bufSize + size = size - 1 + true + else + false + } + + override def write(a:A): Boolean = { + if (size < bufSize) then + val end = (start + size) % bufSize + val aRef: AnyRef | Null = a.asInstanceOf[AnyRef] // boxing + refs.lazySet(end,aRef) + size += 1 + true + else + false + } + + } + + //Mutable buffer state + protected val state: SPSCBuffer[A] = new RingBuffer() + + + protected def step(): Unit = + state.local() + var isClosed = publishedClosed.get() + var progress = true + while(progress) { + progress = false + if !state.isEmpty() then + progress |= processReadsStep() + else + if isClosed then + progress |= processDoneClose() + if (writers.isEmpty) then + progress |= processReadClose() + if (!state.isFull()) then + progress |= processWriteStep() + //if (isClosed) + // progress |= processWriteClose() + if (!progress) { + state.publish() + if (! checkLeaveStep()) { + progress = true + isClosed = publishedClosed.get() + } + } + } + + + + private def processReadsStep(): Boolean = + // precondition: !isEmpty + val a = state.startRead() + var done = false + var progress = false + var nonExpiredBusyReads = scala.collection.immutable.Queue.empty[Reader[A]] + while(!done && !readers.isEmpty) { + val reader = readers.poll() + if !(reader eq null) && !reader.isExpired then + reader.capture() match + case Expirable.Capture.Ready(f) => + // try/cath arround f is a reader reponsability + taskExecutor.execute(() => f(Success(a))) + reader.markUsed() + state.finishRead() + progress = true + done = true + case Expirable.Capture.WaitChangeComplete => + nonExpiredBusyReads = nonExpiredBusyReads.enqueue(reader) + case Expirable.Capture.Expired => + progress = true + } + while(nonExpiredBusyReads.nonEmpty) { + // not in this thread, but progress. + progress = true + val (r, c) = nonExpiredBusyReads.dequeue + progressWaitReader(r) + nonExpiredBusyReads = c + } + progress + + // precondition: ! isFUll + private def processWriteStep(): Boolean = + var progress = false + var done = false + var nonExpiredBusyWriters = scala.collection.immutable.Queue.empty[Writer[A]] + while(!done && !writers.isEmpty) { + val writer = writers.poll() + if !(writer eq null ) && ! 
writer.isExpired then + writer.capture() match + case Expirable.Capture.Ready((a,f)) => + done = true + if (state.write(a)) then + taskExecutor.execute( + () => f(Success(())) + ) + progress = true + writer.markUsed() + else + // impossible, because state + //TODO: log + //log("impossibe,unsuccesfull write after !isFull") + writer.markFree() + writers.addFirst(writer) + case Expirable.Capture.WaitChangeComplete => + nonExpiredBusyWriters = nonExpiredBusyWriters.enqueue(writer) + case Expirable.Capture.Expired => + progress = true + } + while(nonExpiredBusyWriters.nonEmpty) { + progress = true + val (w, c) = nonExpiredBusyWriters.dequeue + nonExpiredBusyWriters = c + progressWaitWriter(w) + } + progress + + +end GuardedSPSCBufferedChannel diff --git a/jvm/src/main/scala/gopher/impl/GuardedSPSCUnbufferedChannel.scala b/jvm/src/main/scala/gopher/impl/GuardedSPSCUnbufferedChannel.scala new file mode 100644 index 00000000..8d0f4eeb --- /dev/null +++ b/jvm/src/main/scala/gopher/impl/GuardedSPSCUnbufferedChannel.scala @@ -0,0 +1,78 @@ +package gopher.impl + +import cps._ +import gopher._ + +import java.util.concurrent.ExecutorService +import java.util.concurrent.atomic.AtomicReferenceArray +import java.util.concurrent.atomic.AtomicInteger +import scala.util.Try +import scala.util.Success +import scala.util.Failure + +import GuardedSPSCBaseChannel._ + + +class GuardedSPSCUnbufferedChannel[F[_]:CpsAsyncMonad,A]( + gopherApi: JVMGopher[F], + controlExecutor: ExecutorService, + taskExecutor: ExecutorService) extends GuardedSPSCBaseChannel[F,A](gopherApi,controlExecutor, taskExecutor): + + protected override def step(): Unit = { + var progress = true + var isClosed = publishedClosed.get() + while (progress) { + var readerLoopDone = false + progress = false + while(!readerLoopDone && !readers.isEmpty && !writers.isEmpty) { + val reader = readers.poll() + if (!(reader eq null) && !reader.isExpired) then + var writersLoopDone = false + while(! writersLoopDone && !readerLoopDone && !writers.isEmpty) { + var writer = writers.poll() + if (!(writer eq null) && !writer.isExpired) then + // now we have reader and writer + reader.capture() match + case Expirable.Capture.Ready(readFun) => + progress = true + writer.capture() match + case Expirable.Capture.Ready((a,writeFun)) => + // great, now we have all + taskExecutor.execute(()=>readFun(Success(a))) + taskExecutor.execute(()=>writeFun(Success(()))) + reader.markUsed() + writer.markUsed() + writersLoopDone = true + case Expirable.Capture.WaitChangeComplete => + reader.markFree() + progressWaitWriter(writer) + case Expirable.Capture.Expired => + reader.markFree() + case Expirable.Capture.WaitChangeComplete => + writers.addFirst(writer) + writersLoopDone = true + progress = true // TODO: ??? + progressWaitReader(reader) + case Expirable.Capture.Expired => + writers.addFirst(writer) + writersLoopDone = true + progress = true + } + } + if (isClosed && (readers.isEmpty || writers.isEmpty) ) then + // progress |= processWriteClose() + while(! 
doneReaders.isEmpty) { + progress |= processDoneClose() + } + if (writers.isEmpty) + progress |= processReadClose() + if (!progress) then + if !checkLeaveStep() then + progress = true + isClosed = publishedClosed.get() + } + } + + + + diff --git a/jvm/src/main/scala/gopher/impl/PromiseChannel.scala b/jvm/src/main/scala/gopher/impl/PromiseChannel.scala new file mode 100644 index 00000000..f88a6b00 --- /dev/null +++ b/jvm/src/main/scala/gopher/impl/PromiseChannel.scala @@ -0,0 +1,145 @@ +package gopher.impl + +import cps._ +import gopher._ +import java.util.concurrent.ConcurrentLinkedDeque +import java.util.concurrent.ExecutorService +import java.util.concurrent.Executor +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicBoolean +import scala.util.Try +import scala.util.Success +import scala.util.Failure + + +/** + * Channel is closed immediatly after successfull write. + **/ + class PromiseChannel[F[_],A](override val gopherApi: JVMGopher[F], taskExecutor: Executor) extends Channel[F,A,A]: + + protected val readers = new ConcurrentLinkedDeque[Reader[A]]() + protected val doneReaders = new ConcurrentLinkedDeque[Reader[Unit]]() + protected val ref: AtomicReference[AnyRef | Null] = new AtomicReference(null) + protected val closed: AtomicBoolean = new AtomicBoolean(false) + protected val readed: AtomicBoolean = new AtomicBoolean(false) + + def addReader(reader: Reader[A]): Unit = + readers.add(reader) + step() + + def addWriter(writer: Writer[A]): Unit = + var done = false + while(!done && !writer.isExpired) + writer.capture() match + case Expirable.Capture.Ready((a,f)) => + val ar: AnyRef = a.asInstanceOf[AnyRef] // + if (ref.compareAndSet(null,ar) && !closed.get() ) then + closed.set(true) + taskExecutor.execute{ ()=> + f(Success(())) + } + writer.markUsed() + step() + else + taskExecutor.execute(() => f(Failure(new ChannelClosedException()))) + writer.markUsed() + done = true + case Expirable.Capture.WaitChangeComplete => + Thread.onSpinWait() + case Expirable.Capture.Expired => + + + def addDoneReader(reader: Reader[Unit]): Unit = + if (!closed.get() || !readed.get) then + doneReaders.add(reader) + if (closed.get()) then + step() + else + var done = false + while(!done & !reader.isExpired) { + reader.capture() match + case Expirable.Capture.Ready(f) => + reader.markUsed() + taskExecutor.execute(()=>f(Success(()))) + done = true + case Expirable.Capture.WaitChangeComplete => + Thread.onSpinWait() + case Expirable.Capture.Expired => + } + + + + + def close(): Unit = + closed.set(true) + if (ref.get() eq null) + closeAll() + + def isClosed: Boolean = + closed.get() + + def step(): Unit = + val ar = ref.get() + if !(ar eq null) then + var done = false + while(!done && !readers.isEmpty) { + val r = readers.poll() + if ! 
(r eq null) then + while (!done && !r.isExpired) { + r.capture() match + case Expirable.Capture.Ready(f) => + done = true + if (readed.compareAndSet(false,true)) then + r.markUsed() + val a = ar.nn.asInstanceOf[A] + taskExecutor.execute(() => f(Success(a))) + else + // before throw channel-close exception, let's check + if (doneReaders.isEmpty) then + r.markUsed() + taskExecutor.execute(() => f(Failure(new ChannelClosedException()))) + else + r.markFree() + readers.addLast(r) // called later after done + case Expirable.Capture.WaitChangeComplete => + if (readers.isEmpty) then + Thread.onSpinWait() + readers.addLast(r) + case Expirable.Capture.Expired => + } + } + else if (closed.get()) then + closeAll() + + def closeAll(): Unit = + while(!doneReaders.isEmpty) { + val r = doneReaders.poll() + if !(r eq null) then + r.capture() match + case Expirable.Capture.Ready(f) => + r.markUsed() + taskExecutor.execute(()=>f(Success(()))) + case Expirable.Capture.WaitChangeComplete => + if (doneReaders.isEmpty) then + Thread.onSpinWait() + doneReaders.addLast(r) + case Expirable.Capture.Expired => + } + while(!readers.isEmpty) { + val r = readers.poll() + if !(r eq null) then + r.capture() match + case Expirable.Capture.Ready(f) => + r.markUsed() + taskExecutor.execute(() => f(Failure(new ChannelClosedException))) + case Expirable.Capture.WaitChangeComplete => + if (readers.isEmpty) then + Thread.onSpinWait() + readers.addLast(r) + case Expirable.Capture.Expired => + } + + + + diff --git a/jvm/src/main/scala/gopher/impl/SPSCBuffer.scala b/jvm/src/main/scala/gopher/impl/SPSCBuffer.scala new file mode 100644 index 00000000..1112fa1d --- /dev/null +++ b/jvm/src/main/scala/gopher/impl/SPSCBuffer.scala @@ -0,0 +1,27 @@ +package gopher.impl + +import gopher._ + +/** +* Buffer. 
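// Sketch of the intended single-owner calling pattern for SPSCBuffer, as the
// buffered channel's step loop earlier in this changeset does: take a local
// snapshot, drain what is readable, then publish. The helper name and the
// `consume` callback are illustrative.
def drainSketch[A](buf: SPSCBuffer[A], consume: A => Unit): Unit =
  buf.local()                  // load the published start/size into local state
  while !buf.isEmpty() do
    consume(buf.startRead())   // look at the oldest element
    buf.finishRead()           // then drop it from the local view
  buf.publish()                // make the progress visible to the next step run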
access to buffer is exclusive by owner channel, +* different loops can start in different threads but only one loop can be active at the samw time +**/ +trait SPSCBuffer[A] { + + def isEmpty(): Boolean + + def startRead(): A + def finishRead(): Boolean + + def isFull(): Boolean + // prcondition: !isFull() + def write(a: A): Boolean + + + // set local state from published + def local(): Unit + + // make buffer be readable from other thread than + def publish(): Unit + +} \ No newline at end of file diff --git a/jvm/src/test/scala/gopher/ApiAccessTests.scala b/jvm/src/test/scala/gopher/ApiAccessTests.scala new file mode 100644 index 00000000..2dec2c9a --- /dev/null +++ b/jvm/src/test/scala/gopher/ApiAccessTests.scala @@ -0,0 +1,37 @@ +package gopher + + +import scala.concurrent._ +import scala.concurrent.duration._ +import cps._ +import cps.monads.FutureAsyncMonad +import scala.language.postfixOps + +import munit._ + +class ApiAccessTests extends FunSuite { + + + import scala.concurrent.ExecutionContext.Implicits.global + + test("simple unbuffered channel") { + given Gopher[Future] = JVMGopher[Future]() + val ch = makeChannel[Int](0) + val fw1 = ch.awrite(1) + //println("after awrite") + val fr1 = ch.aread() + //println("after aread, waiting result") + val r1 = Await.result(fr1, 1 second) + assert( r1 == 1 ) + } + + test("simple 1-buffered channel") { + given Gopher[Future] = JVMGopher[Future]() + val ch = makeChannel[Int](1) + val fw1 = ch.awrite(1) + val fr1 = ch.aread() + val r1 = Await.result(fr1, 1 second) + assert( r1 == 1 ) + } + +} \ No newline at end of file diff --git a/jvm/src/test/scala/gopher/channels/DuppedChannelsMultipleSuite.scala b/jvm/src/test/scala/gopher/channels/DuppedChannelsMultipleSuite.scala new file mode 100644 index 00000000..b15f20ff --- /dev/null +++ b/jvm/src/test/scala/gopher/channels/DuppedChannelsMultipleSuite.scala @@ -0,0 +1,67 @@ +package gopher.channels + +import cps._ +import cps.monads.FutureAsyncMonad +import gopher._ +import munit._ + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.language.postfixOps +import scala.util._ + +import gopher.util.Debug + +import java.util.logging.{Level => LogLevel} + +class DuppedChannelsMultipleSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given gopherApi: Gopher[Future] = SharedGopherAPI.apply[Future]() + + val inMemoryLog = new Debug.InMemoryLog() + + + test("on closing of main stream dupped outputs also closed N times.") { + val N = 1000 + var logIndex = 0 + gopherApi.setLogFun(Debug.inMemoryLogFun(inMemoryLog)) + for(i <- 1 to N) { + inMemoryLog.clear() + logIndex = i + val ch = makeChannel[Int](1) + gopherApi.log(LogLevel.FINE, s"created origin ch=${ch}") + val (in1, in2) = ch.dup() + val f1 = async{ + gopherApi.log(LogLevel.FINE, s"before ch.write(1), ch=${ch}") + ch.write(1) + gopherApi.log(LogLevel.FINE, s"before ch.close, ch=${ch}") + ch.close() + } + val f = for{ fx <- f1 + x <- in1.aread() + r <- in1.aread().transformWith { + case Success(u) => + Future failed new IllegalStateException("Mist be closed") + case Failure(u) => + Future successful (assert(x == 1)) + } + } yield { + r + } + try { + val r = Await.result(f, 30 seconds); + }catch{ + case ex: Throwable => //: TimeoutException => + Debug.showTraces(20) + println("---") + Debug.showInMemoryLog(inMemoryLog) + throw ex + } + } + + } + +} + + diff --git a/jvm/src/test/scala/gopher/stream/JVMBasicGeneratorSuite.scala b/jvm/src/test/scala/gopher/stream/JVMBasicGeneratorSuite.scala new 
file mode 100644 index 00000000..0e6ed3ef --- /dev/null +++ b/jvm/src/test/scala/gopher/stream/JVMBasicGeneratorSuite.scala @@ -0,0 +1,76 @@ +package gopher.stream + +import scala.concurrent.* +import scala.concurrent.duration.* +import scala.concurrent.ExecutionContext.Implicits.global + +import cps.* +import cps.monads.given + +import gopher.* +import gopher.util.Debug +import java.util.logging.{Level => LogLevel} + + +import munit.* + + +class JVMBasicGeneratorSuite extends FunSuite { + + val N = 10000 + + given Gopher[Future] = SharedGopherAPI[Future]() + + val inMemoryLog = new Debug.InMemoryLog() + + + summon[Gopher[Future]].setLogFun( Debug.inMemoryLogFun(inMemoryLog) ) + + + test("M small loop in gopher ReadChannel") { + + val M = 1000 + val N = 100 + + val folds: Seq[Future[Int]] = for(k <- 1 to M) yield { + val channel = asyncStream[ReadChannel[Future,Int]] { out => + var last = 0 + for(i <- 1 to N) { + out.emit(i) + last = i + //println("emitted: "+i) + //summon[Gopher[Future]].log(LogLevel.FINE, s"emitted $i in $k") + } + summon[Gopher[Future]].log(LogLevel.FINE, s"last $last in $k") + } + async[Future]{ + channel.fold(0)(_ + _) + } + } + + val expected = (1 to N).sum + + + val f = folds.foldLeft(Future.successful(())){ (s,e) => + s.flatMap{ r => + e.map{ x => + assert(x == expected) + } } + } + + try { + val r = Await.result(f, 30.seconds); + }catch{ + case ex: Throwable => //: TimeoutException => + Debug.showTraces(20) + println("---") + Debug.showInMemoryLog(inMemoryLog) + throw ex + } + + + } + + + +} \ No newline at end of file diff --git a/jvm/src/test/scala/gopher/util/Debug.scala b/jvm/src/test/scala/gopher/util/Debug.scala new file mode 100644 index 00000000..0950f80a --- /dev/null +++ b/jvm/src/test/scala/gopher/util/Debug.scala @@ -0,0 +1,47 @@ +package gopher.util + + +import java.util.logging.{Level => LogLevel} + + +object Debug { + + export java.util.logging.Level as LogLevel + + type InMemoryLog = java.util.concurrent.ConcurrentLinkedQueue[(Long, String, Throwable)] + + def inMemoryLogFun(inMemoryLog: InMemoryLog): (LogLevel, String, Throwable|Null) => Unit = + (level,msg, ex) => inMemoryLog.add((Thread.currentThread().getId(), msg,ex)) + + def showInMemoryLog(inMemoryLog: InMemoryLog): Unit = { + while(!inMemoryLog.isEmpty) { + val r = inMemoryLog.poll() + if (r != null) { + println(r) + } + } + } + + + def showTraces(maxTracesToShow: Int): Unit = { + val traces = Thread.getAllStackTraces(); + val it = traces.entrySet().iterator() + while(it.hasNext()) { + val e = it.next(); + println(e.getKey()); + val elements = e.getValue() + var sti = 0 + var wasPark = false + while(sti < elements.length && sti < maxTracesToShow && !wasPark) { + val st = elements(sti) + println(" "*10 + st) + sti = sti + 1; + wasPark = (st.getMethodName == "park") + } + } + } + + + + +} \ No newline at end of file diff --git a/notes/0.99.8.markdown b/notes/0.99.8.markdown deleted file mode 100644 index b50a987c..00000000 --- a/notes/0.99.8.markdown +++ /dev/null @@ -1,13 +0,0 @@ -- added support for select.timeout construct -- added support for lifting-up await in hight-order functions. 
-ie in simplicified explanation: -``` - for(x <- 1 to n) { s += read(x) } -``` - is translated to -``` - 1.to(n).foreachAsync { async(s += await(aread(x))) } -``` -details can be found in techreport: https://arxiv.org/abs/1611.00602 -- added support for select.fold construct -- scala 2.12 diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 00000000..081fdbbc --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=1.10.0 diff --git a/project/plugins.sbt b/project/plugins.sbt index 2e132a3e..3c903831 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,3 +1,8 @@ -addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.4.0") - -addSbtPlugin("com.typesafe.sbt" % "sbt-pgp" % "0.8.1") +addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.2") +addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1") +addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.6.3") +addSbtPlugin("org.portable-scala" % "sbt-scala-native-crossproject" % "1.3.2") +addSbtPlugin("org.portable-scala" % "sbt-scalajs-crossproject" % "1.3.2") +addSbtPlugin("org.scala-js" % "sbt-scalajs" % "1.19.0") +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.5.7") +addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.0.1") diff --git a/publish.sbt b/publish.sbt new file mode 100644 index 00000000..cc5dbc4e --- /dev/null +++ b/publish.sbt @@ -0,0 +1,39 @@ +credentials += Credentials(Path.userHome / ".sbt" / "sonatype_credentials") + +ThisBuild / organization := "com.github.rssh" +ThisBuild / organizationName := "rssh" +ThisBuild / organizationHomepage := Some(url("https://github.com/rssh")) + +ThisBuild / scmInfo := Some( + ScmInfo( + url("https://github.com/rssh/scala-gopher"), + "scm:git@github.com:rssh/scala-gopher.git" + ) +) + + +ThisBuild / developers := List( + Developer( + id = "rssh", + name = "Ruslan Shevchenko", + email = "ruslan@shevchenko.kiev.ua", + url = url("https://github.com/rssh") + ) +) + + +ThisBuild / description := "scala-gopher: asynchronous implementation of CSP ( go-like channels/selectors ) in scala " +ThisBuild / licenses := List("Apache 2" -> new URL("http://www.apache.org/licenses/LICENSE-2.0.txt")) +ThisBuild / homepage := Some(url("https://github.com/rssh/scala-gopher")) + +ThisBuild / pomIncludeRepository := { _ => false } +ThisBuild / publishTo := { + val nexus = "https://oss.sonatype.org/" + if (isSnapshot.value) Some("snapshots" at nexus + "content/repositories/snapshots") + else Some("releases" at nexus + "service/local/staging/deploy/maven2") +} +ThisBuild / publishMavenStyle := true + + + + diff --git a/shared/src/main/scala/gopher/Channel.scala b/shared/src/main/scala/gopher/Channel.scala new file mode 100644 index 00000000..94b20e8e --- /dev/null +++ b/shared/src/main/scala/gopher/Channel.scala @@ -0,0 +1,60 @@ +package gopher + +import cps._ +import java.io.Closeable +import scala.concurrent.duration.FiniteDuration + +import gopher.impl._ + +/** + * Channel with ability to read and to write. 
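// A brief usage sketch of this trait, assuming a Future-based Gopher given is
// in scope; the channel names and the one-second ttl are illustrative.
import scala.concurrent.Future
import scala.concurrent.duration.*

def channelSketch(using g: Gopher[Future]): Unit =
  val raw     = g.makeChannel[Int](bufSize = 16)      // Channel[Future, Int, Int]
  val evens   = raw.filter(_ % 2 == 0)                // read side filtered, write side unchanged
  val asText  = evens.map(_.toString)                 // Channel[Future, Int, String]
  val guarded = raw.withExpiration(1.second, throwTimeouts = false)  // pending writes expire after the ttl (see ChannelWithExpiration)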
+ * @see [[gopher.ReadChannel]] + * @see [[gopher.WriteChannel]] + **/ +trait Channel[F[_],W,R] extends WriteChannel[F,W] with ReadChannel[F,R] with Closeable: + + override def gopherApi: Gopher[F] + + def withExpiration(ttl: FiniteDuration, throwTimeouts: Boolean): ChannelWithExpiration[F,W,R] = + new ChannelWithExpiration(this, ttl, throwTimeouts) + + override def map[R1](f: R=>R1): Channel[F,W,R1] = + MappedChannel(this,f) + + override def mapAsync[R1](f: R=>F[R1]): Channel[F,W,R1] = + MappedAsyncChannel(this, f) + + def flatMap[R1](f: R=> ReadChannel[F,R1]): Channel[F,W,R1] = + ChFlatMappedChannel(this,f) + + //def flatMapAsync[R1](f: R=> F[ReadChannel[F,R1]]): Channel[F,W,R1] = + // ChFlatMappedAsyncChannel(this,f) + + override def filter(p: R=>Boolean): Channel[F,W,R] = + FilteredChannel(this, p) + + override def filterAsync(p: R=>F[Boolean]): Channel[F,W,R] = + FilteredAsyncChannel(this,p) + + def isClosed: Boolean + + +end Channel + +object Channel: + + def apply[F[_],A]()(using Gopher[F]): Channel[F,A,A] = + summon[Gopher[F]].makeChannel[A]() + + case class Read[F[_],A](a:A, ch:ReadChannel[F,A]|F[A]) { + type Element = A + } + case class FRead[F[_],A](a:A, ch: F[A]) + case class Write[F[_],A](a: A, ch: WriteChannel[F,A]) + + import cps.stream._ + + + +end Channel + diff --git a/shared/src/main/scala/gopher/ChannelClosedException.scala b/shared/src/main/scala/gopher/ChannelClosedException.scala new file mode 100644 index 00000000..3ffbda63 --- /dev/null +++ b/shared/src/main/scala/gopher/ChannelClosedException.scala @@ -0,0 +1,5 @@ +package gopher + +class ChannelClosedException( + debugInfo: String = "" +) extends RuntimeException(s"channel is closed. ${debugInfo}") diff --git a/shared/src/main/scala/gopher/ChannelWithExpiration.scala b/shared/src/main/scala/gopher/ChannelWithExpiration.scala new file mode 100644 index 00000000..5bfa5fdf --- /dev/null +++ b/shared/src/main/scala/gopher/ChannelWithExpiration.scala @@ -0,0 +1,34 @@ +package gopher + +import cps._ +import gopher.impl._ +import scala.concurrent.duration.FiniteDuration + +class ChannelWithExpiration[F[_],W,R](internal: Channel[F,W,R], ttl: FiniteDuration, throwTimeouts: Boolean) + extends WriteChannelWithExpiration[F,W](internal, ttl, throwTimeouts, internal.gopherApi) + with Channel[F,W,R]: + + + override def gopherApi: Gopher[F] = internal.gopherApi + + override def asyncMonad: CpsSchedulingMonad[F] = gopherApi.asyncMonad + + override def addReader(reader: Reader[R]): Unit = + internal.addReader(reader) + + override def addDoneReader(reader: Reader[Unit]): Unit = + internal.addDoneReader(reader) + + + override def withExpiration(ttl: FiniteDuration, throwTimeouts: Boolean): ChannelWithExpiration[F,W,R] = + new ChannelWithExpiration(internal , ttl, throwTimeouts) + + + override def close(): Unit = internal.close() + + override def isClosed: Boolean = internal.isClosed + + + def qqq: Int = 0 + + \ No newline at end of file diff --git a/shared/src/main/scala/gopher/Gopher.scala b/shared/src/main/scala/gopher/Gopher.scala new file mode 100644 index 00000000..cfe52c26 --- /dev/null +++ b/shared/src/main/scala/gopher/Gopher.scala @@ -0,0 +1,122 @@ +package gopher + +import cps._ +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.Duration +import scala.util._ + +import java.util.logging.{Level => LogLevel} +import java.util.concurrent.Executor + + +/** + * core of Gopher API. Given instance of Gopher[F] need for using most of Gopher operations. 
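// A short sketch of obtaining and using an instance, assuming Future as the
// effect type (the setup used by the JVM tests in this changeset).
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.monads.FutureAsyncMonad

def gopherSketch(): Unit =
  given Gopher[Future] = SharedGopherAPI.apply[Future]()
  val ch   = makeChannel[String]()          // plain unbuffered channel
  val once = makeOnceChannel[Int]()         // closes itself after the first write
  summon[Gopher[Future]].log(java.util.logging.Level.FINE, "channels created")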
+ * + * Gopher is a framework, which implements CSP (Communication Sequence Process). + * Process here - scala units of execution (i.e. functions, blok of code, etc). + * Communication channels represented by [gopher.Channel] + * + * @see [[gopher.Channel]] + * @see [[gopher#select]] + **/ +trait Gopher[F[_]:CpsSchedulingMonad]: + + type Monad[X] = F[X] + + /** + * Monad which control asynchronic execution. + * The main is scheduling: i.e. ability to submit monadic expression to scheduler + * and know that this monadic expression will be evaluated. + **/ + def asyncMonad: CpsSchedulingMonad[F] = summon[CpsSchedulingMonad[F]] + + /** + * Create Read/Write channel. + * @param bufSize - size of buffer. If it is zero, the channel is unbuffered. (i.e. writer is blocked until reader start processing). + * @param autoClose - close after first message was written to channel. + * @see [gopher.Channel] + **/ + def makeChannel[A](bufSize:Int = 0, + autoClose: Boolean = false): Channel[F,A,A] + + /** + * Create channel where you can write only one element. + * @see [gopher.Channel] + **/ + def makeOnceChannel[A](): Channel[F,A,A] = + makeChannel[A](1,true) + + /*** + *Create a select statement, which used for choosing one action from a set of potentially concurrent asynchronics events. + *[@see [[gopher.Select#apply]] + **/ + def select: Select[F] = + new Select[F](this) + + /** + * get an object with time operations. + * @see [[gopher.Time]] + **/ + def time: Time[F] + + /** + * set logging function, which output internal diagnostics and errors from spawned processes. + **/ + def setLogFun(logFun:(LogLevel, String, Throwable|Null) => Unit): ((LogLevel, String, Throwable|Null) => Unit) + + def log(level: LogLevel, message: String, ex: Throwable| Null): Unit + + def log(level: LogLevel, message: String): Unit = + log(level,message, null) + + def taskExecutionContext: ExecutionContext + + protected[gopher] def logImpossible(ex: Throwable): Unit = + log(LogLevel.WARNING, "impossible", ex) + + protected[gopher] def spawnAndLogFail[T](op: =>F[T]): F[Unit] = + asyncMonad.mapTry(asyncMonad.spawn(op)){ + case Success(_) => () + case Failure(ex) => + log(LogLevel.WARNING, "exception in spawned process", ex) + () + } + +end Gopher + + +/** +* Create Read/Write channel. +* @param bufSize - size of buffer. If it is zero, the channel is unbuffered. (i.e. writer is blocked until reader start processing). +* @param autoClose - close after first message was written to channel. +* @see [gopher.Channel] +**/ +def makeChannel[A](bufSize:Int = 0, + autoClose: Boolean = false)(using g:Gopher[?]):Channel[g.Monad,A,A] = + g.makeChannel(bufSize, autoClose) + + +def makeOnceChannel[A]()(using g:Gopher[?]): Channel[g.Monad,A,A] = + g.makeOnceChannel[A]() + + +def select(using g:Gopher[?]):Select[g.Monad] = + g.select + +/** + * represent `F[_]` as read channel. 
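// Sketch of the conversions provided here, assuming a Future-based Gopher
// given in scope; the values are illustrative.
import scala.concurrent.Future

def conversionSketch(using Gopher[Future]): Unit =
  val one:  ReadChannel[Future, Int] = Future.successful(42).asChannel   // emits the future's value, then behaves as closed
  val many: ReadChannel[Future, Int] = List(1, 2, 3).asReadChannel       // emits 1, 2, 3, then closes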
+ **/ +def futureInput[F[_],A](f: F[A])(using g: Gopher[F]): ReadChannel[F,A] = + val ch = g.makeOnceChannel[Try[A]]() + g.spawnAndLogFail{ + g.asyncMonad.flatMapTry(f)(r => ch.awrite(r)) + } + ch.map(_.get) + +extension [F[_],A](fa: F[A])(using g: Gopher[F]) + def asChannel : ReadChannel[F,A] = + futureInput(fa) + +extension [F[_],A](c: IterableOnce[A])(using g: Gopher[F]) + def asReadChannel: ReadChannel[F,A] = + ReadChannel.fromIterable(c) \ No newline at end of file diff --git a/shared/src/main/scala/gopher/GopherAPI.scala b/shared/src/main/scala/gopher/GopherAPI.scala new file mode 100644 index 00000000..894e8b39 --- /dev/null +++ b/shared/src/main/scala/gopher/GopherAPI.scala @@ -0,0 +1,41 @@ +package gopher + +import cps._ + +trait GopherConfig +case object DefaultGopherConfig extends GopherConfig + + + +trait GopherAPI: + + def apply[F[_]:CpsSchedulingMonad](cfg:GopherConfig = DefaultGopherConfig): Gopher[F] + +/** + * Shared gopehr api, which is initialized by platofrm part, + * Primary used for cross-platforming test, you shoul initialize one of platform API + * behind and then run tests. + **/ +object SharedGopherAPI { + + private[this] var _api: Option[GopherAPI] = None + + def apply[F[_]:CpsSchedulingMonad](cfg:GopherConfig = DefaultGopherConfig): Gopher[F] = + api.apply[F](cfg) + + + + def api: GopherAPI = + if (_api.isEmpty) then + initPlatformSpecific() + _api.get + + + private[gopher] def setApi(api: GopherAPI): Unit = + this._api = Some(api) + + + private[gopher] def initPlatformSpecific(): Unit = + Platform.initShared() + +} \ No newline at end of file diff --git a/shared/src/main/scala/gopher/ReadChannel.scala b/shared/src/main/scala/gopher/ReadChannel.scala new file mode 100644 index 00000000..e02ae609 --- /dev/null +++ b/shared/src/main/scala/gopher/ReadChannel.scala @@ -0,0 +1,342 @@ +package gopher + +import cps._ +import gopher.impl._ +import scala.util.Try +import scala.util.Success +import scala.util.Failure +import scala.util.control.NonFatal +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.Duration + +import java.util.logging.{Level => LogLevel} + +/** + * ReadChannel: Interface providing asynchronous reading API. + * + **/ +trait ReadChannel[F[_], A]: + + thisReadChannel => + + /** + * Special type which is used in select statement. + *@see [gopher.Select] + **/ + type read = A + + def gopherApi: Gopher[F] + + def asyncMonad: CpsSchedulingMonad[F] = gopherApi.asyncMonad + + // workarround for https://github.com/lampepfl/dotty/issues/10477 + protected def rAsyncMonad: CpsAsyncMonad[F] = asyncMonad + + def addReader(reader: Reader[A]): Unit + + def addDoneReader(reader: Reader[Unit]): Unit + + final lazy val done: ReadChannel[F,Unit] = DoneReadChannel() + + type done = Unit + + /** + * async version of read. Immediatly return future, which will contains result of read or failur with StreamClosedException + * in case of stream is closed. + */ + def aread():F[A] = + asyncMonad.adoptCallbackStyle(f => addReader(SimpleReader(f))) + + /** + * blocked read: if currently not element available - wait for one. + * Can be used only inside async block. + * If stream is closed and no values to read left in the stream - throws StreamClosedException + **/ + transparent inline def read[G[_]]()(using mc:CpsMonadContext[G], fg:CpsMonadConversion[F,G]): A = + await(aread()) + + /** + * Synonim for read. + */ + transparent inline def ?(using mc:CpsMonadContext[F]) : A = await(aread()) + + /** + * return F which contains sequence from first `n` elements. 
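// Usage sketch (the channel parameter is illustrative): `take`/`atake` return
// at most `n` elements and stop early if the channel closes before that.
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad

def takeSketch(ch: ReadChannel[Future, Int]): Future[Int] =
  async[Future] {
    ch.take(3).sum          // sum of up to the first three elements
  }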
+ */ + def atake(n: Int): F[IndexedSeq[A]] = + given CpsAsyncMonad[F] = asyncMonad + async[F]{ + var b = IndexedSeq.newBuilder[A] + try { + var c = 0 + while(c < n) { + val a = read() + b.addOne(a) + c = c + 1 + } + }catch{ + case ex: ChannelClosedException => + } + b.result() + } + + /** + * take first `n` elements. + * should be called inside async block. + **/ + transparent inline def take(n: Int)(using CpsMonadContext[F]): IndexedSeq[A] = + await(atake(n)) + + /** + * read value and return future with + * - Some(value) if value is available to read + * - None if stream is closed. + **/ + def aOptRead(): F[Option[A]] = + asyncMonad.adoptCallbackStyle( f => + addReader(SimpleReader{ x => x match + case Failure(ex: ChannelClosedException) => f(Success(None)) + case Failure(ex) => f(Failure(ex)) + case Success(v) => f(Success(Some(v))) + }) + ) + + /** + * read value and return + * - Some(value) if value is available to read + * - None if stream is closed. + * + * should be called inside async block. + **/ + transparent inline def optRead()(using CpsMonadContext[F]): Option[A] = await(aOptRead()) + + def foreach_async(f: A=>F[Unit]): F[Unit] = + given CpsAsyncMonad[F] = asyncMonad + async[F]{ + var done = false + while(!done) { + optRead() match + case Some(v) => await(f(v)) + case None => done = true + } + } + + def aforeach_async(f: A=>F[Unit]): F[F[Unit]] = + rAsyncMonad.pure(foreach_async(f)) + + def aforeach(f: A=> Unit): F[Unit] = + foreach_async( x => rAsyncMonad.pure(f(x))) + + /** + * run code each time when new object is arriced. + * until end of stream is not reached + **/ + transparent inline def foreach(inline f: A=>Unit)(using CpsMonadContext[F]): Unit = + await(aforeach(f)) + + + def map[B](f: A=>B): ReadChannel[F,B] = + new MappedReadChannel(this, f) + + def mapAsync[B](f: A=>F[B]): ReadChannel[F,B] = + new MappedAsyncReadChannel(this, f) + + def filter(p: A=>Boolean): ReadChannel[F,A] = + new FilteredReadChannel(this,p) + + def filterAsync(p: A=>F[Boolean]): ReadChannel[F,A] = + new FilteredAsyncReadChannel(this,p) + + def dup(bufSize: Int=1, expiration: Duration=Duration.Inf): (ReadChannel[F,A], ReadChannel[F,A]) = + DuppedInput(this, bufSize)(using gopherApi).pair + + def afold[S](s0:S)(f: (S,A)=>S): F[S] = + fold_async(s0)((s,e) => asyncMonad.pure(f(s,e))) + + def afold_async[S](s0: S)(f: (S,A)=>F[S]): F[S] = + fold_async(s0)(f) + + def fold_async[S](s0:S)(f: (S,A) => F[S] ): F[S] = + given CpsSchedulingMonad[F] = asyncMonad + async[F] { + var s = s0 + while{ + optRead() match + case Some(a) => + s = await(f(s,a)) + true + case None => + false + }do() + s + } + + transparent inline def fold[S](inline s0:S)(inline f: (S,A) => S )(using mc:CpsMonadContext[F]): S = + await[F,S,F](afold(s0)(f)) + + def zip[B](x: ReadChannel[F,B]): ReadChannel[F,(A,B)] = + given CpsSchedulingMonad[F] = asyncMonad + val retval = gopherApi.makeChannel[(A,B)]() + gopherApi.spawnAndLogFail(async[F]{ + var done = false + while(!done) { + this.optRead() match + case Some(a) => + x.optRead() match + case Some(b) => + retval.write((a,b)) + case None => + done=true + case None => + done = true + } + retval.close() + }) + retval + + def or(other: ReadChannel[F,A]):ReadChannel[F,A] = + new OrReadChannel(this, other) + + def |(other: ReadChannel[F,A]):ReadChannel[F,A] = + new OrReadChannel(this,other) + + def append(other: ReadChannel[F,A]): ReadChannel[F, A] = + new AppendReadChannel(this, other) + + class DoneReadChannel extends ReadChannel[F,Unit]: + + def addReader(reader: Reader[Unit]): Unit = + 
thisReadChannel.addDoneReader(reader) + + def addDoneReader(reader: Reader[Unit]): Unit = + thisReadChannel.addDoneReader(reader) + + def gopherApi: Gopher[F] = thisReadChannel.gopherApi + + end DoneReadChannel + + + class SimpleReader(f: Try[A] => Unit) extends Reader[A]: + + def canExpire: Boolean = false + def isExpired: Boolean = false + + def capture(): Expirable.Capture[Try[A]=>Unit] = Expirable.Capture.Ready(f) + + def markUsed(): Unit = () + def markFree(): Unit = () + + end SimpleReader + +end ReadChannel + +object ReadChannel: + + + def empty[F[_],A](using Gopher[F]): ReadChannel[F,A] = + val retval = summon[Gopher[F]].makeChannel[A]() + retval.close() + retval + + /** + *@param c - iteratable to read from. + *@return channel, which will emit all elements from 'c' and then close. + **/ + def fromIterable[F[_],A](c: IterableOnce[A])(using Gopher[F]): ReadChannel[F,A] = + given asyncMonad: CpsSchedulingMonad[F] = summon[Gopher[F]].asyncMonad + val retval = makeChannel[A]() + summon[Gopher[F]].spawnAndLogFail(async{ + val it = c.iterator + while(it.hasNext) { + val a = it.next() + retval.write(a) + } + retval.close() + }) + retval + + /** + *@return one copy of `a` and close. + **/ + def once[F[_],A](a: A)(using Gopher[F]): ReadChannel[F,A] = + fromIterable(List(a)) + + /** + *@param a - value to produce + *@return channel which emit value of a in loop and never close + **/ + def always[F[_],A](a: A)(using Gopher[F]): ReadChannel[F,A] = + given asyncMonad: CpsSchedulingMonad[F] = summon[Gopher[F]].asyncMonad + val retval = makeChannel[A]() + summon[Gopher[F]].spawnAndLogFail( + async{ + while(true) { + retval.write(a) + } + } + ) + retval + + + + def fromFuture[F[_],A](f: F[A])(using Gopher[F]): ReadChannel[F,A] = + futureInput(f) + + def fromValues[F[_],A](values: A*)(using Gopher[F]): ReadChannel[F,A] = + fromIterable(values) + + def unfold[S,F[_],A](s:S)(f:S => Option[(A,S)])(using Gopher[F]): ReadChannel[F,A] = + unfoldAsync[S,F,A](s)( state => summon[Gopher[F]].asyncMonad.tryPure(f(state)) ) + + def unfoldAsync[S,F[_],A](s:S)(f:S => F[Option[(A,S)]])(using Gopher[F]): ReadChannel[F,A] = + given asyncMonad: CpsSchedulingMonad[F] = summon[Gopher[F]].asyncMonad + val retval = makeChannel[Try[A]]() + summon[Gopher[F]].spawnAndLogFail(async{ + var done = false + var state = s + try + while(!done) { + await(f(state)) match + case Some((a,next)) => + retval.write(Success(a)) + state = next + case None => + done = true + } + catch + case NonFatal(ex) => + summon[Gopher[F]].log(LogLevel.FINE, s"exception (ch: $retval)", ex) + retval.write(Failure(ex)) + finally + summon[Gopher[F]].log(LogLevel.FINE, s"closing $retval") + retval.close(); + }) + retval.map{ + case Success(x) => x + case Failure(ex) => + throw ex + } + + + + import cps.stream._ + + given emitAbsorber[F[_], C<:CpsMonadContext[F], T](using auxMonad: CpsSchedulingMonad[F]{ type Context = C }, gopherApi: Gopher[F]): BaseUnfoldCpsAsyncEmitAbsorber[ReadChannel[F,T],F,C,T]( + using gopherApi.taskExecutionContext, auxMonad) with + + override type Element = T + + def asSync(fs: F[ReadChannel[F,T]]): ReadChannel[F,T] = + DelayedReadChannel(fs) + + + def unfold[S](s0:S)(f: S => F[Option[(T,S)]]): ReadChannel[F,T] = + val r: ReadChannel[F,T] = unfoldAsync(s0)(f) + r + + +end ReadChannel + + + + diff --git a/shared/src/main/scala/gopher/Select.scala b/shared/src/main/scala/gopher/Select.scala new file mode 100644 index 00000000..4ca37b84 --- /dev/null +++ b/shared/src/main/scala/gopher/Select.scala @@ -0,0 +1,116 @@ +package gopher + 
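// Sketch of the ReadChannel constructors defined above, assuming a
// Future-based Gopher given; the countdown example is illustrative.
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad

def constructorsSketch(using Gopher[Future]): Future[Int] =
  val fixed     = ReadChannel.fromValues[Future, Int](1, 2, 3)     // emits 1, 2, 3, then closes
  val countdown = ReadChannel.unfold[Int, Future, Int](3) { n =>
    if n > 0 then Some((n, n - 1)) else None                       // emits 3, 2, 1, then closes
  }
  async[Future] {
    fixed.fold(0)(_ + _) + countdown.fold(0)(_ + _)                // 6 + 6 = 12
  }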
+import cps._ + +import scala.quoted._ +import scala.compiletime._ +import scala.concurrent.duration._ + +/** Organize waiting for read/write from multiple async channels + * + * Gopher[F] provide a function `select` of this type. + */ +class Select[F[_]](api: Gopher[F]): + + /** wait until some channels from the list in pf . + * + *```Scala + *async{ + * .... + * select { + * case vx:xChannel.read => doSomethingWithX + * case vy:yChannel.write if (vy == valueToWrite) => doSomethingAfterWrite(vy) + * case t: Time.after if (t == 1.minute) => processTimeout + * } + * ... + *} + *``` + */ + transparent inline def apply[A](inline pf: PartialFunction[Any,A])(using mc:CpsMonadContext[F]): A = + ${ + SelectMacro.onceImpl[F,A]('pf, 'api, 'mc ) + } + + /** + * create select groop + *@see [gopher.SelectGroup] + **/ + def group[S]: SelectGroup[F,S] = new SelectGroup[F,S](api) + + def once[S]: SelectGroup[F,S] = new SelectGroup[F,S](api) + + /** + * create Select Loop. + **/ + def loop: SelectLoop[F] = new SelectLoop[F](api) + + + def fold[S](s0:S)(step: S => S | SelectFold.Done[S]): S = { + var s: S = s0 + while{ + step(s) match + case SelectFold.Done(r) => + s = r.asInstanceOf[S] + false + case other => + s = other.asInstanceOf[S] + true + } do () + s + } + + def fold_async[S](s0:S)(step: S => F[S | SelectFold.Done[S]]): F[S] = { + api.asyncMonad.flatMap(step(s0)){ s => + s match + case SelectFold.Done(r) => api.asyncMonad.pure(r.asInstanceOf[S]) + case other => fold_async[S](other.asInstanceOf[S])(step) + } + } + + transparent inline def afold[S](s0:S)(inline step: CpsMonadContext[F] ?=> S => S | SelectFold.Done[S]) : F[S] = + given CpsAsyncMonad[F] = api.asyncMonad + async[F]{ + fold(s0)(step) + } + + def afold_async[S](s0:S)(step: S => F[S | SelectFold.Done[S]]) : F[S] = + fold_async(s0)(step) + + + def map[A](step: SelectGroup[F,A] => A): ReadChannel[F,A] = + mapAsync[A](x => api.asyncMonad.pure(step(x))) + + def mapAsync[A](step: SelectGroup[F,A] => F[A]): ReadChannel[F,A] = + val r = makeChannel[A]()(using api) + given CpsSchedulingMonad[F] = api.asyncMonad + api.spawnAndLogFail{ + async{ + var done = false + while(!done) + val g = SelectGroup[F,A](api) + try { + val e = await(step(g)) + r.write(e) + } catch { + case ex: ChannelClosedException => + r.close() + done=true + } + } + } + r + + /** + * create forever runner. 
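// Sketch of the `aforever` runner declared just below: react to two channels
// until one of them is closed. The channel names are illustrative, and a
// Future-based Gopher given is assumed.
import scala.concurrent.Future

def monitorSketch(events: ReadChannel[Future, String],
                  errors: ReadChannel[Future, Throwable])
                 (using g: Gopher[Future]): Future[Unit] =
  g.select.aforever {
    case e:  events.read => println(s"event: $e")
    case ex: errors.read => println(s"error: ${ex.getMessage}")
  }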
+ **/ + def forever: SelectForever[F] = new SelectForever[F](api) + + /** + * run forever expression in `pf`, return + **/ + transparent inline def aforever(inline pf: PartialFunction[Any,Unit]): F[Unit] = + ${ SelectMacro.aforeverImpl('pf, 'api) } + + + + diff --git a/shared/src/main/scala/gopher/SelectFold.scala b/shared/src/main/scala/gopher/SelectFold.scala new file mode 100644 index 00000000..2dee2981 --- /dev/null +++ b/shared/src/main/scala/gopher/SelectFold.scala @@ -0,0 +1,13 @@ +package gopher + +/** + * Helper namespace for Select.Fold return value + * @see [Select.fold] + **/ +object SelectFold: + + /** + * return value in Select.Fold which means that we should stop folding + **/ + case class Done[S](s: S) + \ No newline at end of file diff --git a/shared/src/main/scala/gopher/SelectForever.scala b/shared/src/main/scala/gopher/SelectForever.scala new file mode 100644 index 00000000..3fecc3a9 --- /dev/null +++ b/shared/src/main/scala/gopher/SelectForever.scala @@ -0,0 +1,48 @@ +package gopher + +import cps._ +import scala.quoted._ +import scala.compiletime._ +import scala.concurrent.duration._ + + +/** + * Result of `select.forever`: apply method accept partial pseudofunction which evalueated forever. + **/ +class SelectForever[F[_]](api: Gopher[F]) extends SelectGroupBuilder[F,Unit, Unit](api): + + + transparent inline def apply(inline pf: PartialFunction[Any,Unit])(using mc:CpsMonadContext[F]): Unit = + ${ + SelectMacro.foreverImpl('pf,'api, 'mc) + } + + + def runAsync(): F[Unit] = + given CpsSchedulingMonad[F] = api.asyncMonad + async[F]{ + while{ + val group = api.select.group[Unit] + try + groupBuilder(group).run() + true + catch + case ex: ChannelClosedException => + false + } do () + } + + + + + + + + + + + + + + + diff --git a/shared/src/main/scala/gopher/SelectGroup.scala b/shared/src/main/scala/gopher/SelectGroup.scala new file mode 100644 index 00000000..6db7cd2c --- /dev/null +++ b/shared/src/main/scala/gopher/SelectGroup.scala @@ -0,0 +1,256 @@ +package gopher + +import cps._ +import gopher.impl._ +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong +import java.util.TimerTask; +import scala.annotation.unchecked.uncheckedVariance +import scala.util._ +import scala.concurrent.duration._ +import scala.language.postfixOps + +import java.util.logging.{Level => LogLevel} + + +/** + * Select group is a virtual 'lock' object. + * Readers and writers are grouped into select groups. When + * event about avaiability to read or to write is arrived and + * no current event group members is running, than run of one of the members + * is triggered. + * I.e. only one from group can run. + * + * Note, that application develeper usually not work with `SelectGroup` directly, + * it is created internally by `select` pseudostatement. + * + *@see [gopher.Select] + *@see [gopher.select] + **/ +class SelectGroup[F[_], S](api: Gopher[F]) extends SelectListeners[F,S,S]: + + thisSelectGroup => + + /** + * instance of select group created for call of select. 
+ * 0 - free + * 1 - now processes + * 2 - expired + **/ + val waitState: AtomicInteger = new AtomicInteger(0) + private var call: Try[S] => Unit = { _ => () } + private inline def m = api.asyncMonad + private val retval = m.adoptCallbackStyle[S](f => call=f) + private val startTime = new AtomicLong(0L) + var timeoutScheduled: Option[Time.Scheduled] = None + + override def asyncMonad = api.asyncMonad + + def addReader[A](ch: ReadChannel[F,A], action: Try[A]=>F[S]): Unit = + val record = ReaderRecord(ch, action) + ch.addReader(record) + + def addWriter[A](ch: WriteChannel[F,A], element: A, action: Try[Unit]=>F[S]): Unit = + val record = WriterRecord(ch, element, action) + ch.addWriter(record) + + def setTimeout(timeout: FiniteDuration, action: Try[FiniteDuration] => F[S]): Unit = + timeoutScheduled.foreach(_.cancel()) + val record = new TimeoutRecord(timeout,action) + val newTask = () => { + val v = System.currentTimeMillis() - startTime.get() + record.capture() match + case Some(f) => f(Success(v milliseconds)) + case None => // do nothing. + } + timeoutScheduled = Some(api.time.schedule(newTask,timeout)) + + + + def step():F[S] = + retval + + def runAsync():F[S] = + retval + + transparent inline def apply(inline pf: PartialFunction[Any,S])(using mc: CpsMonadContext[F]): S = + ${ + SelectMacro.onceImpl[F,S]('pf, 'api, 'mc ) + } + + transparent inline def select(inline pf: PartialFunction[Any,S])(using mc: CpsMonadContext[F]): S = + ${ + SelectMacro.onceImpl[F,S]('pf, 'api, 'mc ) + } + + /** + * short alias for SelectFold.Done + */ + def done[S](s:S):SelectFold.Done[S] = + SelectFold.Done(s) + + /** + * FluentDSL for user SelectGroup without macroses. + *``` + * SelectGroup.onRead(input){ x => println(x) } + * .onRead(endSignal){ () => done=true } + *``` + **/ + def onRead[A](ch: ReadChannel[F,A]) (f: A => S ): this.type = + addReader[A](ch,{ + case Success(a) => m.tryPure(f(a)) + case Failure(ex) => m.error(ex) + }) + this + + + // reading call will be tranformed to reader_async in async expressions + def onReadAsync[A](ch: ReadChannel[F,A])(f: A => F[S] ): this.type = + addReader[A](ch,{ + case Success(a) => m.tryImpure(f(a)) + case Failure(ex) => m.error(ex) + }) + this + + // reading call will be tranformed to reader_async in async expressions + def onRead_async[A](ch: ReadChannel[F,A])(f: A => F[S] ): F[this.type] = + m.pure(onReadAsync(ch)(f)) + + /** + * FluentDSL for user SelectGroup without macroses. 
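// Sketch of the fluent (macro-free) select API, assuming a Future-based Gopher
// given; the input channel and the timeout value are illustrative.
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.*
import cps.*
import cps.monads.FutureAsyncMonad

def fluentSketch(input: ReadChannel[Future, Int])(using g: Gopher[Future]): Future[String] =
  async[Future] {
    g.select.group[String]
      .onRead(input)(x => s"got $x")
      .onTimeout(1.second)(_ => "timeout")
      .run()                      // completes with whichever of the two events fires first
  }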
+ *``` + * SelectGroup.onWrite(input){ x => println(x) } + * .onWrite(endSignal){ () => done=true } + *``` + **/ + def onWrite[A](ch: WriteChannel[F,A], a: =>A)(f: A =>S ): this.type = + addWriter[A](ch,a,{ + case Success(()) => m.tryPure(f(a)) + case Failure(ex) => m.error(ex) + }) + this + + + def onWriteAsync[A](ch: WriteChannel[F,A], a: ()=>F[A]) (f: A => F[S] ): this.type = + m.map(a()){ x => + addWriter[A](ch,x,{ + case Success(()) => m.tryImpure(f(x)) + case Failure(ex) => m.error(ex) + }) + } + this + + + def onTimeout(t:FiniteDuration)(f: FiniteDuration => S): this.type = + setTimeout(t,{ + case Success(x) => m.tryPure(f(x)) + case Failure(ex) => m.error(ex) + }) + this + + def onTimeoutAsync(t:FiniteDuration)(f: FiniteDuration => F[S]): this.type = + setTimeout(t,{ + case Success(x) => m.tryImpure(f(x)) + case Failure(ex) => m.error(ex) + }) + this + + + def onTimeout_async(t:FiniteDuration)(f: FiniteDuration => F[S]): F[this.type] = + m.pure(onTimeoutAsync(t)(f)) + + + // + trait Expiration: + def canExpire: Boolean = true + def isExpired: Boolean = waitState.get()==2 + def markUsed(): Unit = waitState.set(2) + def markFree(): Unit = { + waitState.set(0) + } + + + + case class ReaderRecord[A](ch: ReadChannel[F,A], action: Try[A] => F[S]) extends Reader[A] with Expiration: + type Element = A + type State = S + + val ready = Expirable.Capture.Ready[Try[A]=>Unit](v => { + timeoutScheduled.foreach(_.cancel()) + api.spawnAndLogFail(m.mapTry(action(v))(x => call(x))) + }) + + override def capture(): Expirable.Capture[Try[A]=>Unit] = + // fast path + if waitState.compareAndSet(0,1) then + ready + else + var retval: Expirable.Capture[Try[A]=>Unit] = Expirable.Capture.Expired + while { + waitState.get() match + case 2 => retval = Expirable.Capture.Expired + false + case 1 => retval = Expirable.Capture.WaitChangeComplete + false + case 0 => // was just freed + if (waitState.compareAndSet(0,1)) then + retval = ready + false + else + true + case _ => // impossible. 
+ throw new IllegalStateException("Imposible state of busy flag") + } do () + retval + + + + + case class WriterRecord[A](ch: WriteChannel[F,A], + element: A, + action: Try[Unit] => F[S], + ) extends Writer[A] with Expiration: + type Element = A + type State = S + + val ready: Expirable.Capture.Ready[(A,Try[Unit]=>Unit)] = + Expirable.Capture.Ready((element, + (v:Try[Unit]) => { + timeoutScheduled.foreach(_.cancel()) + api.spawnAndLogFail(m.mapTry(action(v))(x=>call(x))) + } + )) + + override def capture(): Expirable.Capture[(A,Try[Unit]=>Unit)] = + if waitState.compareAndSet(0,1) then + ready + else + var retval: Expirable.Capture[(A,Try[Unit]=>Unit)] = Expirable.Capture.Expired + while{ + waitState.get() match + case 2 => false + case 1 => retval = Expirable.Capture.WaitChangeComplete + false + case 0 => + if (waitState.compareAndSet(0,1)) then + retval = ready + false + else + true + } do () + retval + + + case class TimeoutRecord(duration: FiniteDuration, + action: Try[FiniteDuration] => F[S], + ) extends Expiration: + + def capture(): Option[Try[FiniteDuration] => Unit] = + if (waitState.compareAndSet(0,1)) then + Some((v:Try[FiniteDuration]) => + api.spawnAndLogFail(m.mapTry(action(v))(x => call(x))) + ) + else + None + + diff --git a/shared/src/main/scala/gopher/SelectListeners.scala b/shared/src/main/scala/gopher/SelectListeners.scala new file mode 100644 index 00000000..c3de322b --- /dev/null +++ b/shared/src/main/scala/gopher/SelectListeners.scala @@ -0,0 +1,77 @@ +package gopher + +import cps._ +import scala.concurrent.duration.FiniteDuration + +trait SelectListeners[F[_],S, R]: + + + def onRead[A](ch: ReadChannel[F,A]) (f: A => S ): this.type + + def onWrite[A](ch: WriteChannel[F,A], a: =>A)(f: A => S): this.type + + def onTimeout(t: FiniteDuration)(f: FiniteDuration => S): this.type + + def asyncMonad: CpsSchedulingMonad[F] + + def runAsync():F[R] + + transparent inline def run()(using CpsMonadContext[F]): R = await(runAsync()) + + + + +abstract class SelectGroupBuilder[F[_],S, R](api: Gopher[F]) extends SelectListeners[F,S, R]: + + protected var groupBuilder: SelectGroup[F,S] => SelectGroup[F,S] = identity + + val m = api.asyncMonad + + def onRead[A](ch: ReadChannel[F,A])(f: A => S): this.type = + groupBuilder = groupBuilder.andThen{ + g => g.onRead(ch)(f) + } + this + + def onReadAsync[A](ch: ReadChannel[F,A])(f: A => F[S]): this.type = + groupBuilder = groupBuilder.andThen( + _.onReadAsync(ch)(f) + ) + this + + + inline def reading[A](ch: ReadChannel[F,A])(f: A=>S): this.type = + onRead(ch)(f) + + def onWrite[A](ch: WriteChannel[F,A], a: =>A)(f: A=>S): this.type = + groupBuilder = groupBuilder.andThen{ + g => g.onWrite(ch,a)(f) + } + this + + def onWriteAsync[A](ch: WriteChannel[F,A], a: ()=>F[A])(f: A=>F[S]): this.type = + groupBuilder = groupBuilder.andThen{ + g => g.onWriteAsync(ch,a)(f) + } + this + + + inline def writing[A](ch: WriteChannel[F,A], a: =>A)(f: A=>S): this.type = + onWrite(ch,a)(f) + + + def onTimeout(t: FiniteDuration)(f: FiniteDuration => S): this.type = + groupBuilder = groupBuilder.andThen{ + g => g.onTimeout(t)(f) + } + this + + def onTimeoutAsync(t: FiniteDuration)(f: FiniteDuration => F[S]): this.type = + groupBuilder = groupBuilder.andThen{ + g => g.onTimeoutAsync(t)(f) + } + this + + def asyncMonad: CpsSchedulingMonad[F] = api.asyncMonad + + diff --git a/shared/src/main/scala/gopher/SelectLoop.scala b/shared/src/main/scala/gopher/SelectLoop.scala new file mode 100644 index 00000000..7393a2b1 --- /dev/null +++ 
b/shared/src/main/scala/gopher/SelectLoop.scala @@ -0,0 +1,30 @@ +package gopher + +import cps._ +import scala.quoted._ +import scala.compiletime._ +import scala.concurrent.duration._ + +import java.util.logging.{Level => LogLevel} + + +class SelectLoop[F[_]](api: Gopher[F]) extends SelectGroupBuilder[F,Boolean, Unit](api): + + + transparent inline def apply(inline pf: PartialFunction[Any,Boolean])(using mc: CpsMonadContext[F]): Unit = + ${ + SelectMacro.loopImpl[F]('pf, 'api, 'mc ) + } + + def runAsync(): F[Unit] = + given m: CpsSchedulingMonad[F] = api.asyncMonad + async[F]{ + while{ + val group = api.select.group[Boolean] + val build = groupBuilder(group) + val r = build.run() + r + } do () + } + + diff --git a/shared/src/main/scala/gopher/SelectMacro.scala b/shared/src/main/scala/gopher/SelectMacro.scala new file mode 100644 index 00000000..fd93363c --- /dev/null +++ b/shared/src/main/scala/gopher/SelectMacro.scala @@ -0,0 +1,325 @@ +package gopher + + +import cps._ + +import scala.quoted._ +import scala.compiletime._ +import scala.concurrent.duration._ +import scala.util.control.NonFatal + + + + +object SelectMacro: + + import cps.macros.common.TransformUtil + + sealed trait SelectGroupExpr[F[_],S, R]: + def toExprOf[X <: SelectListeners[F,S, R]]: Expr[X] + + sealed trait SelectorCaseExpr[F[_]:Type, S:Type, R:Type]: + type Monad[X] = F[X] + def appended[L <: SelectListeners[F,S,R] : Type](base: Expr[L])(using Quotes): Expr[L] + + case class ReadExpression[F[_]:Type, A:Type, S:Type, R:Type](ch: Expr[ReadChannel[F,A]], f: Expr[A => S], isDone: Boolean) extends SelectorCaseExpr[F,S,R]: + def appended[L <: SelectListeners[F,S,R]: Type](base: Expr[L])(using Quotes): Expr[L] = + '{ $base.onRead($ch)($f) } + + case class WriteExpression[F[_]:Type, A:Type, S:Type, R:Type](ch: Expr[WriteChannel[F,A]], a: Expr[A], f: Expr[A => S]) extends SelectorCaseExpr[F,S,R]: + def appended[L <: SelectListeners[F,S,R]: Type](base: Expr[L])(using Quotes): Expr[L] = + '{ $base.onWrite($ch,$a)($f) } + + case class TimeoutExpression[F[_]:Type,S:Type, R:Type](t: Expr[FiniteDuration], f: Expr[ FiniteDuration => S ]) extends SelectorCaseExpr[F,S,R]: + def appended[L <: SelectListeners[F,S,R]: Type](base: Expr[L])(using Quotes): Expr[L] = + '{ $base.onTimeout($t)($f) } + + case class DoneExression[F[_]:Type, A:Type, S:Type, R:Type](ch: Expr[ReadChannel[F,A]], f: Expr[Unit=>S]) extends SelectorCaseExpr[F,S,R]: + def appended[L <: SelectListeners[F,S,R]: Type](base: Expr[L])(using Quotes): Expr[L] = + '{ $base.onRead($ch.done)($f) } + + + def selectListenerBuilder[F[_]:Type, S:Type, R:Type, L <: SelectListeners[F,S,R]:Type]( + constructor: Expr[L], + caseDefs: List[SelectorCaseExpr[F,S,R]])(using Quotes): Expr[L] = + val s0 = constructor + caseDefs.foldLeft(s0){(s,e) => + e.appended(s) + } + + + def buildSelectListenerRun[F[_]:Type, S:Type, R:Type, L <: SelectListeners[F,S,R]:Type]( + constructor: Expr[L], + caseDefs: List[SelectorCaseExpr[F,S,R]], + api:Expr[Gopher[F]], + monadContext: Expr[CpsMonadContext[F]], + )(using Quotes): Expr[R] = + val g = selectListenerBuilder(constructor, caseDefs) + // dotty bug if g.run + val r = '{ await($g.runAsync())(using $monadContext, CpsMonadConversion.identityConversion[F]) } + r.asExprOf[R] + + def buildSelectListenerRunAsync[F[_]:Type, S:Type, R:Type, L <: SelectListeners[F,S,R]:Type]( + constructor: Expr[L], + caseDefs: List[SelectorCaseExpr[F,S,R]], + api:Expr[Gopher[F]])(using Quotes): Expr[F[R]] = + val g = selectListenerBuilder(constructor, caseDefs) + // dotty bug if 
g.run + val r = '{ $g.runAsync() } + r.asExprOf[F[R]] + + + + def onceImpl[F[_]:Type, A:Type](pf: Expr[PartialFunction[Any,A]], api: Expr[Gopher[F]], monadContext: Expr[CpsMonadContext[F]])(using Quotes): Expr[A] = + def builder(caseDefs: List[SelectorCaseExpr[F,A,A]]):Expr[A] = { + val s0 = '{ + new SelectGroup[F,A]($api) + } + buildSelectListenerRun(s0, caseDefs, api, monadContext) + } + runImpl(builder, pf) + + def loopImpl[F[_]:Type](pf: Expr[PartialFunction[Any,Boolean]], api: Expr[Gopher[F]], monadContext: Expr[CpsMonadContext[F]])(using Quotes): Expr[Unit] = + def builder(caseDefs: List[SelectorCaseExpr[F,Boolean,Unit]]):Expr[Unit] = { + val s0 = '{ + new SelectLoop[F]($api) + } + buildSelectListenerRun(s0, caseDefs, api, monadContext) + } + runImpl( builder, pf) + + + def foreverImpl[F[_]:Type](pf: Expr[PartialFunction[Any,Unit]], api:Expr[Gopher[F]], monadContext: Expr[CpsMonadContext[F]])(using Quotes): Expr[Unit] = + def builder(caseDefs: List[SelectorCaseExpr[F,Unit,Unit]]):Expr[Unit] = { + val s0 = '{ + new SelectForever[F]($api) + } + buildSelectListenerRun(s0, caseDefs, api, monadContext) + } + runImpl(builder, pf) + + def aforeverImpl[F[_]:Type](pf: Expr[PartialFunction[Any,Unit]], api:Expr[Gopher[F]])(using Quotes): Expr[F[Unit]] = + import quotes.reflect._ + def builder(caseDefs: List[SelectorCaseExpr[F,Unit,Unit]]):Expr[F[Unit]] = { + val s0 = '{ + new SelectForever[F]($api) + } + buildSelectListenerRunAsync(s0, caseDefs, api) + } + runImplTree(builder, pf.asTerm) + + + def runImpl[F[_]:Type, A:Type,B :Type](builder: List[SelectorCaseExpr[F,A,B]]=>Expr[B], + pf: Expr[PartialFunction[Any,A]])(using Quotes): Expr[B] = + import quotes.reflect._ + runImplTree[F,A,B,B](builder, pf.asTerm) + + def runImplTree[F[_]:Type, A:Type, B:Type, C:Type](using Quotes)( + builder: List[SelectorCaseExpr[F,A,B]] => Expr[C], + pf: quotes.reflect.Term + ): Expr[C] = + import quotes.reflect._ + pf match + case Lambda(valDefs, body) => + runImplTree[F,A,B,C](builder, body) + case Inlined(_,List(),body) => + runImplTree[F,A,B,C](builder, body) + case Match(scrutinee,cases) => + //val caseExprs = cases map(x => parseCaseDef[F,A](x)) + //if (caseExprs.find(_.isInstanceOf[DefaultExpression[?]]).isDefined) { + // report.error("default is not supported") + //} + val unorderedCases = cases.map(parseCaseDef[F,A,B](_)) + // done should be + val (isDone,notDone) = unorderedCases.partition{ x => + x match + case DoneExression(_,_) => true + case ReadExpression(_,_,isDone) => isDone + case _ => false + } + val doneFirstCases = isDone ++ notDone + builder(doneFirstCases) + + + def parseCaseDef[F[_]:Type,S:Type,R:Type](using Quotes)(caseDef: quotes.reflect.CaseDef): SelectorCaseExpr[F,S,R] = + import quotes.reflect._ + + val caseDefGuard = parseCaseDefGuard(caseDef) + + def handleRead(bind: Bind, valName: String, channel:Term, tp:TypeRepr): SelectorCaseExpr[F,S,R] = + val readFun = makeLambda(valName,tp,bind.symbol,caseDef.rhs) + if (channel.tpe <:< TypeRepr.of[ReadChannel[F,?]]) + tp.asType match + case '[a] => + val isDone = channel match + case quotes.reflect.Select(ch1,"done") if (ch1.tpe <:< TypeRepr.of[ReadChannel[F,?]]) => true + case _ => false + ReadExpression(channel.asExprOf[ReadChannel[F,a]],readFun.asExprOf[a=>S],isDone) + case _ => + reportError("can't determinate read type", caseDef.pattern.asExpr) + else + reportError("read pattern is not a read channel", channel.asExpr) + + def handleWrite(bind: Bind, valName: String, channel:Term, tp:TypeRepr): SelectorCaseExpr[F,S,R] = + val writeFun = 
makeLambda(valName,tp, bind.symbol, caseDef.rhs) + val e = caseDefGuard.getOrElse(valName, + reportError(s"not found binding ${valName} in write condition", channel.asExpr) + ) + if (channel.tpe <:< TypeRepr.of[WriteChannel[F,?]]) then + tp.asType match + case '[a] => + WriteExpression(channel.asExprOf[WriteChannel[F,a]],e.asExprOf[a], writeFun.asExprOf[a=>S]) + case _ => + reportError("Can't determinate type of write", caseDef.pattern.asExpr) + else + reportError("Write channel expected", channel.asExpr) + + def extractType[F[_]:Type](name: "read"|"write", channelTerm: Term, pat: Tree): TypeRepr = + import quotes.reflect._ + pat match + case Typed(_,tp) => tp.tpe + case _ => + TypeSelect(channelTerm,name).tpe + + def handleUnapply(chObj: Term, nameReadOrWrite: String, bind: Bind, valName: String, ePat: Tree, ch: String): SelectorCaseExpr[F,S,R] = + import quotes.reflect._ + if (chObj.tpe == '{gopher.Channel}.asTerm.tpe) + val chExpr = caseDefGuard.getOrElse(ch,reportError(s"select condition for ${ch} is not found",caseDef.pattern.asExpr)) + nameReadOrWrite match + case "Read" => + val elementType = extractType("read",chExpr, ePat) + handleRead(bind,valName,chExpr,elementType) + case "Write" => + val elementType = extractType("write",chExpr, ePat) + handleWrite(bind,valName,chExpr,elementType) + case _ => + reportError(s"Read or Write expected, we have ${nameReadOrWrite}", caseDef.pattern.asExpr) + else + reportError("Incorrect select pattern, expected or x:channel.{read,write} or Channel.{Read,Write}",chObj.asExpr) + + def safeShow(t:Tree): String = + try + t.show + catch + case NonFatal(ex) => + ex.printStackTrace() + s"(exception durign show:${ex.getMessage()})" + + caseDef.pattern match + case Inlined(_,List(),body) => + parseCaseDef(CaseDef(body, caseDef.guard, caseDef.rhs)) + case b@Bind(v, tp@Typed(expr, TypeSelect(ch,"read"))) => + handleRead(b,v,ch,tp.tpe) + case b@Bind(v, tp@Typed(expr, Annotated(TypeSelect(ch,"read"),_))) => + handleRead(b,v,ch,tp.tpe) + case tp@Typed(expr, TypeSelect(ch,"read")) => + // todo: introduce 'dummy' val + reportError("binding var in read expression is mandatory", caseDef.pattern.asExpr) + case b@Bind(v, tp@Typed(expr, TypeSelect(ch,"write"))) => + handleWrite(b,v,ch,tp.tpe) + case b@Bind(v, tp@Typed(expr, Annotated(TypeSelect(ch,"write"),_))) => + handleWrite(b,v,ch,tp.tpe) + case b@Bind(v, tp@Typed(expr, TypeSelect(ch,"after"))) => + val timeoutFun = makeLambda(v, tp.tpe, b.symbol, caseDef.rhs) + val e = caseDefGuard.getOrElse(v, reportError(s"can't find condifion for $v",caseDef.pattern.asExpr)) + if (ch.tpe <:< TypeRepr.of[gopher.Time] || ch.tpe <:< TypeRepr.of[gopher.Time.type]) + TimeoutExpression(e.asExprOf[FiniteDuration], timeoutFun.asExprOf[FiniteDuration => S]) + else + reportError(s"Expected Time, we have ${ch.show}", ch.asExpr) + case b@Bind(v, tp@Typed(expr, TypeSelect(ch,"done"))) => + val readFun = makeLambda(v,tp.tpe,b.symbol,caseDef.rhs) + tp.tpe.asType match + case '[a] => + if (ch.tpe <:< TypeRepr.of[ReadChannel[F,a]]) then + DoneExression(ch.asExprOf[ReadChannel[F,a]],readFun.asExprOf[Unit=>S]) + else + reportError("done base is not a read channel", ch.asExpr) + case _ => + reportError("can't determinate read type", caseDef.pattern.asExpr) + case pat@Unapply(TypeApply(quotes.reflect.Select( + quotes.reflect.Select(chObj,nameReadOrWrite), + "unapply"),targs), + impl,List(b@Bind(e,ePat),Bind(ch,chPat))) => + handleUnapply(chObj, nameReadOrWrite, b, e, ePat, ch) + case pat@TypedOrTest(Unapply(TypeApply(quotes.reflect.Select( + 
quotes.reflect.Select(chobj,nameReadOrWrite), + "unapply"),targs), + impl,List(b@Bind(e,ePat),Bind(ch,chPat))),a) => + handleUnapply(chobj, nameReadOrWrite, b, e, ePat, ch) + case _ => + report.error( + s""" + expected one of: + v: channel.read + v: channel.write if v == expr + v: Time.after if v == expr + we have + ${safeShow(caseDef.pattern)} + (tree: ${caseDef.pattern}) + """, caseDef.pattern.asExpr) + reportError(s"unparsed caseDef pattern: ${caseDef.pattern}", caseDef.pattern.asExpr) + + end parseCaseDef + + + def parseCaseDefGuard(using Quotes)(caseDef: quotes.reflect.CaseDef): Map[String,quotes.reflect.Term] = + import quotes.reflect._ + caseDef.guard match + case Some(condition) => + parseSelectCondition(condition, Map.empty) + case None => + Map.empty + + + def parseSelectCondition(using Quotes)(condition: quotes.reflect.Term, + entries:Map[String,quotes.reflect.Term]): Map[String,quotes.reflect.Term] = + import quotes.reflect._ + condition match + case Apply(quotes.reflect.Select(Ident(v1),"=="),List(expr)) => + entries.updated(v1, expr) + case Apply(quotes.reflect.Select(frs, "&&" ), List(snd)) => + parseSelectCondition(snd, parseSelectCondition(frs, entries)) + case _ => + reportError( + s"""Invalid select guard form, expected one of + channelName == channelEpxr + writeBind == writeExpresion + condition && condition + we have + ${condition.show} + """, + condition.asExpr) + + + def makeLambda(using Quotes)(argName: String, + argType: quotes.reflect.TypeRepr, + oldArgSymbol: quotes.reflect.Symbol, + body: quotes.reflect.Term): quotes.reflect.Term = + import quotes.reflect._ + val widenReturnType = TransformUtil.veryWiden(body.tpe) + val mt = MethodType(List(argName))(_ => List(argType.widen), _ => widenReturnType) + Lambda(Symbol.spliceOwner, mt, (owner,args) => + substIdent(body,oldArgSymbol, args.head.asInstanceOf[Term], owner).changeOwner(owner)) + + + def substIdent(using Quotes)(term: quotes.reflect.Term, + fromSym: quotes.reflect.Symbol, + toTerm: quotes.reflect.Term, + owner: quotes.reflect.Symbol): quotes.reflect.Term = + import quotes.reflect._ + val argTransformer = new TreeMap() { + override def transformTerm(tree: Term)(owner: Symbol):Term = + tree match + case Ident(name) if tree.symbol == fromSym => toTerm + case _ => super.transformTerm(tree)(owner) + } + argTransformer.transformTerm(term)(owner) + + + def reportError(message: String, posExpr: Expr[?])(using Quotes): Nothing = + import quotes.reflect._ + report.error(message, posExpr) + throw new RuntimeException(s"Error in macro: $message") + + + diff --git a/shared/src/main/scala/gopher/Time.scala b/shared/src/main/scala/gopher/Time.scala new file mode 100644 index 00000000..ec4f1309 --- /dev/null +++ b/shared/src/main/scala/gopher/Time.scala @@ -0,0 +1,176 @@ +package gopher + +import cps._ +import gopher.impl._ + +import scala.concurrent._ +import scala.concurrent.duration._ +import java.util.concurrent.TimeUnit + +import scala.language.experimental.macros +import scala.util.Try +import scala.util.Failure +import scala.util.Success +import java.util.concurrent.atomic.AtomicBoolean +import java.util.TimerTask + + +/** + * Time API, simular to one in golang standard library. + * @see gopherApi#time + */ +abstract class Time[F[_]](gopherAPI: Gopher[F]) { + + /** + * type for using in `select` paterns. + * @see [gopher.Select] + **/ + type after = FiniteDuration + + /** + * return channel, then after `duration` ellapses, send signal to this channel. 
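
Taken together, the patterns accepted by `parseCaseDef` and the `==`/`&&` guards accepted by `parseSelectCondition` give the surface syntax sketched below. This is a minimal sketch, assuming the `SharedGopherAPI` factory and the `select` entry point used in the tests of this patch; the channel names and values are illustrative:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.*
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future] = SharedGopherAPI.apply[Future]()

val in  = makeChannel[Int]()
val out = makeChannel[Int]()

def step(x: Int): Future[Unit] = async[Future] {
  select {
    case v: in.read                     => println(s"read $v")    // reader branch
    case w: out.write if w == x * 2     => println(s"wrote $w")   // writer branch; the guard supplies the value
    case t: Time.after if t == 1.second => println("timed out")   // timeout branch
  }
}
```

Each branch desugars into the corresponding `onRead`, `onWrite` or `onTimeout` call on the select listener built by the macro.
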
+   **/
+  def after(duration: FiniteDuration): ReadChannel[F,FiniteDuration] =
+    {
+      val ch = gopherAPI.makeOnceChannel[FiniteDuration]()
+      schedule( () => {
+            val now = FiniteDuration(System.currentTimeMillis, TimeUnit.MILLISECONDS)
+            ch.awrite(now)
+          },
+          duration
+      )
+      ch
+    }
+
+  /**
+   * returns a future which will be completed after the given time elapses.
+   **/
+  def asleep(duration: FiniteDuration): F[FiniteDuration] =
+    {
+      var fun: Try[FiniteDuration] => Unit = _ => ()
+      val retval = gopherAPI.asyncMonad.adoptCallbackStyle[FiniteDuration](listener => fun = listener)
+      schedule(() => {
+            val now = FiniteDuration(System.currentTimeMillis, TimeUnit.MILLISECONDS)
+            fun(Success(now))
+          },
+          duration)
+      retval
+    }
+
+  /**
+   * synonym for `await(asleep(duration))`. Should be used inside an async block.
+   **/
+  transparent inline def sleep(duration: FiniteDuration)(using CpsMonadContext[F]): FiniteDuration =
+    given CpsSchedulingMonad[F] = gopherAPI.asyncMonad
+    await(asleep(duration))
+
+  /**
+   * create a ticker. While somebody reads the ticker, they receive duration
+   * messages. When nobody is reading, the messages expire.
+   * @param duration
+   * @return
+   */
+  def tick(duration: FiniteDuration): ReadChannel[F,FiniteDuration] =
+    {
+      newTicker(duration).channel
+    }
+
+  /**
+   * ticker which holds a channel with expirable tick messages and an interface to stop it.
+   **/
+  class Ticker(duration: FiniteDuration) {
+
+    val channel = gopherAPI.makeChannel[FiniteDuration](0).withExpiration(duration, false)
+
+    private val scheduled = schedule(tick, duration)
+    private val stopped = AtomicBoolean(false)
+
+    def stop(): Unit = {
+      scheduled.cancel()
+      stopped.set(true)
+    }
+
+    private def tick():Unit = {
+      if (!stopped.get()) then
+        channel.addWriter(SimpleWriter(now(),{
+          case Success(_) => // ok, somebody read it
+          case Failure(ex) =>
+            ex match
+              case ex: ChannelClosedException =>
+                scheduled.cancel()
+                stopped.lazySet(true)
+              case ex: TimeoutException => //
+              case other => // impossible
+                gopherAPI.logImpossible(other)
+        }))
+        schedule(tick, duration)
+    }
+
+  }
+
+  /**
+   * create a ticker with the given `duration` between ticks.
+   *@see [gopher.Time.Ticker]
+   **/
+  def newTicker(duration: FiniteDuration): Ticker =
+    {
+      new Ticker(duration)
+    }
+
+
+  def now(): FiniteDuration =
+    FiniteDuration(System.currentTimeMillis(),TimeUnit.MILLISECONDS)
+
+
+  /**
+   * Low-level interface for the scheduler.
+   */
+  def schedule(fun: () => Unit, delay: FiniteDuration): Time.Scheduled
+
+
+}
+
+object Time:
+
+  /**
+   * Used in selector sugar for specifying a timeout.
+   *```
+   * select{
+   *    ......
+   *    case t: Time.after if t == expr  => doSomething
+   * }
+   *```
+   * is sugar for selectGroup.{..}.setTimeout(expr, t => doSomething)
+   *@see Select
+   **/
+  type after = FiniteDuration
+
+
+  /**
+   * returns a channel on which an event will be delivered after `duration` elapses.
+   **/
+  def after[F[_]](duration: FiniteDuration)(using Gopher[F]): ReadChannel[F,FiniteDuration] =
+     summon[Gopher[F]].time.after(duration)
+
+
+  def asleep[F[_]](duration: FiniteDuration)(using Gopher[F]): F[FiniteDuration] =
+     summon[Gopher[F]].time.asleep(duration)
+
+  transparent inline def sleep[F[_]](duration: FiniteDuration)(using Gopher[F], CpsMonadContext[F]): FiniteDuration =
+     summon[Gopher[F]].time.sleep(duration)
+
+
+  /**
+   * A task which can be cancelled.
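
A small usage sketch of the `Time` helpers above, assuming a given `Gopher[Future]` and the standard `FutureAsyncMonad` from dotty-cps-async:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.*
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

def timedStep()(using Gopher[Future]): Future[Unit] = async[Future] {
  Time.sleep(100.millis)              // suspend this async block for ~100 ms
  val signal = Time.after(1.second)   // one-shot channel that fires after one second
  val firedAt = signal.read()         // suspends until the signal arrives
  println(s"timer fired at $firedAt")
}
```
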
+ **/ + trait Scheduled { + + def cancel(): Boolean + + def onDone( listener: Try[Boolean]=>Unit ): Unit + + } + + + + diff --git a/shared/src/main/scala/gopher/WriteChannel.scala b/shared/src/main/scala/gopher/WriteChannel.scala new file mode 100644 index 00000000..a98c0960 --- /dev/null +++ b/shared/src/main/scala/gopher/WriteChannel.scala @@ -0,0 +1,64 @@ +package gopher + +import cps._ +import gopher.impl._ + +import scala.annotation.targetName +import scala.concurrent.duration.FiniteDuration +import scala.util.Try + + +trait WriteChannel[F[_], A]: + + type write = A + + def asyncMonad: CpsAsyncMonad[F] + + def awrite(a:A):F[Unit] = + asyncMonad.adoptCallbackStyle(f => + addWriter(SimpleWriter(a, f)) + ) + + //object write: + // inline def apply(a:A): Unit = await(awrite(a))(using asyncMonad) + // inline def unapply(a:A): Some[A] = ??? + + transparent inline def write(inline a:A)(using CpsMonadContext[F]): Unit = await(awrite(a)) + + @targetName("write1") + transparent inline def <~ (inline a:A)(using CpsMonadContext[F]): Unit = await(awrite(a)) + + @targetName("write2") + transparent inline def ! (inline a:A)(using CpsMonadContext[F]): Unit = await(awrite(a)) + + + //def Write(x:A):WritePattern = new WritePattern(x) + + //class WritePattern(x:A): + // inline def unapply(y:Any): Option[A] = + // Some(x) + + //TODO: make protected[gopher] + def addWriter(writer: Writer[A]): Unit + + def awriteAll(collection: IterableOnce[A]): F[Unit] = + inline given CpsAsyncMonad[F] = asyncMonad + async[F]{ + val it = collection.iterator + while(it.hasNext) { + val v = it.next() + write(v) + } + } + + transparent inline def writeAll(inline collection: IterableOnce[A])(using mc: CpsMonadContext[F]): Unit = + await(awriteAll(collection)) + + + def withWriteExpiration(ttl: FiniteDuration, throwTimeouts: Boolean)(using gopherApi: Gopher[F]): WriteChannelWithExpiration[F,A] = + new WriteChannelWithExpiration(this, ttl, throwTimeouts, gopherApi) + + + + + diff --git a/shared/src/main/scala/gopher/WriteChannelWithExpiration.scala b/shared/src/main/scala/gopher/WriteChannelWithExpiration.scala new file mode 100644 index 00000000..423ca35d --- /dev/null +++ b/shared/src/main/scala/gopher/WriteChannelWithExpiration.scala @@ -0,0 +1,45 @@ +package gopher + +import cps._ +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util._ +import gopher.impl._ + + +/** + * Channel, where messages can be exprited. 
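
A minimal write-side sketch, assuming a given `Gopher[Future]`; the buffer size is arbitrary:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future] = SharedGopherAPI.apply[Future]()

val ch = makeChannel[Int](100)

val done: Future[Unit] = async[Future] {
  ch.write(1)           // suspending write
  ch <~ 2               // operator alias for write
  ch.writeAll(3 to 5)   // writes the whole collection element by element
}
val background: Future[Unit] = ch.awrite(42)   // callback-style write, no async block needed
```
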
+ **/ +class WriteChannelWithExpiration[F[_],A](internal: WriteChannel[F,A], ttl: FiniteDuration, throwTimeouts: Boolean, gopherApi: Gopher[F]) extends WriteChannel[F,A]: + + + override def awrite(a:A):F[Unit] = + val expireTime = System.currentTimeMillis() + ttl.toMillis + asyncMonad.adoptCallbackStyle(f => + internal.addWriter(makeExpirableWriter(a, f, expireTime)) + ) + + def addWriter(writer: Writer[A]): Unit = + val expireTime = System.currentTimeMillis() + ttl.toMillis + internal.addWriter(wrapExpirable(writer,expireTime)) + + def asyncMonad: CpsAsyncMonad[F] = + internal.asyncMonad + + override def withWriteExpiration(ttl: FiniteDuration, throwTimeouts: Boolean)(using gopherApi: Gopher[F]): WriteChannelWithExpiration[F,A] = + new WriteChannelWithExpiration(internal, ttl, throwTimeouts, gopherApi) + + private def wrapExpirable(nested: Writer[A], expireTimeMillis: Long) = + if (throwTimeouts) then + NestedWriterWithExpireTimeThrowing(nested, expireTimeMillis, gopherApi) + else + NesteWriterWithExpireTime(nested, expireTimeMillis) + + private def makeExpirableWriter(a:A, f: Try[Unit]=>Unit, expireTimeMillis: Long): Writer[A] = + if (throwTimeouts) + NestedWriterWithExpireTimeThrowing(SimpleWriter(a,f), expireTimeMillis, gopherApi) + else + SimpleWriterWithExpireTime(a,f,expireTimeMillis) + + + diff --git a/shared/src/main/scala/gopher/impl/AppendReadChannel.scala b/shared/src/main/scala/gopher/impl/AppendReadChannel.scala new file mode 100644 index 00000000..e58cb812 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/AppendReadChannel.scala @@ -0,0 +1,72 @@ +package gopher.impl + +import gopher._ + +import scala.util._ +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicReference + + + +/** + * Input, which reed from the first channel, and after first channel is closed - from second + * + * can be created with 'append' operator. 
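
A sketch of write expiration, assuming a given `Gopher[Future]`. With `throwTimeouts = true` a pending write that nobody accepts within the TTL is failed with a `TimeoutException`; with `false` the expired value appears to be silently discarded:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.*
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future] = SharedGopherAPI.apply[Future]()

val raw   = makeChannel[Int]()                                        // unbuffered
val timed = raw.withWriteExpiration(300.millis, throwTimeouts = true)

// if no reader shows up within 300 ms, this future fails with a TimeoutException
val offer: Future[Unit] = timed.awrite(1)
```
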
+ * + * {{{ + * val x = read(x|y) + * }}} + */ +case class AppendReadChannel[F[_],A](x: ReadChannel[F,A], y: ReadChannel[F,A]) extends ReadChannel[F,A]: + + + override def gopherApi: Gopher[F] = x.gopherApi + + val xClosed: AtomicBoolean = new AtomicBoolean(false) + + + class InterceptReader(nested: Reader[A]) extends Reader[A] { + + val inUsage = AtomicBoolean(false) + + def canExpire: Boolean = nested.canExpire + + def isExpired: Boolean = nested.isExpired + + def capture():Expirable.Capture[Try[A]=>Unit] = + nested.capture().map{ readFun => + { + case r@Success(a) => if (inUsage.get()) then + nested.markUsed() + readFun(r) + case r@Failure(ex) => + if (ex.isInstanceOf[ChannelClosedException]) then + xClosed.set(true) + nested.markFree() + y.addReader(nested) + else + if (inUsage.get()) then + nested.markUsed() + readFun(r) + } + } + + def markUsed(): Unit = + inUsage.set(true) + + def markFree(): Unit = + nested.markFree() + + } + + def addReader(reader: Reader[A]): Unit = + if (xClosed.get()) { + y.addReader(reader) + } else { + x.addReader(new InterceptReader(reader)) + } + + + def addDoneReader(reader: Reader[Unit]): Unit = + y.addDoneReader(reader) + diff --git a/shared/src/main/scala/gopher/impl/ChFlatMappedChannel.scala b/shared/src/main/scala/gopher/impl/ChFlatMappedChannel.scala new file mode 100644 index 00000000..9d2170f5 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/ChFlatMappedChannel.scala @@ -0,0 +1,17 @@ +package gopher.impl + +import gopher._ + +class ChFlatMappedChannel[F[_],W,RA,RB](internal: Channel[F,W,RA], f: RA=>ReadChannel[F,RB]) extends ChFlatMappedReadChannel[F,RA,RB](internal, f) + with Channel[F,W,RB]: + + override def addWriter(writer: Writer[W]): Unit = + internal.addWriter(writer) + + override def close(): Unit = + internal.close() + + override def isClosed: Boolean = + internal.isClosed + + diff --git a/shared/src/main/scala/gopher/impl/ChFlatMappedReadChannel.scala b/shared/src/main/scala/gopher/impl/ChFlatMappedReadChannel.scala new file mode 100644 index 00000000..ba7fe7a4 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/ChFlatMappedReadChannel.scala @@ -0,0 +1,47 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.util._ + +class ChFlatMappedReadChannel[F[_], A, B](prev: ReadChannel[F,A], f: A=>ReadChannel[F,B]) extends ReadChannel[F,B] { + + def addReader(reader: Reader[B]): Unit = + bChannel.addReader(reader) + + + def addDoneReader(reader: Reader[Unit]): Unit = { + bChannel.addDoneReader(reader) + } + + def gopherApi:Gopher[F] = prev.gopherApi + + val bChannel = gopherApi.makeChannel[B]() + + def run(): F[Unit] = + given CpsSchedulingMonad[F] = gopherApi.asyncMonad + async[F]{ + while{ + prev.optRead() match + case Some(a) => + val internal = f(a) + while{ + internal.optRead() match + case Some(b) => + bChannel.write(b) + true + case None => + false + } do () + true + case None => + false + } do () + bChannel.close() + } + + gopherApi.spawnAndLogFail(run()) + + + +} \ No newline at end of file diff --git a/shared/src/main/scala/gopher/impl/ChFlatMappedTryReadChannel.scala b/shared/src/main/scala/gopher/impl/ChFlatMappedTryReadChannel.scala new file mode 100644 index 00000000..d39f56ed --- /dev/null +++ b/shared/src/main/scala/gopher/impl/ChFlatMappedTryReadChannel.scala @@ -0,0 +1,51 @@ +package gopher.impl + +import cps._ +import gopher._ +import scala.util._ +import scala.util.control._ + +class ChFlatMappedTryReadChannel[F[_], A, B](prev: ReadChannel[F,Try[A]], f: Try[A]=>ReadChannel[F,Try[B]]) extends 
ReadChannel[F,Try[B]] {
+
+  def addReader(reader: Reader[Try[B]]): Unit =
+     bChannel.addReader(reader)
+
+
+  def addDoneReader(reader: Reader[Unit]): Unit = {
+    bChannel.addDoneReader(reader)
+  }
+
+  def gopherApi:Gopher[F] = prev.gopherApi
+
+  val bChannel = gopherApi.makeChannel[Try[B]]()
+
+  def run(): F[Unit] = {
+    given CpsSchedulingMonad[F] = gopherApi.asyncMonad
+    async[F]{
+      while{
+        prev.optRead() match
+          case None => false
+          case Some(v) =>
+            val internal: ReadChannel[F,Try[B]] =
+              try
+                f(v)
+              catch
+                case NonFatal(ex) =>
+                  ReadChannel.fromValues[F,Try[B]](Failure(ex))(using gopherApi)
+            while{
+              internal.optRead() match
+                case None => false
+                case Some(v) =>
+                  bChannel.write(v)
+                  true
+            } do ()
+            true
+      } do ()
+      bChannel.close()
+    }
+  }
+
+  gopherApi.spawnAndLogFail(run())
+
+
+}
diff --git a/shared/src/main/scala/gopher/impl/DuppedInput.scala b/shared/src/main/scala/gopher/impl/DuppedInput.scala
new file mode 100644
index 00000000..04e1632c
--- /dev/null
+++ b/shared/src/main/scala/gopher/impl/DuppedInput.scala
@@ -0,0 +1,41 @@
+package gopher
+
+import cps._
+import scala.annotation._
+import scala.concurrent._
+import scala.concurrent.duration._
+import scala.util._
+import java.util.concurrent.ConcurrentLinkedQueue
+import java.util.concurrent.atomic.AtomicInteger
+
+import java.util.logging.{Level => LogLevel}
+
+
+
+class DuppedInput[F[_],A](origin:ReadChannel[F,A], bufSize: Int=1)(using api:Gopher[F])
+{
+
+  def pair = (sink1, sink2)
+
+  val sink1 = makeChannel[A](bufSize,false)
+  val sink2 = makeChannel[A](bufSize,false)
+
+  given CpsSchedulingMonad[F] = api.asyncMonad
+
+  val runner = async{
+    while
+      origin.optRead() match
+        case Some(a) =>
+          sink1.write(a)
+          sink2.write(a)
+          true
+        case None =>
+          false
+    do ()
+    sink1.close()
+    sink2.close()
+  }
+
+  api.spawnAndLogFail(runner)
+
+}
diff --git a/shared/src/main/scala/gopher/impl/Expirable.scala b/shared/src/main/scala/gopher/impl/Expirable.scala
new file mode 100644
index 00000000..0a5a3d08
--- /dev/null
+++ b/shared/src/main/scala/gopher/impl/Expirable.scala
@@ -0,0 +1,60 @@
+package gopher.impl
+
+import cps._
+
+/**
+* An object which can expire
+* (usually a reader or writer in a SelectGroup).
+* The usage protocol is:
+* call capture;
+* if the A inside is used, call markUsed and use the A;
+* if the A inside is not used for some reason, call markFree.
+**/
+trait Expirable[A]:
+
+  /**
+   * true when the reader/writer can become unavailable for some reason
+   */
+  def canExpire: Boolean
+
+  /**
+   * true if this object is expired and should be deleted from the queue
+   *  (for example, when the reader belongs to a select group and some other action in this select group was already performed)
+   **/
+  def isExpired: Boolean
+
+  /**
+   * capture the object; after this we can either use it (markUsed will be called) or abandon it (markFree)
+   **/
+  def capture(): Expirable.Capture[A]
+
+  /**
+   * Called when we submit the read function to the task executor and it is now safe to expire all other readers/writers in the
+   * same select group
+   **/
+  def markUsed(): Unit
+
+  /**
+   * Called when we can't use the captured function (i.e. we captured it but will not call it).
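
An illustrative sketch of the capture/markUsed/markFree protocol described above, from the point of view of a channel implementation; the helper `tryDeliver` is hypothetical and not part of the library:

```scala
import scala.util.Try
import gopher.impl.{Expirable, Reader}

// hypothetical helper showing how a channel implementation would drive an Expirable
def tryDeliver[A](pending: Reader[A], value: Try[A]): Boolean =
  pending.capture() match
    case Expirable.Capture.Ready(accept) =>
      pending.markUsed()   // commit: other members of the same select group may now expire
      accept(value)        // hand the value to the captured continuation
      true
    case Expirable.Capture.WaitChangeComplete =>
      false                // another thread is changing this entry; retry later
    case Expirable.Capture.Expired =>
      false                // stale entry; safe to drop from the queue
```
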
+ **/ + def markFree(): Unit + + + +object Expirable: + + enum Capture[+A]: + case Ready(value: A) + case WaitChangeComplete + case Expired + + def map[B](f: A=>B): Capture[B] = + this match + case Ready(a) => Ready(f(a)) + case WaitChangeComplete => WaitChangeComplete + case Expired => Expired + + def foreach(f: A=>Unit): Unit = + this match + case Ready(a) => f(a) + case _ => \ No newline at end of file diff --git a/shared/src/main/scala/gopher/impl/FilteredChannel.scala b/shared/src/main/scala/gopher/impl/FilteredChannel.scala new file mode 100644 index 00000000..dca8b27b --- /dev/null +++ b/shared/src/main/scala/gopher/impl/FilteredChannel.scala @@ -0,0 +1,30 @@ +package gopher.impl + +import gopher._ + +class FilteredChannel[F[_],W,R](internal: Channel[F,W,R], p: R => Boolean) extends FilteredReadChannel[F,R](internal, p) + with Channel[F,W,R]: + + override def addWriter(writer: Writer[W]): Unit = + internal.addWriter(writer) + + override def close(): Unit = + internal.close() + + override def isClosed: Boolean = + internal.isClosed + + +class FilteredAsyncChannel[F[_],W,R](internal: Channel[F,W,R], p: R => F[Boolean]) extends FilteredAsyncReadChannel[F,R](internal, p) + with Channel[F,W,R]: + + override def addWriter(writer: Writer[W]): Unit = + internal.addWriter(writer) + + override def close(): Unit = + internal.close() + + override def isClosed: Boolean = + internal.isClosed + + \ No newline at end of file diff --git a/shared/src/main/scala/gopher/impl/FilteredReadChannel.scala b/shared/src/main/scala/gopher/impl/FilteredReadChannel.scala new file mode 100644 index 00000000..916debaa --- /dev/null +++ b/shared/src/main/scala/gopher/impl/FilteredReadChannel.scala @@ -0,0 +1,108 @@ +package gopher.impl + +import gopher._ + +import scala.util._ +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.atomic.AtomicBoolean + +class FilteredReadChannel[F[_],A](internal: ReadChannel[F,A], p: A=>Boolean) extends ReadChannel[F,A] { + + + class FilteredReader(nested: Reader[A]) extends Reader[A] { + + val markedUsed = new AtomicBoolean(false) + + def wrappedFun(fun: Try[A]=>Unit): (Try[A] => Unit) = { + case Success(a) => + if p(a) then + if (markedUsed.get()) { + nested.markUsed() + } + fun(Success(a)) + else + nested.markFree() + internal.addReader(this) + case Failure(ex) => + fun(Failure(ex)) + } + + override def capture(): Expirable.Capture[Try[A]=>Unit] = + nested.capture().map{ fun => + wrappedFun(fun) + } + + override def canExpire: Boolean = nested.canExpire + + override def isExpired: Boolean = nested.isExpired + + override def markUsed(): Unit = + markedUsed.lazySet(true) + + override def markFree(): Unit = nested.markFree() + + } + + def addReader(reader: Reader[A]): Unit = + internal.addReader(FilteredReader(reader)) + + def addDoneReader(reader: Reader[Unit]): Unit = internal.addDoneReader(reader) + + def gopherApi:Gopher[F] = internal.gopherApi + +} + + +class FilteredAsyncReadChannel[F[_],A](internal: ReadChannel[F,A], p: A=>F[Boolean]) extends ReadChannel[F,A] { + + + class FilteredReader(nested: Reader[A]) extends Reader[A] { + + val markedUsed = new AtomicBoolean(false) + + def wrappedFun(fun: (Try[A] => Unit) ): (Try[A] => Unit) = { + case Success(a) => + gopherApi.spawnAndLogFail( + gopherApi.asyncMonad.mapTry(p(a)){ + case Success(v) => + if (v) { + if (markedUsed.get()) { + nested.markUsed() + } + fun(Success(a)) + } else { + nested.markFree() + internal.addReader(this) + } + case Failure(ex) => + fun(Failure(ex)) + } + ) + case Failure(ex) => 
+ fun(Failure(ex)) + } + + override def capture(): Expirable.Capture[Try[A]=>Unit] = + nested.capture().map{ fun => + wrappedFun(fun) + } + + override def canExpire: Boolean = nested.canExpire + + override def isExpired: Boolean = nested.isExpired + + override def markUsed(): Unit = markedUsed.lazySet(true) + + override def markFree(): Unit = nested.markFree() + + } + + def addReader(reader: Reader[A]): Unit = + internal.addReader(FilteredReader(reader)) + + def addDoneReader(reader: Reader[Unit]): Unit = internal.addDoneReader(reader) + + def gopherApi:Gopher[F] = internal.gopherApi + +} + diff --git a/shared/src/main/scala/gopher/impl/MappedChannel.scala b/shared/src/main/scala/gopher/impl/MappedChannel.scala new file mode 100644 index 00000000..ad1fac12 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/MappedChannel.scala @@ -0,0 +1,28 @@ +package gopher.impl + +import gopher._ + +class MappedChannel[F[_],W,RA,RB](internal: Channel[F,W,RA], f: RA=>RB) extends MappedReadChannel[F,RA,RB](internal, f) + with Channel[F,W,RB]: + + override def addWriter(writer: Writer[W]): Unit = + internal.addWriter(writer) + + override def close(): Unit = + internal.close() + + override def isClosed: Boolean = + internal.isClosed + + +class MappedAsyncChannel[F[_],W,RA,RB](internal: Channel[F,W,RA], f: RA=>F[RB]) extends MappedAsyncReadChannel[F,RA,RB](internal, f) + with Channel[F,W,RB]: + + override def addWriter(writer: Writer[W]): Unit = + internal.addWriter(writer) + + override def close(): Unit = + internal.close() + + override def isClosed: Boolean = + internal.isClosed diff --git a/shared/src/main/scala/gopher/impl/MappedReadChannel.scala b/shared/src/main/scala/gopher/impl/MappedReadChannel.scala new file mode 100644 index 00000000..8fe111c4 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/MappedReadChannel.scala @@ -0,0 +1,94 @@ +package gopher.impl + +import gopher._ +import scala.util._ +import scala.util.control.NonFatal + +class MappedReadChannel[F[_],A, B](internal: ReadChannel[F,A], f: A=> B) extends ReadChannel[F,B] { + + + class MReader(nested: Reader[B]) extends Reader[A] { + + def wrappedFun(fun: (Try[B] => Unit) ): (Try[A] => Unit) = { + case Success(a) => + try + val b = f(a) + fun(Success(b)) + catch + case NonFatal(ex) => + fun(Failure(ex)) + case Failure(ex) => + fun(Failure(ex)) + } + + //TODO: think, are we want to pass error to the next level ? + override def capture(): Expirable.Capture[Try[A]=>Unit] = + nested.capture().map{ fun => + wrappedFun(fun) + } + + override def canExpire: Boolean = nested.canExpire + + override def isExpired: Boolean = nested.isExpired + + override def markUsed(): Unit = nested.markUsed() + + override def markFree(): Unit = nested.markFree() + + } + + def addReader(reader: Reader[B]): Unit = + internal.addReader(MReader(reader)) + + def addDoneReader(reader: Reader[Unit]): Unit = internal.addDoneReader(reader) + + def gopherApi:Gopher[F] = internal.gopherApi + +} + +class MappedAsyncReadChannel[F[_],A, B](internal: ReadChannel[F,A], f: A=> F[B]) extends ReadChannel[F,B] { + + def addDoneReader(reader: Reader[Unit]): Unit = internal.addDoneReader(reader) + + class MReader(nested: Reader[B]) extends Reader[A] { + + def wrappedFun(fun: (Try[B] => Unit) ): (Try[A] => Unit) = { + case Success(a) => + gopherApi.spawnAndLogFail( + try + asyncMonad.mapTry(f(a))(fun) + catch + case NonFatal(ex) => + fun(Failure(ex)) + asyncMonad.pure(()) + ) + case Failure(ex) => + fun(Failure(ex)) + } + + //TODO: think, are we want to pass error to the next level ? 
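
A usage sketch for the filtered and mapped wrappers, assuming `filter`/`map` are exposed on `ReadChannel` as elsewhere in this patch and a given `Gopher[Future]` is in scope:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*

given Gopher[Future] = SharedGopherAPI.apply[Future]()

val nums = makeChannel[Int]()
nums.awriteAll(1 to 10)

val evensDoubled = nums.filter(_ % 2 == 0).map(_ * 2)

val firstTwo: Future[(Int, Int)] = async[Future] {
  (evensDoubled.read(), evensDoubled.read())   // (4, 8)
}
```
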
+ override def capture(): Expirable.Capture[Try[A]=>Unit] = + nested.capture().map{ fun => + wrappedFun(fun) + } + + override def canExpire: Boolean = nested.canExpire + + override def isExpired: Boolean = nested.isExpired + + override def markUsed(): Unit = nested.markUsed() + + override def markFree(): Unit = nested.markFree() + + } + + def addReader(reader: Reader[B]): Unit = + internal.addReader(MReader(reader)) + + def gopherApi:Gopher[F] = internal.gopherApi + +} + + + + diff --git a/shared/src/main/scala/gopher/impl/OrReadChannel.scala b/shared/src/main/scala/gopher/impl/OrReadChannel.scala new file mode 100644 index 00000000..3d6367c6 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/OrReadChannel.scala @@ -0,0 +1,157 @@ +package gopher.impl + +import gopher._ + +import scala.util._ +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicReference + + + +/** + * Input, which combine two other inputs. + * + * can be created with '|' operator. + * + * {{{ + * val x = read(x|y) + * }}} + */ +case class OrReadChannel[F[_],A](x: ReadChannel[F,A], y: ReadChannel[F,A]) extends ReadChannel[F,A]: + + + val xClosed: AtomicBoolean = new AtomicBoolean(false) + val yClosed: AtomicBoolean = new AtomicBoolean(false) + + abstract class CommonBase[B](nested: Reader[B]) { + val inUse = new AtomicReference[ReadChannel[F,A]|Null](null) + val used = new AtomicBoolean(false) + + def intercept(readFun:Try[B]=>Unit): Try[B] => Unit + + /** + * Can be called only insed wrapper fun, + * set current inUse be closed, if n + * precondition: inUse.get !== null + * return: true, if bith x and y are closed + **/ + protected def setClosed(): Boolean = { + if (inUse.get() eq x) then + if (!xClosed.get()) then + xClosed.set(true) + return yClosed.get() + else + if !yClosed.get() then + yClosed.set(true) + return xClosed.get() + } + + protected def passToNested(v: Try[B], readFun:Try[B]=>Unit) = { + if (used.get()) then + nested.markUsed() + readFun(v) + } + + protected def passIfClosed(v: Try[B], readFun: Try[B]=>Unit): Unit = { + if (setClosed()) { + passToNested(v, readFun) + } else { + inUse.set(null) + } + } + + def capture(fromChannel: ReadChannel[F,A]): Expirable.Capture[Try[B]=>Unit] = + if inUse.compareAndSet(null,fromChannel) then + nested.capture() match + case Expirable.Capture.Ready(readFun) => Expirable.Capture.Ready(intercept(readFun)) + case Expirable.Capture.WaitChangeComplete => + inUse.set(null) + Expirable.Capture.WaitChangeComplete + case Expirable.Capture.Expired => inUse.set(null) + Expirable.Capture.Expired + else + Expirable.Capture.WaitChangeComplete + + def markFree(fromChannel: ReadChannel[F,A]): Unit = + if(inUse.get() eq fromChannel) then + nested.markFree() + inUse.set(null) + + def markUsed(fromChannel: ReadChannel[F,A]): Unit = + if (inUse.get() eq fromChannel) then + used.set(true) + + def isExpired(fromChannel: ReadChannel[F,A]): Boolean = + nested.isExpired + + def canExpire: Boolean = + nested.canExpire + + } + + class CommonReader(nested: Reader[A]) extends CommonBase[A](nested) { + + def intercept(readFun:Try[A]=>Unit): Try[A] => Unit = { + case r@Success(a) => + passToNested(r, readFun) + case f@Failure(ex) => + if (ex.isInstanceOf[ChannelClosedException]) { + passIfClosed(f, readFun) + } else { + passToNested(f,readFun) + } + } + + } + + class WrappedReader[B](common: CommonBase[B], owner: ReadChannel[F,A]) extends Reader[B] { + + def capture(): Expirable.Capture[Try[B]=>Unit] = + common.capture(owner) + + def canExpire: Boolean = 
common.canExpire + + def isExpired: Boolean = common.isExpired(owner) + + def markFree(): Unit = common.markFree(owner) + + def markUsed(): Unit = common.markUsed(owner) + + } + + def addReader(reader: Reader[A]): Unit = + val common = new CommonReader(reader) + addCommonReader(common,(c,ch)=>ch.addReader(WrappedReader(common,ch))) + + + class DoneCommonReader(nested: Reader[Unit]) extends CommonBase[Unit](nested): + + def intercept(nestedFun: Try[Unit]=>Unit): Try[Unit] => Unit = { + case r@Success(x) => + passIfClosed(r, nestedFun) + case r@Failure(ex) => + passToNested(r, nestedFun) + } + + + def addDoneReader(reader: Reader[Unit]): Unit = + addCommonReader(new DoneCommonReader(reader), (c,ch) => ch.addDoneReader(WrappedReader(c,ch))) + + // | is left-associative, so (x|y|z|v).gopherApi better be v.api, + def gopherApi: Gopher[F] = y.gopherApi + + override def toString() = s"(${x}|${y})" + + + def addCommonReader[C](common:C, addReaderFun: (C, ReadChannel[F,A]) => Unit): Unit = + var readerAdded = false + if !xClosed.get() then + readerAdded = true + addReaderFun(common,x) + if !yClosed.get() then + readerAdded = true + addReaderFun(common,y) + // if all closed, than we should add to any, to receive ChannelClosedException + if !readerAdded then + addReaderFun(common,y) + diff --git a/shared/src/main/scala/gopher/impl/Reader.scala b/shared/src/main/scala/gopher/impl/Reader.scala new file mode 100644 index 00000000..e0baf284 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/Reader.scala @@ -0,0 +1,6 @@ +package gopher.impl + +import scala.util.Try + +trait Reader[A] extends Expirable[Try[A]=>Unit] + diff --git a/shared/src/main/scala/gopher/impl/Writer.scala b/shared/src/main/scala/gopher/impl/Writer.scala new file mode 100644 index 00000000..e8d4b8b1 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/Writer.scala @@ -0,0 +1,20 @@ +package gopher.impl + +import scala.util.Try + +trait Writer[A] extends Expirable[(A,Try[Unit]=>Unit)] + + +class SimpleWriter[A](a:A, f: Try[Unit]=>Unit) extends Writer[A]: + + def canExpire: Boolean = false + + def isExpired: Boolean = false + + def capture(): Expirable.Capture[(A,Try[Unit]=>Unit)] = Expirable.Capture.Ready((a,f)) + + def markUsed(): Unit = () + + def markFree(): Unit = () + + diff --git a/shared/src/main/scala/gopher/impl/WriterWithExpireTime.scala b/shared/src/main/scala/gopher/impl/WriterWithExpireTime.scala new file mode 100644 index 00000000..c01341e1 --- /dev/null +++ b/shared/src/main/scala/gopher/impl/WriterWithExpireTime.scala @@ -0,0 +1,87 @@ +package gopher.impl + +import gopher._ +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util._ +import java.util.concurrent.TimeUnit + + +class SimpleWriterWithExpireTime[A](a:A, f: Try[Unit] => Unit, expireTimeMillis: Long) extends Writer[A]: + + def canExpire: Boolean = true + + def isExpired: Boolean = + //TODO: way to mock current time + System.currentTimeMillis >= expireTimeMillis + + def capture(): Expirable.Capture[(A,Try[Unit]=>Unit)] = Expirable.Capture.Ready((a,f)) + + def markUsed(): Unit = () + + def markFree(): Unit = () + + // TODO: pass time source +class NesteWriterWithExpireTime[A](nested: Writer[A], expireTimeMillis: Long) extends Writer[A]: + + def canExpire: Boolean = true + + def isExpired: Boolean = + (System.currentTimeMillis >= expireTimeMillis) || nested.isExpired + + def capture(): Expirable.Capture[(A,Try[Unit]=>Unit)] = + if (isExpired) Expirable.Capture.Expired else nested.capture() + + def markUsed(): Unit = nested.markUsed() + + 
def markFree(): Unit = nested.markFree() + +class NestedWriterWithExpireTimeThrowing[F[_],A](nested: Writer[A], expireTimeMillis: Long, gopherApi: Gopher[F]) extends Writer[A]: + + val scheduledThrow = gopherApi.time.schedule( + () => checkExpire(), + FiniteDuration(expireTimeMillis - gopherApi.time.now().toMillis, TimeUnit.MILLISECONDS) + ) + + def canExpire: Boolean = true + + def isExpired: Boolean = + (gopherApi.time.now().toMillis >= expireTimeMillis) || nested.isExpired + + def capture(): Expirable.Capture[(A,Try[Unit]=>Unit)] = + if (gopherApi.time.now().toMillis > expireTimeMillis) then + Expirable.Capture.Expired + else + nested.capture() + + def markUsed(): Unit = + scheduledThrow.cancel() + nested.markUsed() + + def markFree(): Unit = + nested.markFree() + checkExpire() + + def checkExpire(): Unit = + if (gopherApi.time.now().toMillis > expireTimeMillis) then + if (!nested.isExpired) then + nested.capture() match + case Expirable.Capture.Ready((a,f)) => + nested.markUsed() + try + f(Failure(new TimeoutException())) + catch + case ex: Throwable => + ex.printStackTrace() + case Expirable.Capture.WaitChangeComplete => + gopherApi.time.schedule( + () => checkExpire(), + FiniteDuration(100, TimeUnit.MILLISECONDS) ) + case Expirable.Capture.Expired => + // none, will be colled after markFree is needed. + + + + + + \ No newline at end of file diff --git a/shared/src/main/scala/gopher/monads/ReadChannelCpsMonad.scala b/shared/src/main/scala/gopher/monads/ReadChannelCpsMonad.scala new file mode 100644 index 00000000..24f17f01 --- /dev/null +++ b/shared/src/main/scala/gopher/monads/ReadChannelCpsMonad.scala @@ -0,0 +1,30 @@ +package gopher.monads + +import gopher._ +import cps._ + +import gopher.impl._ + + + +given ReadChannelCpsMonad[F[_]](using Gopher[F]): CpsPureMonadInstanceContext[[A] =>> ReadChannel[F,A]] with + + + def pure[T](t:T): ReadChannel[F,T] = + ReadChannel.fromValues[F,T](t) + + def map[A,B](fa: ReadChannel[F,A])(f: A=>B): ReadChannel[F,B] = + fa.map(f) + + def flatMap[A,B](fa: ReadChannel[F,A])(f: A=>ReadChannel[F,B]): ReadChannel[F,B] = + new ChFlatMappedReadChannel[F,A,B](fa,f) + + +given futureToReadChannel[F[_]](using Gopher[F]): CpsMonadConversion[F, [A] =>> ReadChannel[F,A]] with + + def apply[T](ft: F[T]): ReadChannel[F,T] = futureInput(ft) + + + + + diff --git a/shared/src/main/scala/gopher/monads/ReadTryChannelCpsMonad.scala b/shared/src/main/scala/gopher/monads/ReadTryChannelCpsMonad.scala new file mode 100644 index 00000000..02fafe69 --- /dev/null +++ b/shared/src/main/scala/gopher/monads/ReadTryChannelCpsMonad.scala @@ -0,0 +1,67 @@ +package gopher.monads + +import scala.util._ +import gopher._ +import cps._ + +import gopher.impl._ + + +given ReadTryChannelCpsMonad[F[_]](using Gopher[F]): CpsAsyncMonad[[A] =>> ReadChannel[F,Try[A]]] with CpsMonadInstanceContext[[A] =>> ReadChannel[F,Try[A]]] with + + type FW[T] = [A] =>> ReadChannel[F,Try[A]] + + def pure[T](t:T): ReadChannel[F,Try[T]] = + ReadChannel.fromValues[F,Try[T]](Success(t)) + + def map[A,B](fa: ReadChannel[F,Try[A]])(f: A=>B): ReadChannel[F,Try[B]] = + fa.map{ + case Success(a) => + try{ + Success(f(a)) + } catch { + case ex: Throwable => Failure(ex) + } + case Failure(ex) => Failure(ex) + } + + def flatMap[A,B](fa: ReadChannel[F,Try[A]])(f: A=>ReadChannel[F,Try[B]]): ReadChannel[F,Try[B]] = + new ChFlatMappedTryReadChannel(fa,{ + case Success(a) => f(a) + case Failure(ex) => ReadChannel.fromValues[F,Try[B]](Failure(ex )) + }) + + def flatMapTry[A,B](fa: ReadChannel[F,Try[A]])(f: Try[A] => 
ReadChannel[F,Try[B]]): ReadChannel[F,Try[B]] = + new ChFlatMappedTryReadChannel(fa,f) + + def error[A](e: Throwable): ReadChannel[F,Try[A]] = + val r = makeChannel[Try[A]]() + given fm: CpsSchedulingMonad[F] = summon[Gopher[F]].asyncMonad + summon[Gopher[F]].spawnAndLogFail{ async[F] { + r.write(Failure(e)) + r.close() + } } + r + + + def adoptCallbackStyle[A](source: (Try[A]=>Unit) => Unit): ReadChannel[F,Try[A]] = { + val r = makeOnceChannel[Try[A]]() + given fm: CpsSchedulingMonad[F] = summon[Gopher[F]].asyncMonad + val fv = fm.adoptCallbackStyle(source) + summon[Gopher[F]].spawnAndLogFail{ + fm.flatMapTry( fv ){ tryV => + r.awrite(tryV) + } + } + r + } + + + +given readChannelToTryReadChannel[F[_]](using Gopher[F]): + CpsMonadConversion[ [A]=>>ReadChannel[F,A], [A]=>>ReadChannel[F,Try[A]]] with + + def apply[T](ft: ReadChannel[F,T]): ReadChannel[F,Try[T]] = ft.map(x => Success(x)) + + + diff --git a/src/test/scala/example/FibonaccyFoldSuite.scala b/shared/src/test/scala/examples/FibonaccyFoldSuite.scala similarity index 56% rename from src/test/scala/example/FibonaccyFoldSuite.scala rename to shared/src/test/scala/examples/FibonaccyFoldSuite.scala index f093ec00..8138dd5f 100644 --- a/src/test/scala/example/FibonaccyFoldSuite.scala +++ b/shared/src/test/scala/examples/FibonaccyFoldSuite.scala @@ -1,13 +1,15 @@ package example import gopher._ -import gopher.channels._ -import scala.language._ +import cps._ +import munit._ + import scala.concurrent._ -import scala.concurrent.duration._ +import scala.language._ + +import cps.monads.FutureAsyncMonad +import scala.concurrent.ExecutionContext.Implicits.global -import org.scalatest._ -import gopher.tags._ /* * code from go tutorial: http://tour.golang.org/#66 but with fold instead foreach @@ -16,17 +18,18 @@ import gopher.tags._ object FibonaccyFold { - import scala.concurrent.ExecutionContext.Implicits.global - import CommonTestObjects.gopherApi._ + given Gopher[Future] = SharedGopherAPI.apply[Future]() + - def fibonacci(c: Output[Long], quit: Input[Int]): Future[(Long,Long)] = - select.afold((0L,1L)) { case ((x,y),s) => - s match { - case x: c.write => (y, x+y) + def fibonacci(c: WriteChannel[Future,Long], quit: ReadChannel[Future,Int]): Future[(Long,Long)] = async { + select.fold((0L,1L)) { case (x,y) => + select{ + case wx: c.write if wx == x => (y, x+y) case q: quit.read => - select.exit((x,y)) + SelectFold.Done((x,y)) } } + } def run(n:Int, acceptor: Long => Unit ): Future[(Long,Long)] = { @@ -43,10 +46,10 @@ object FibonaccyFold { class FibonaccyFoldSuite extends FunSuite { + test("fibonaccy must be processed up to 50") { - val last = Await.result( FibonaccyFold.run(50, _ => () ), 10 seconds )._2 - assert(last != 0) + FibonaccyFold.run(50, _ => () ).map(last => assert(last._2 != 0)) } } diff --git a/shared/src/test/scala/examples/Sieve.scala b/shared/src/test/scala/examples/Sieve.scala new file mode 100644 index 00000000..77f1669b --- /dev/null +++ b/shared/src/test/scala/examples/Sieve.scala @@ -0,0 +1,45 @@ +package examples + +import scala.concurrent.{Channel=>_,_} +import cps._ +import cps.monads.FutureAsyncMonad +import gopher._ + +import scala.concurrent.ExecutionContext.Implicits.global + + + +object Sieve1 { + + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + def run(in: Channel[Future,Int, Int], out: Channel[Future,Int,Int]): Future[Unit] = async[Future] { + var middle: ReadChannel[Future,Int] = in + while (!in.isClosed) { + val x = middle.read() + out.write(x) + middle = middle.filter(_ % x != 0) + } + } + + + + 
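
A sketch of driving the sieve above; the `firstPrimes` helper is illustrative and reuses the given `Gopher[Future]` declared in `Sieve1`:

```scala
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cps.*
import cps.monads.FutureAsyncMonad
import gopher.*
import examples.Sieve1
import examples.Sieve1.given

def firstPrimes(n: Int): Future[List[Int]] = async[Future] {
  val numbers = makeChannel[Int]()
  val primes  = makeChannel[Int]()
  numbers.awriteAll(2 to 1000)   // feed candidates in the background
  Sieve1.run(numbers, primes)    // the sieve runs concurrently with this block
  var acc = List.empty[Int]
  var i = 0
  while i < n do
    acc = primes.read() :: acc
    i += 1
  acc.reverse
}
```
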
def runRec(in: Channel[Future,Int, Int], out: Channel[Future,Int, Int]): Future[Unit] = async[Future] { + val x = in.read() + out.write(x) + await(runRec(in.filter(_ % x != 0), out)) + } + + + def runFold(in:Channel[Future,Int, Int], out: Channel[Future,Int, Int]) = async { // ??? + + select.fold(in){ s => + val x = s.read() + out.write(x) + s.filter(_ % x != 0) + } + } + + +} diff --git a/src/test/scala/example/SieveSuite.scala b/shared/src/test/scala/examples/SieveSuite.scala similarity index 56% rename from src/test/scala/example/SieveSuite.scala rename to shared/src/test/scala/examples/SieveSuite.scala index cee8eb02..a8423bda 100644 --- a/src/test/scala/example/SieveSuite.scala +++ b/shared/src/test/scala/examples/SieveSuite.scala @@ -1,17 +1,15 @@ package example import gopher._ -import gopher.channels._ -import CommonTestObjects.gopherApi._ -import scala.concurrent.{Channel=>_,_} -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits.global +import cps._ +import munit._ +import scala.concurrent.{Channel => _, _} import scala.language.postfixOps -import org.scalatest._ +import cps.monads.FutureAsyncMonad +import scala.concurrent.ExecutionContext.Implicits.global -import gopher.tags._ /** * this is direct translation from appropriative go example. @@ -19,7 +17,11 @@ import gopher.tags._ object Sieve { - def generate(n:Int, quit:Promise[Boolean]):Channel[Int] = + + given gopherApi: Gopher[Future] = SharedGopherAPI.apply[Future]() + + + def generate(n:Int, quit:Promise[Boolean]):Channel[Future,Int,Int] = { val channel = makeChannel[Int]() channel.awriteAll(2 to n) foreach (_ => quit success true) @@ -28,14 +30,14 @@ object Sieve // direct translation from go - def filter0(in:Channel[Int]):Input[Int] = + def filter0(in:Channel[Future,Int,Int]):ReadChannel[Future,Int] = { val filtered = makeChannel[Int]() - var proxy: Input[Int] = in; - go { + var proxy: ReadChannel[Future, Int] = in; + async { // since proxy is var, we can't select from one in forever loop. 
while(true) { - val prime = proxy.read + val prime = proxy.read() proxy = proxy.filter(_ % prime != 0) filtered.write(prime) } @@ -43,24 +45,14 @@ object Sieve filtered } - // use effected input - def filter(in:Channel[Int]):Input[Int] = - { - val filtered = makeChannel[Int]() - val sieve = makeEffectedInput(in) - sieve.aforeach { prime => - sieve apply (_.filter(_ % prime != 0)) - filtered <~ prime - } - filtered - } - def filter1(in:Channel[Int]):Input[Int] = + def filter1(in:Channel[Future,Int,Int]):ReadChannel[Future,Int] = { + //implicit val printCode = cps.macros.flags.PrintCode val q = makeChannel[Int]() val filtered = makeChannel[Int]() - select.afold(in){ (ch, s) => - s match { + select.afold(in){ ch => + select{ case prime: ch.read => filtered.write(prime) ch.filter(_ % prime != 0) @@ -69,36 +61,43 @@ object Sieve filtered } - def primes(n:Int, quit: Promise[Boolean]):Input[Int] = - filter(generate(n,quit)) + def primes(n:Int, quit: Promise[Boolean]):ReadChannel[Future,Int] = + filter1(generate(n,quit)) } class SieveSuite extends FunSuite { + import Sieve.gopherApi + + test("last prime before 1000") { val quit = Promise[Boolean]() - val quitInput = futureInput(quit.future) + val quitInput = quit.future.asChannel val pin = Sieve.primes(1000,quit) var lastPrime=0; - val future = select.forever { + async { + select.loop{ case p: pin.read => if (false) { System.err.print(p) System.err.print(" ") } lastPrime=p + true case q: quitInput.read => //System.err.println() - CurrentFlowTermination.exit(()); - } - Await.ready(future, 10 seconds) - assert( lastPrime == 997) + false + } + assert( lastPrime == 997 ) + } } + + } diff --git a/shared/src/test/scala/gopher/channels/AsyncChannelTests.scala b/shared/src/test/scala/gopher/channels/AsyncChannelTests.scala new file mode 100644 index 00000000..9f75f425 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/AsyncChannelTests.scala @@ -0,0 +1,37 @@ +package gopher.channels + +import cps._ +import cps.monads.FutureAsyncMonad +import scala.concurrent._ +import scala.concurrent.ExecutionContext.Implicits.global +import gopher._ + +import munit._ + +class AsyncChannelTests extends FunSuite { + + + val gopherApi = SharedGopherAPI[Future]() + val MAX_N = 100 + + test("async base: channel write, channel read") { + + val channel = gopherApi.makeChannel[Int](10) + channel.awriteAll(1 to MAX_N) + + val consumer = async{ + var sum = 0 + while{val a = channel.read() + sum += a + a < MAX_N + } do () + sum + } + + consumer.map{ s => + assert(s == (1 to MAX_N).sum) + } + + } + +} \ No newline at end of file diff --git a/shared/src/test/scala/gopher/channels/ChannelCloseSuite.scala b/shared/src/test/scala/gopher/channels/ChannelCloseSuite.scala new file mode 100644 index 00000000..eb7395c9 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/ChannelCloseSuite.scala @@ -0,0 +1,148 @@ +package gopher.channels + +import gopher._ +import scala.concurrent.Future +import scala.util.Try +import scala.util.Success +import scala.util.Failure + +import cps._ +import cps.monads.FutureAsyncMonad + +import munit._ + +class ChannelCloseSuite extends FunSuite +{ + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("writing after close is impossile") { + + val channel = makeChannel[Int](100) + + channel.close() + + val producer = channel.awriteAll(1 to 1000) + + producer.transform{ + case Success(u) => assert("" == "expected ChannelClosedException") + Failure(RuntimeException("fail")) + 
case Failure(ex) => assert(ex.isInstanceOf[ChannelClosedException]) + Success("ok") + } + + } + + def checkThrowWhenWriteClose(name: String, channel: gopher.Channel[Future,Int,Int])(implicit loc:munit.Location)= { + test(s"in async we must see throw for $name") { + channel.close() + @volatile var catched = false + @volatile var notCatched = false + val p = async { + channel.write(1) + notCatched=true + } + p.recover{ + case ex: ChannelClosedException => catched = true + }.map(_ => assert(!notCatched && catched)) + } + } + + checkThrowWhenWriteClose("buffered", makeChannel[Int](100)) + checkThrowWhenWriteClose("unbuffered", makeChannel[Int]()) + checkThrowWhenWriteClose("promise", makeOnceChannel[Int]()) + + + test("after close we can read but not more, than was send (buffered)") { + val channel = makeChannel[Int](100) + @volatile var q1, q2 = 0 + val p = async { + channel <~ 1 + channel.close() + q1 = channel.read() + } + val afterClose = p flatMap { _ => async{ + val a = channel.read() + q2 = 2 + } } + + afterClose.transform{ + case Failure(ex) => + assert(ex.isInstanceOf[ChannelClosedException]) + Success(()) + case Success(v) => + assert("Ok" == "ChannelClosedException") + Success(v) + } map (_ => { + assert(q1 == 1 && q2 != 2 ) + }) + + } + + test("after close we can read but not more, than was send (unbuffered)") { + val channel = makeChannel[Int]() + @volatile var q1, q2, q3 = 0 + val p = async { + channel <~ 1 + channel.close() + q1 = channel.read() // will be unblocked after close and tbrwo exception + } + val consumer = async{ + q3 = channel.read() // will be run + q2 = 2 + } + + val afterClose = p.flatMap(_ => consumer) + + p.transform{ + case Failure(ex) => + assert(ex.isInstanceOf[ChannelClosedException]) + Success(()) + case Success(v) => + assert("Ok" == "ChannelClosedException") + Success(v) + } map (_ => { + assert(q1 == 0) + assert(q2 == 2) + assert(q3 == 1) + }) + + } + + def checkCloseSignal(name: String, channel: gopher.Channel[Future,Int,Int])(implicit loc:munit.Location)= { + test(s"close signal must be send ($name)") { + channel.close() + @volatile var q = 0 + val fp = async { + val done = channel.done.read() + q = 1 + } + fp map (_ => assert(q == 1)) + } + } + + checkCloseSignal("buffered", makeChannel[Int](100)) + checkCloseSignal("unbuffered", makeChannel[Int](100)) + + + test("awrite to close must produce ChannelClosedFailure in Future") { + val channel = makeChannel[Int](100) + channel.close + var x = 1 + val f0 = async { + try { + channel.write(1) + assert("" == "Here should be unreachange") + }catch{ + case ex: ChannelClosedException => + // all ok + } + } + } + + +} + + diff --git a/shared/src/test/scala/gopher/channels/ChannelFilterSuite.scala b/shared/src/test/scala/gopher/channels/ChannelFilterSuite.scala new file mode 100644 index 00000000..53eb3480 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/ChannelFilterSuite.scala @@ -0,0 +1,37 @@ +package gopher.channels + +import gopher._ +import scala.concurrent.Future +import scala.util.Try +import scala.util.Success +import scala.util.Failure + +import cps._ +import cps.monads.FutureAsyncMonad + +import munit._ + + +class ChannelFilterSuite extends FunSuite: + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("odd filter should leave only odd numbers in filtered channel") { + + val ch = makeChannel[Int]() + + val filtered = ch.filter(_ % 2 == 0) + + ch.awriteAll(1 to 100) + async { + var i = 0 + while(i < 50) { + val x = 
filtered.read() + assert( x % 2 == 0) + i=i+1 + } + } + + } \ No newline at end of file diff --git a/shared/src/test/scala/gopher/channels/DuppedChannelsSuite.scala b/shared/src/test/scala/gopher/channels/DuppedChannelsSuite.scala new file mode 100644 index 00000000..9656e724 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/DuppedChannelsSuite.scala @@ -0,0 +1,76 @@ +package gopher.channels + +import cps._ +import cps.monads.FutureAsyncMonad +import gopher._ +import munit._ + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.language.postfixOps +import scala.util._ + +class DuppedChannelsSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("duped input must show two") { + val ch = makeChannel[String]() + val dupped = ch.dup() + val r0 = ch.awrite("1") + val r1 = dupped._1.aread() + val r2 = dupped._2.aread() + val r = for(v1 <- r1; v2 <- r2) yield (v1,v2) + + r map {x => + assert(x == ("1","1")) + } + + } + + + test("output is blocked by both inputs") { + //import CommonTestObjects.FutureWithTimeout + val ch = makeChannel[Int]() + val aw=ch.awriteAll(1 to 100) + val (in1, in2) = ch.dup() + val at1 = in1.atake(100) + // TODO:make withTimeout as extension ? + //val awt = aw.withTimeout(1 second) + async { + assert(!aw.isCompleted && !at1.isCompleted) + val res = await(in2.atake(100)) + await(aw) + } + } + + + test("on closing of main stream dupped outputs also closed.") { + val ch = makeChannel[Int](1) + val (in1, in2) = ch.dup() + val f1 = async{ + ch.write(1) + ch.close() + } + for{ fx <- f1 + x <- in1.aread() + r <- in1.aread().transformWith { + case Success(u) => + Future failed new IllegalStateException("Must be closed") + case Failure(u) => + Future successful (assert(x == 1)) + } + } yield { + r + } + + } + + + + +} + + diff --git a/shared/src/test/scala/gopher/channels/ExpireChannelSuite.scala b/shared/src/test/scala/gopher/channels/ExpireChannelSuite.scala new file mode 100644 index 00000000..edd71c9e --- /dev/null +++ b/shared/src/test/scala/gopher/channels/ExpireChannelSuite.scala @@ -0,0 +1,90 @@ +package gopher.channels + + +import cps._ +import gopher._ +import cps.monads.FutureAsyncMonad + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util._ +import scala.language.postfixOps + +import munit._ + +class ExpireChannelSuite extends FunSuite { + + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("if message not readed, it expires") { + val ch = makeChannel[Int](10).withExpiration(300 milliseconds, false) + val emptyRead = for {_ <- ch.awrite(1) + _ <- Time.asleep(400 milliseconds) + r <- ch.aread() + } yield r + + async { + + try { + await( emptyRead.withTimeout(300 milliseconds) ) + assert("Here" == "should not be accessible") + }catch{ + case ex: TimeoutException => + assert(true) + } + + } + + } + + + test("before expire we can read message") { + val ch = makeChannel[Int](10).withExpiration(300 milliseconds, false) + for { + _ <- ch.awrite(1) + _ <- Time.asleep(10 milliseconds) + r <- ch.aread() + } yield assert(r==1) + } + + + + test("unbuffered expriew channel: return from write when value expired") { + val ch = makeChannel[Int](0).withExpiration(300 milliseconds, true) + ch.awrite(1).withTimeout(2 seconds).transform{ + case Failure(ex) => + assert(ex.isInstanceOf[TimeoutException]) + Success(()) + case Success(u) => + 
assert(""=="TimeoutException expected")
+        Failure(new RuntimeException())
+    }
+  }
+
+
+
+  test("expire must be in order") {
+    val ch = makeChannel[Int](10).withExpiration(300 milliseconds, false)
+    val fr1 = ch.aread()
+    val fr2 = ch.aread()
+    for {
+      _ <- ch.awriteAll(List(1,2))
+      _ <- Time.asleep(10 milliseconds)
+      fr3 = ch.aread()
+      r3 <- fr3.withTimeout(1 second).transform{
+              case Failure(ex: TimeoutException) =>
+                 Success(())
+              case other =>
+                assert(""==s"TimeoutException expected, we have $other")
+                Failure(new RuntimeException())
+            }
+      w4 <- ch.awrite(4)
+      r31 <- fr3
+    } yield assert(r31 == 4)
+  }
+
+
+}
diff --git a/shared/src/test/scala/gopher/channels/FibonnachySimpleTests.scala b/shared/src/test/scala/gopher/channels/FibonnachySimpleTests.scala
new file mode 100644
index 00000000..e7e5083d
--- /dev/null
+++ b/shared/src/test/scala/gopher/channels/FibonnachySimpleTests.scala
@@ -0,0 +1,81 @@
+package gopher.channels
+
+import scala.concurrent._
+import cps._
+import cps.monads.FutureAsyncMonad
+import gopher._
+
+import munit._
+
+class FibbonachySimpleTest extends FunSuite {
+
+
+  import scala.concurrent.ExecutionContext.Implicits.global
+  given Gopher[Future] = SharedGopherAPI.apply[Future]()
+
+
+  def fibonaccy0(c: WriteChannel[Future,Long], quit: ReadChannel[Future,Int]): Future[Unit] =
+    async[Future]{
+      var (x,y) = (0L,1L)
+      var done = false
+      while(!done) {
+        // TODO: add select group to given
+        select.group[Unit].onWrite(c, x){ x0 =>
+          x=y
+          y=x0+y
+        }
+        .onRead(quit){ v =>
+          done = true
+        }
+        .run()
+      }
+    }
+
+  def fibonaccy1(c: WriteChannel[Future,Long], quit: ReadChannel[Future,Int]): Future[Unit] =
+    async[Future]{
+      var (x,y) = (0L,1L)
+      var done = false
+      while(!done) {
+        select{
+          case z: c.write if (z == x) =>
+            x = y
+            y = z + y
+          case q: quit.read =>
+            done = true
+        }
+      }
+    }
+
+
+  def run(starter: (WriteChannel[Future,Long], ReadChannel[Future,Int])=> Future[Unit],
+          acceptor: Long => Unit, n:Int): Future[Unit] = {
+      val fib = makeChannel[Long]()
+      val q = makeChannel[Int]()
+      val start = starter(fib,q)
+      async{
+        for( i <- 1 to n) {
+          val x = fib.read()
+          acceptor(x)
+        }
+        q <~ 1
+      }
+  }
+
+  test("simple fibonacci fun (no macros)") {
+    @volatile var last: Long = 0L
+    async{
+      await(run(fibonaccy0, last = _, 40))
+      assert(last != 0)
+    }
+  }
+
+  test("fibonacci fun (select macros)") {
+    @volatile var last: Long = 0L
+    async{
+      await(run(fibonaccy1, last = _, 40))
+      assert(last != 0)
+    }
+  }
+
+
+}
diff --git a/shared/src/test/scala/gopher/channels/FoldSelectSuite.scala b/shared/src/test/scala/gopher/channels/FoldSelectSuite.scala
new file mode 100644
index 00000000..bea1a2a4
--- /dev/null
+++ b/shared/src/test/scala/gopher/channels/FoldSelectSuite.scala
@@ -0,0 +1,94 @@
+package gopher.channels
+
+import cps._
+import gopher._
+import munit._
+
+import scala.concurrent.{Channel=>_,_}
+import scala.language.postfixOps
+
+import cps.monads.FutureAsyncMonad
+
+class FoldSelectSuite extends FunSuite
+{
+
+
+  import scala.concurrent.ExecutionContext.Implicits.global
+  given Gopher[Future] = SharedGopherAPI.apply[Future]()
+
+
+  test("fold-over-selector with changed read") {
+    //implicit val printCode = cps.macroFlags.PrintCode
+
+    val in = makeChannel[Int]()
+    val out = makeChannel[Int]()
+    var r0 = IndexedSeq[Int]()
+
+    // dotty bug,
+    val generator = async {
+      select.fold(in){ ch =>
+        select{
+          case p: ch.read =>
+            r0 = r0 :+ p
+            out.write(p)
+            ch.filter{ _ % p != 0 }
+        }
+      }
+    }
+
+    //generator.failed.foreach{ _.printStackTrace() }
+    val writer = async {
+        
for(i <- 2 to Int.MaxValue) { + in.write(i) + } + } + + val read = async { + for(i <- 1 to 100) yield { + val x = out.read() + x + } + } + + read map (r => assert(r(18) == 67 && r.last == 541) ) + + } + + + + test("fold-over-selector with swap read") { + + val in1 = makeChannel[Int]() + val in2 = makeChannel[Int]() + val quit = makeChannel[Boolean]() + + val generator = async { + select.fold((in1,in2,0)){ case (in1,in2,n) => + select { + case x:in1.read => + if (x >= 100) { + SelectFold.Done((in1, in2, n)) + } else { + (in2, in1, n + x) + } + case x:in2.read => + (in2,in1,n-x) + } + } + } + + in1.awriteAll(1 to 101) + + //val r = Await.result(generator, 1 second) + + // 0 + 1 - 2 + 3 - 4 + 5 - 6 ... +99 - 100 + 101 + // - 1 2 -2 3 - 3 +50 - 50 + generator.map(r => assert(r._3 == -50)) + + } + + + +} + + diff --git a/shared/src/test/scala/gopher/channels/ForeverTerminationSuite.scala b/shared/src/test/scala/gopher/channels/ForeverTerminationSuite.scala new file mode 100644 index 00000000..5a9f64a5 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/ForeverTerminationSuite.scala @@ -0,0 +1,41 @@ +package gopher.channels + + +import cps._ +import gopher._ +import munit._ + +import scala.concurrent.{Channel=>_,_} +import scala.language.postfixOps + +import cps.monads.FutureAsyncMonad + + +class ForeverSuite extends FunSuite +{ + + import ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("forevr not propagate signals after exit") { + implicit val printCode = cps.macros.flags.PrintCode + val channel = makeChannel[Int](100) + var sum = 0 + val f0: Future[Unit] = select.aforever{ + case x: channel.read => { + sum += x + throw ChannelClosedException() + } + } + for {r2 <- channel.awrite(1) + r3 <- channel.awrite(2) + r0 <- f0 } yield assert(sum == 1) + + } + + + +} + + diff --git a/shared/src/test/scala/gopher/channels/FutureWithTimeout.scala b/shared/src/test/scala/gopher/channels/FutureWithTimeout.scala new file mode 100644 index 00000000..31de76bf --- /dev/null +++ b/shared/src/test/scala/gopher/channels/FutureWithTimeout.scala @@ -0,0 +1,22 @@ +package gopher.channels + +import gopher._ +import scala.concurrent.Future +import scala.concurrent.Promise +import scala.concurrent.ExecutionContext +import scala.concurrent.TimeoutException +import scala.concurrent.duration.FiniteDuration + + +extension [T](f: Future[T]) { + + def withTimeout(d: FiniteDuration)(using gopherApi: Gopher[Future], ec: ExecutionContext): Future[T] = + val p = Promise[T] + f.onComplete(p.tryComplete) + gopherApi.time.schedule({ () => + p.tryFailure(new TimeoutException()) + }, d) + p.future + +} + diff --git a/shared/src/test/scala/gopher/channels/IOTimeoutsSuite.scala b/shared/src/test/scala/gopher/channels/IOTimeoutsSuite.scala new file mode 100644 index 00000000..3e6e6944 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/IOTimeoutsSuite.scala @@ -0,0 +1,143 @@ +package gopher.channels + +import gopher._ +import cps._ +import gopher._ +import munit._ + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.language.postfixOps + +import cps.monads.FutureAsyncMonad + + +class IOTimeoutsSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + test("messsaged from timeouts must be appear during reading attempt from empty channel") { + val ch = makeChannel[String]() + //val (chReady, chTimeout) = ch.withInputTimeouts(300 milliseconds) + async { + val f 
= select.once {
+          case x: ch.read => 1
+          case t: Time.after if t == (300 milliseconds)  => 2
+       }
+       assert(f==2)
+    }
+  }
+
+
+
+  test("when we have a value, we have no timeouts") {
+    val ch = makeChannel[String]()
+    ch.awrite("qqq")
+    //val (chReady, chTimeout) = ch.withInputTimeouts(300 milliseconds)
+    async {
+      val x = select.once {
+        case x: ch.read => 1
+        case t: Time.after if t == (300 milliseconds) => 2
+      }
+      assert(x==1)
+    }
+  }
+
+
+  test("messages from timeouts must appear during an attempt to write to a filled unbuffered channel") {
+    val ch = makeChannel[Int]()
+    //val (chReady, chTimeout) = ch.withOutputTimeouts(150 milliseconds)
+    async {
+      @volatile var count = 1
+      select.loop{
+        case x: ch.write if (x==count) =>
+             count += 1  // will never be called, since we have no reader
+             true
+        case t: Time.after if t == (150 milliseconds) =>
+             false
+      }
+      assert(count==1)
+    }
+  }
+
+
+  test("messages from timeouts must appear during an attempt to write to a filled buffered channel") {
+    val ch = makeChannel[Int](1)
+    //val (chReady, chTimeout) = ch.withOutputTimeouts(150 milliseconds)
+    async{
+      @volatile var count = 1
+      select.loop {
+        case x: ch.write if (x==count) =>
+          count += 1
+          true
+        case t: Time.after if t == (150 milliseconds) =>
+          false
+      }
+      assert(count==2)
+    }
+  }
+
+
+  test("when we have somewhere to write -- no timeouts") {
+    val ch = makeChannel[Int](1)
+    //val (chReady, chTimeout) = ch.withOutputTimeouts(300 milliseconds)
+    async {
+      val x = select.once {
+        case x: ch.write if (x==1) => 1
+        case t: Time.after if t == (150 milliseconds) => 2
+      }
+      assert(x == 1)
+    }
+  }
+
+
+
+  test("during 'normal' processing timeouts are absent") {
+
+    //implicit val printCode = cps.macroFlags.PrintCode
+
+    val ch = makeChannel[Int]()
+    //val (chInputReady, chInputTimeout) = ch.withInputTimeouts(300 milliseconds)
+    //val (chOutputReady, chOutputTimeout) = ch.withOutputTimeouts(300 milliseconds)
+    @volatile var count = 0
+    @volatile var count1 = 0
+    @volatile var wasInputTimeout = false
+    @volatile var wasOutputTimeout = false
+    val maxCount = 100
+
+    val fOut = async {
+      select.loop {
+        case x: ch.write if (x==count) =>
+          if (count == maxCount) {
+            false
+          } else {
+            count += 1
+            true
+          }
+        case t: Time.after if t == (300 milliseconds) =>
+          wasOutputTimeout = true
+          true
+      }
+    }
+    val fIn = async {
+      select.loop {
+        case x: ch.read =>
+          count1 = x
+          (x != maxCount)
+        case t: Time.after if t == (150 milliseconds) =>
+          wasInputTimeout = true
+          true
+      }
+    }
+    for{
+      _ <- fOut
+      _ <- fIn
+      _ = assert(count == maxCount)
+      _ = assert(count1 == maxCount)
+    } yield assert(!wasOutputTimeout && !wasInputTimeout)
+  }
+
+
+}
+
diff --git a/shared/src/test/scala/gopher/channels/InputOpsSuite.scala b/shared/src/test/scala/gopher/channels/InputOpsSuite.scala
new file mode 100644
index 00000000..72070919
--- /dev/null
+++ b/shared/src/test/scala/gopher/channels/InputOpsSuite.scala
@@ -0,0 +1,368 @@
+package gopher.channels
+
+import cps._
+import gopher._
+import munit._
+
+import scala.concurrent._
+import scala.concurrent.duration._
+import scala.language.postfixOps
+
+import cps.monads.FutureAsyncMonad
+
+
+class InputOpsSuite extends FunSuite {
+
+  import scala.concurrent.ExecutionContext.Implicits.global
+  given Gopher[Future] = SharedGopherAPI.apply[Future]()
+
+  test("map operation for input") {
+        val ch = makeChannel[String]()
+        ch.awriteAll(List("AAA","123","1234","12345"))
+        val mappedCh = ch map (_.reverse)
+        mappedCh.atake(4) map { l =>
+          assert(l(0) == "AAA" &&
+             l(1) == "321" &&
+             l(2) == "4321" &&
+ l(3) == "54321") + } + } + + + test("filter operation for input") { + val ch = makeChannel[String]() + ch.awriteAll(List("qqq", "AAA","123","1234","12345")) + val filteredCh = ch filter (_.contains("A")) + filteredCh.aread() map { x => assert(x == "AAA") } + } + + + + test("zip operation for two simple inputs") { + val ch1 = makeChannel[String]() + ch1.awriteAll(List("qqq", "AAA","123","1234","12345")) + val ch2 = makeChannel[Int]() + ch2.awriteAll(List(1, 2, 3, 4, 5, 6)) + val zipped = ch1 zip ch2 + for{ r1 <- zipped.aread() + _ = assert( r1 == ("qqq",1) ) + r2 <- zipped.aread() + _ = assert( r2 == ("AAA",2) ) + r3 <- zipped.aread() + _ = assert( r3 == ("123",3) ) + r4 <- zipped.aread() + _ = assert( r4 == ("1234",4) ) + r5 <- zipped.aread() + l = assert( r5 == ("12345",5) ) + } yield l + } + + + test("zip operation from two finite channels") { + val ch1 = List(1,2).asReadChannel + val ch2 = List(1,2,3,4,5,6).asReadChannel + val zipped = ch1 zip ch2 + for{ + r1 <- zipped.aread() + a1 = assert(r1 == (1, 1)) + r2 <- zipped.aread() + a2 = assert( (r2 == (2,2)) ) + r3 <- async{ + try + zipped.read() + assert(""=="exception should be called before") + catch + case ex: Throwable => + assert(ex.isInstanceOf[ChannelClosedException]) + } + } yield r3 + } + + + test("take from zip") { + val ch1 = List(1,2,3,4,5).asReadChannel + val ch2 = List(1,2,3,4,5,6).asReadChannel + val zipped = ch1 zip ch2 + for {ar <- zipped.atake(5) + _ = assert(ar(0) == (1, 1)) + l = assert(ar(4) == (5, 5)) + } yield l + } + + + test("taking from iterator-input") { + val ch1 = List(1,2,3,4,5).asReadChannel + for( ar <- ch1.atake(5) ) yield assert(ar(4)==5) + } + + + test("zip with self will no dup channels, but generate (odd, even) pairs. It's a feature, not a bug") { + val ch = makeChannel[Int]() + val zipped = ch zip ch + ch.awriteAll(List(1,2,3,4,5,6,7,8)) + for{ r1 <- zipped.aread() + a1 = assert( Set((1,2),(2,1)) contains r1 ) + r2 <- zipped.aread() + a2 = assert( Set((3,4),(4,3)) contains r2 ) + r3 <- zipped.aread() + a3 = assert( Set((5,6),(6,5)) contains r3 ) + } yield a3 + } + + + test("reading from Q1|Q2") { + + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + + val ar1 = (ch1 | ch2).aread() + ch1.awrite(1) + for{ + r1 <- ar1 + ar2 = (ch1 | ch2).aread() + _ = ch2.awrite(2) + r2 <- ar2 + } yield { + assert( r1 == 1 ) + assert( r2 == 2) + } + + } + + + test("simultanuos reading from Q1|Q2") { + + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + + val ar1 = (ch1 | ch2).aread() + val ar2 = (ch1 | ch2).aread() + + ch1.awrite(1) + ch2.awrite(2) + + for {r1 <- ar1 + r2 <- ar2 + _ = if (r1 == 1) { + assert(r2 == 2) + } else { + assert(r2 == 1) + } + //r3 <- recoverToSucceededIf[TimeoutException] { + // timeouted( (ch1 | ch2).aread, 300 milliseconds) + //} + r3 <- async { + try { + await((ch1 | ch2).aread().withTimeout(300 milliseconds)) + } catch { + case ex: TimeoutException => + assert(true) + } + } + } yield r3 + + } + + + test("reflexive or Q|Q") { + val ch = makeChannel[Int]() + val aw1 = ch.awrite(1) + val ar1 = (ch | ch).aread() + for {r1 <- ar1 + _ = assert(r1 == 1) + ar2 = (ch | ch).aread() + //r2_1 <- recoverToSucceededIf[TimeoutException] { + // timeouted(ar2, 300 milliseconds) + //} + r2_1 <- async { + try { + ar2.withTimeout(300 milliseconds) + } catch { + case ex: TimeoutException => + assert(true) + } + } + _ = ch.awrite(3) + r2 <- ar2 + a = assert(r2 == 3) + } yield a + } + + + test("two items read from Q1|Q2") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + 
val aw1 = ch1.awrite(1) + val aw2 = ch2.awrite(2) + val chOr = (ch1 | ch2) + val ar1 = chOr.aread() + val ar2 = chOr.aread() + for {r1 <- ar1 + r2 <- ar2 + } yield assert( ((r1,r2)==(1,2)) ||((r1,r2)==(2,1)) ) + } + + + test("atake read from Q1|Q2") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + + val aw1 = ch1.awriteAll(1 to 2) + val aw2 = ch2.awriteAll(1 to 2) + val at = (ch1 | ch2).atake(4) + for( r <- at) yield assert(r.nonEmpty) + } + + + test("awrite/take ") { + val ch = makeChannel[Int]() + val aw = ch.awriteAll(1 to 100) + val at = ch.atake(100) + for (r <- at) yield assert(r.size == 100) + } + + + test("Input foreach on closed stream must do nothing ") { + val ch = makeChannel[Int]() + @volatile var flg = false + val f = async { for(s <- ch) { + flg = true + } } + ch.close() + f map (_ => assert(!flg)) + } + + + test("Input foreach on stream with 'N' elements inside must run N times ") { + //val w = new Waiter + val ch = makeChannel[Int]() + @volatile var count = 0 + val cf = async { for(s <- ch) { + count += 1 + } } + val ar = ch.awriteAll(1 to 10) map (_ -> ch.close) + val acf = for(c <- cf) yield assert(count == 10) + + ar.flatMap(_ => acf).withTimeout(10 seconds) + } + + + test("Input afold on stream with 'N' elements inside ") { + val ch = makeChannel[Int]() + val f = ch.afold(0)((s,e)=>s+1) + val ar = ch.awriteAll(1 to 10) + ar.onComplete{ case _ => ch.close() } + for(r <- f) yield assert(r==10) + } + + + test("forech with mapped closed stream") { + def one(i:Int):Future[Boolean] = { + val ch = makeChannel[Int]() + val mapped = ch map (_ * 2) + @volatile var count = 0 + val f = async{ for(s <- mapped) { + // error in compiler + assert((s % 2) == 0) + count += 1 + } } + val ar = ch.awriteAll(1 to 10) map (_ => ch.close) + for{ + r <- f + a <- ar + } yield count == 10 + } + Future.sequence(for(i <- 1 to 10) yield one(i)) map ( _.last ) + } + + + test("forech with filtered closed stream") { + val ch = makeChannel[Int]() + val filtered = ch filter (_ %2 == 0) + @volatile var count = 0 + val f = async { for(s <- filtered) { + count += 1 + } } + val ar = ch.awriteAll(1 to 10) map (_ => ch.close) + for{ a <- ar + r <- f + } yield assert(count==5) + } + + + test("append for finite stream") { + val ch1 = makeChannel[Int](10) + val ch2 = makeChannel[Int](10) + val appended = ch1 append ch2 + var sum = 0 + var prev = 0 + var monotonic = true + val f = async { for(s <- appended) { + // bug in compiler 2.11.7 + //w{assert(prev < s)} + //if (prev >= s) w{assert(false)} + //println(s"readed $s") + if (prev >= s) monotonic=false + prev = s + sum += s + } } + + val a1 = ch1.awriteAll(1 to 10) map { _ => ch1.close(); } + val a2 = ch2.awriteAll((1 to 10)map(_*100)) + for{ r1 <- a1 + r2 <- a2} yield assert(monotonic) + } + + + test("order of reading from unbuffered channel") { + val ch = makeChannel[Int]() + ch.awriteAll(List(10,12,34,43)) + + for{ + r1 <- ch.aread() + r2 <- ch.aread() + r3 <- ch.aread() + r4 <- ch.aread() + } yield assert((r1,r2,r3,r4) == (10,12,34,43) ) + + } + + + + test("append for empty stream") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + val appended = ch1 append ch2 + val f = appended.atake(10).map(_.sum) + ch1.close() + val a2 = ch2.awriteAll(1 to 10) + for(r <- f) yield assert(r==55) + } + + + test("channel fold with async operation inside") { + val ch1 = makeChannel[Int](10) + val ch2 = makeChannel[Int](10) + val fs = async { + val sum = ch1.fold(0){ (s,n) => + val n1 = ch2.read() + //s+(n1+n2) -- stack overflow in 2.11.8 
compiler. TODO: submit bug + s+(n+n1) + } + sum + } + async { + ch1.writeAll(1 to 10) + ch2.writeAll(1 to 10) + ch1.close() + } + async { + val r = await(fs) + assert(r == 110) + } + } + + +} + + diff --git a/shared/src/test/scala/gopher/channels/MacroSelectSuite.scala b/shared/src/test/scala/gopher/channels/MacroSelectSuite.scala new file mode 100644 index 00000000..82a40bac --- /dev/null +++ b/shared/src/test/scala/gopher/channels/MacroSelectSuite.scala @@ -0,0 +1,499 @@ +package gopher.channels + +import cps._ +import gopher._ +import munit._ + +import scala.concurrent.{Channel=>_,_} +import scala.concurrent.duration._ +import scala.util._ +import scala.language.postfixOps + +import cps.monads.FutureAsyncMonad + +class MacroSelectSuite extends FunSuite +{ + + import ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("select emulation with macroses") { + + val channel = makeChannel[Int](100) + + async[Future] { + var i = 1 + while(i <= 1000) { + channel <~ i + i+=1 + } + //TODO: implement for in goas preprocessor to async + // dotty bug: position not set + //for( i <- 1 to 1000) + // channel <~ i + } + + var sum = 0 + val consumer = async[Future] { + select.loop{ + case i: channel.read => + //System.err.println("received:"+i) + sum = sum + i + i < 1000 + } + sum + } + + for{ + _ <- consumer + xsum = (1 to 1000).sum + } yield assert(xsum == sum) + + } + + test("select operation with async-op insode ") { + + val channel = makeChannel[Int](100) + val middle = makeChannel[Int](100) + + async[Future] { + var i = 1 + while(i <= 1000) { + channel <~ i + i+=1 + } + //TODO: implement for in goas preprocessor to async + // dotty bug: position not set + //for( i <- 1 to 1000) + // channel <~ i + } + + var sum = 0 + val consumer1 = async[Future] { + select.loop{ + case i: channel.read => + //System.err.println("received:"+i) + middle.write(i) + i < 1000 + } + sum + } + + val consumer2 = async[Future] { + select.loop{ + case i: middle.read => + //System.err.println("received:"+i) + sum = sum + i + i < 1000 + } + sum + } + + for{ + _ <- consumer2 + xsum = (1 to 1000).sum + } yield assert(xsum == sum) + + } + + + + test("select with run-once") { + val channel1 = makeChannel[Int](100) + val channel2 = makeChannel[Int](100) + + val g = async[Future] { + var nWrites=0 + select{ + case x: channel1.write if (x==1) => { + nWrites = nWrites + 1 + } + case x: channel2.write if (x==1) => { + nWrites = nWrites + 1 + } + } + + var nReads=0 + select { + case x: channel1.read => { {}; nReads = nReads + 1 } + case x: channel2.read => { {}; nReads = nReads + 1 } + } + + (nWrites, nReads) + } + + g map { case(nWrites,nReads) => assert(nWrites==1 && nReads==1)} + + } + + + test("select from futureInput") { + async[Future] { + val channel = makeChannel[Int](100) + val future = Future successful 10 + val fu = futureInput(future) + var res = 0 + val r = select{ + case x: channel.read => + Console.println(s"readed from channel: ${x}") + true + case x: fu.read => + //Console.println(s"readed from future: ${x}") + res = x + false + // syntax for using channels/futures in cases without + // setting one in stable identifers. + //case x: Int if (x == future.read) => + // res = x + } + assert(res == 10) + } + } + + + /* + TODO: think, are we want to keep this syntax in 2.0.0 (?) 
+ test("select syntax with read/writes in guard") { + import gopherApi._ + val channel1 = makeChannel[Int](100) + val channel2 = makeChannel[Int](100) + var res = 0 + val r = select.loop{ + case x: Int if (x==channel1.write(3)) => + Console.println(s"write to channel1: ${x} ") + true + case x: Int if (x==channel2.read) => + Console.println(s"readed from channel2: ${x}") + true + case x: Int if (x==(Future successful 10).read) => + res=x + false + } + r map (_ => assert(res==10)) + } + */ + + + + test("select syntax with @unchecked annotation") { + val channel1 = makeChannel[List[Int]](100) + val channel2 = makeChannel[List[Int]](100) + var res = 0 + channel1.awrite(List(1,2,3)) + async { + select.once{ + case x: channel1.read @ unchecked => + res=1 + case x: channel2.read @ unchecked => + res=2 + } + assert(res==1) + } + + } + + + test("tuple in caseDef as one symbol") { + async { + val ch = makeChannel[(Int,Int)](100) + var res = 0 + ch.awrite((1,1)) + val r = select.once{ + case xpair: ch.read @unchecked => + // fixed error in compiler: Can't find proxy + val (a,b)=xpair + res=a + } + assert(res == 1) + } + } + + + test("multiple readers for one write") { + val ch = makeChannel[Int](10) + var x1 = 0 + var x2 = 0 + var x3 = 0 + var x4 = 0 + var x5 = 0 + val f1 = async { + select.once{ + case x:ch.read => + x1=1 + } + } + val f2 = async { + select.once{ + case x:ch.read => + x2=1 + } + } + val f3 = async { + select.once{ + case x:ch.read => + x3=1 + } + } + val f4 = async { + select.once{ + case x:ch.read => + x4=1 + } + } + val f5 = async{ + select.once{ + case x:ch.read => + x5=1 + } + } + for {_ <- ch.awrite(1) + _ <- Future.firstCompletedOf(List(f1, f2, f3, f4, f5)) + _ = ch.close() + _ <- Future.sequence(List(f1, f2, f3, f4, f5)).recover{ + case _ :ChannelClosedException => () + } + } yield assert(x1+x2+x3+x4+x5==1) + } + + + test("fold over selector") { + val ch = makeChannel[Int](10) + val back = makeChannel[Int]() + val quit = Promise[Boolean]() + val quitChannel = futureInput(quit.future) + val r = async { + select.fold(0){ x => + select { + case a:ch.read => back <~ a + x+a + case q: quitChannel.read => + SelectFold.Done(x) + } + } + } + ch.awriteAll(1 to 10) + back.aforeach{ x => + if (x==10) { + quit success true + } + } + r map (sum => assert(sum==(1 to 10).sum)) + } + + + test("fold over selector with idle-1") { + val ch1 = makeChannel[Int](10) + val ch2 = makeChannel[Int](10) + ch1.awrite(1) + //implicit val printCode = cps.macroFlags.PrintCode + //implicit val debugLevel = cps.macroFlags.DebugLevel(20) + for { + _ <- Future.successful(()) + sf = select.afold((0, 0, 0)) { case (n1, n2, nIdle) => + select{ + case x: ch1.read => + val nn1 = n1 + 1 + if (nn1 > 100) { + SelectFold.Done((nn1, n2, nIdle)) + } else { + ch2.write(x) + (nn1, n2, nIdle) + } + case x: ch2.read => + ch1.write(x) + (n1, n2 + 1, nIdle) + case t : Time.after if (t == (50 milliseconds)) => + (n1, n2, nIdle + 1) + } + } + (n1, n2, ni) <- sf + _ = assert(n1 + n2 + ni > 100) + sf2 = select.afold((0, 0)) { case (n1, nIdle) => + select{ + case x: ch1.read => + (n1 + 1, nIdle) + case t: Time.after if t == (50 milliseconds) => + val nni = nIdle + 1 + if (nni > 3) { + SelectFold.Done((n1, nni)) + } else { + (n1, nni) + } + } + } + (n21, n2i) <- sf2 + } yield + assert(n2i>3) + } + + + test("map over selector") { + val ch1 = makeChannel[Int](10) + val ch2 = makeChannel[Int](10) + val quit = Promise[Boolean]() + val quitChannel = quit.future.asChannel + val out = select.mapAsync[Int]{ s => + val v = async{ + s.apply{ 
+ case x:ch1.read => + x*2 + case Channel.Read(x:Int,ch) if ch == ch2 => + x*3 + case Channel.Read(q, ch) if ch == quitChannel => + throw ChannelClosedException() + } + } + v + } + ch1.awriteAll(1 to 10) + ch2.awriteAll(100 to 110) + val f: Future[Int] = out.afold(0){ (s,x) => s+x } + async { + Time.sleep(1 second) + quit success true + val x = await(f) + //println(s"x==$x") + assert(x > 3000) + } + } + + + + + test("input fold") { + val ch1 = makeChannel[Int]() + ch1.awriteAll(1 to 10) map { _ => ch1.close() } + async { + val x = ch1.fold(0){ case (s,x) => s+x } + assert(x==55) + } + } + + + test("map over selector") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int](1) + val f1 = ch1.awrite(1) + val f2 = ch2.awrite(2) + async { + val chs = for(s <- select) yield { + s.apply{ + case x:ch1.read => x*3 + case x:ch2.read => x*5 + } + } + val fs1 = chs.aread() + val fs2 = chs.aread() + val s1 = await(fs1) + val s2 = await(fs2) + assert(s1==3 || s1==10) + } + } + + + + test("one-time channel make") { + val ch = makeOnceChannel[Int]() + val f1 = ch.awrite(1) + val f2 = ch.awrite(2) + async { + val x = await(ch.aread()) + val x2 = Try(await(f2.failed)) + assert(x == 1) + assert(x2.get.isInstanceOf[ChannelClosedException]) + } + } + + + + test("check for done signal from one-time channel") { + val ch = makeOnceChannel[Int]() + val sf = select.afold((0)){ x => + select{ + case v: ch.read => + x + v + case v: ch.done.read => + SelectFold.Done(x) + } + } + val f1 = ch.awrite(1) + async { + await(f1) + val r = await(sf) + assert(r==1) + } + } + + + test("check for done signal from unbuffered channel") { + val ch = makeChannel[Int]() + val sf = select.afold((0)){ x => + select{ + case v: ch.read => x + v + case v: ch.done.read => SelectFold.Done(x) + } + } + val f1 = ch.awriteAll(1 to 5) map (_ =>ch.close) + async { + val r = await(sf) + assert(r==15) + } + } + + + test("check for done signal from buffered channel") { + val ch = makeChannel[Int](10) + val sf = select.afold((0)){ x => + select { + case v: ch.read => x + v + case c: ch.done.read => SelectFold.Done(x) + } + } + val f1 = async { + ch.writeAll(1 to 5) + // let give all buffers to processe + Time.sleep(200 millis) + ch.close() + } + async { + val r = await(sf) + assert(r == 15) + } + } + + + test("check for done signal from select map") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + val q = makeChannel[Boolean]() + async{ + val chs: ReadChannel[Future,Int] = for(s <- select) yield { + s.select{ + case x: ch1.read => x*3 + case x: ch2.read => x*2 + case x: q.read => + throw ChannelClosedException() + } + } + val chs2 = select.afold(0){ n => + select{ + case x:chs.read => + n + x + case x:chs.done.read => + SelectFold.Done(n) + } + } + // note, that if we want call of quit after last write, + // ch1 and ch2 must be unbuffered. 
+ val sendf = for{ _ <- ch1.awriteAll(1 to 10) + _ <- ch2.awriteAll(1 to 10) + _ <- q.awrite(true) } yield 1 + val r = await(chs2) + + assert( r == (1 to 10).map(_ * 5).sum ) + } + } + + +} diff --git a/shared/src/test/scala/gopher/channels/ReadChannelFactoryTest.scala b/shared/src/test/scala/gopher/channels/ReadChannelFactoryTest.scala new file mode 100644 index 00000000..527cda0b --- /dev/null +++ b/shared/src/test/scala/gopher/channels/ReadChannelFactoryTest.scala @@ -0,0 +1,68 @@ +package gopher.channels + +import gopher._ +import cps._ +import munit._ + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.{Channel=>_,_} +import scala.concurrent.duration._ + +import cps.monads.FutureAsyncMonad + +class ReadChannelFactoryTest extends FunSuite { + + given Gopher[Future] = Gopher[Future]() + + + test("unfoldAsync produce stream simple") { + val ch = ReadChannel.unfoldAsync(0){ + (x: Int) => + if (x > 10) then + Future successful None + else + Future successful Some(x,x+1) + } + + ch.atake(20).map{ values => + assert(values(0) == 0) + assert(values(1) == 1) + assert(values(2) == 2) + assert(values.size == 11) + } + + } + + + test("unfoldAsync prodce stream with error") { + val ch = ReadChannel.unfoldAsync(0){ + (x: Int) => + if (x > 3) then + Future failed new RuntimeException("state is too big") + else + Future successful Some(x,x+1) + } + + async { + val r0 = ch.read() + assert(r0 == 0) + val r1 = ch.read() + assert(r1 == 1) + val r2 = ch.read() + assert(r2 == 2) + val r3 = ch.read() + assert(r3 == 3) + var wasTooBig = false + try { + val r4 = ch.read() + }catch{ + case e: RuntimeException => + wasTooBig = true + } + assert(wasTooBig) + } + + + } + +} diff --git a/src/test/scala/gopher/channels/ReadCoroutinesSuite.scala b/shared/src/test/scala/gopher/channels/ReadCoroutinesSuite.scala similarity index 77% rename from src/test/scala/gopher/channels/ReadCoroutinesSuite.scala rename to shared/src/test/scala/gopher/channels/ReadCoroutinesSuite.scala index aacbf8d5..824c9784 100644 --- a/src/test/scala/gopher/channels/ReadCoroutinesSuite.scala +++ b/shared/src/test/scala/gopher/channels/ReadCoroutinesSuite.scala @@ -1,13 +1,14 @@ package gopher.channels import gopher._ -import gopher.channels._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import org.scalatest._ +import cps._ +import munit._ import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.{Channel=>_,_} +import scala.concurrent.duration._ + +import cps.monads.FutureAsyncMonad /* * Go analog: @@ -37,21 +38,22 @@ import scala.concurrent.ExecutionContext.Implicits.global * */ object ReadCoroutines { - - lazy val integers:InputOutput[Int] = - { - val y = gopherApi.makeChannel[Int]() + + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + lazy val integers:Channel[Future,Int,Int] = { + val y = makeChannel[Int]() @volatile var count = 0 - go { + async { while(true) { - y <~ count + y.write(count) count = count + 1; } } y } - def gopherApi = CommonTestObjects.gopherApi + } @@ -59,10 +61,11 @@ object ReadCoroutines { class ReadCoroutinesSuite extends FunSuite { import ReadCoroutines._ + import language.postfixOps test("get few numbers from generarator") { - val p = go { + val p = async { val x0 = (integers ?) assert(x0 == 0) val x1 = (integers ?) @@ -70,7 +73,6 @@ class ReadCoroutinesSuite extends FunSuite { val x2 = (integers ?) 
assert(x2 == 2) } - Await.ready(p, 10 seconds) } diff --git a/shared/src/test/scala/gopher/channels/SelectErrorSuite.scala b/shared/src/test/scala/gopher/channels/SelectErrorSuite.scala new file mode 100644 index 00000000..6fe887f1 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/SelectErrorSuite.scala @@ -0,0 +1,190 @@ +package gopher.channels + +import cps._ +import gopher._ +import munit._ + +import scala.language.postfixOps +import scala.concurrent._ +import scala.concurrent.duration._ + +import cps.monads.FutureAsyncMonad + + +class SelectErrorSuite extends FunSuite +{ + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("select error handling for foreach") { + val channel = makeChannel[Int](100) + + var svEx: Throwable = null + + + var nWrites = 0 + var nErrors = 0 + + //implicit val printCode = cps.macroFlags.PrintCode + //implicit val debugLevel = cps.macroFlags.DebugLevel(10) + + + val g = async{ + try { + + select.loop{ + case x: channel.write if x == nWrites => + nWrites = nWrites + 1 + if (nWrites == 50) then + throw new RuntimeException("Be-be-be") + (nWrites != 100) + case t: Time.after if t == (100 milliseconds) => + false + } + } catch { + case ex: RuntimeException => + svEx = ex + } + } + + + async { + val tf = channel.atake(50) + await(g) + assert(svEx.getMessage == "Be-be-be") + } + + + } + + + test("select error handling for once") { + val channel = makeChannel[Int](100) + + var svEx: Throwable = null + + + val g = async { + try { + select.once { + case x: channel.write if (x==1) => + throw new RuntimeException("Be-be-be") + 2 + //case ex: select.error => + //{ }; svEx = ex // macro-system errors: assignments accepts as default argument + // 3 + } + } catch { + case ex: RuntimeException => + svEx = ex + 3 + } + } + + async { + val r = await(g) + assert(svEx.getMessage == "Be-be-be") + assert(r == 3) + } + + } + + + test("select error handling for input") { + val channel = makeChannel[Int](100) + + var svEx: Throwable = null + + async { + + val out = select.map { s => + var wasError = false + s.apply{ + case x: channel.read => + try { + if (x==55) { + throw new RuntimeException("Be-be-be") + } + } catch { + case ex: RuntimeException => + wasError = true + svEx = ex + } + //case ex: select.error => + // {}; svEx = ex + // 56 + if (wasError) then + 56 + else + x + } + } + + channel.awriteAll(1 to 100) + + val g = out.atake(80) + + val r = await(g) + + assert(svEx.getMessage == "Be-be-be") + + assert(r.filter(_ == 56).size == 2) + + } + + } + + + test("select error handling for fold") { + val ch1 = makeChannel[Int]() + val ch2 = makeChannel[Int]() + val ch3 = makeChannel[Int]() + + var svEx: Throwable = null + + val g = + select.afold((ch1,ch2,0,List[Int]())) { case (x,y,z,l) => + try { + select{ + case z1: ch3.read => + if (z1==10) { + throw new RuntimeException("Be-be-be!") + } + (x,y,z1,z1::l) + case a:x.read => + if (z > 20) { + throw new RuntimeException("Be-be-be-1") + } + (y,x,z+a,z::l) + case b:y.read => + (y,x,z+100*b,z::l) + } + }catch{ + case ex: RuntimeException => + svEx = ex + if (z > 20) { + SelectFold.Done((x,y,z,z::l)) + } else { + (x,y,z,l) + } + } + } + + ch3.awriteAll(1 to 11).flatMap{ + x => ch2.awriteAll(1 to 5) + } + + async { + + val r =await(g) + + assert(svEx.getMessage=="Be-be-be-1") + + } + + } + + +} diff --git a/shared/src/test/scala/gopher/channels/SelectGroupTest.scala b/shared/src/test/scala/gopher/channels/SelectGroupTest.scala new file mode 100644 index 
00000000..57a662d5 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/SelectGroupTest.scala @@ -0,0 +1,66 @@ +package gopher.channels + +import gopher._ +import cps._ +import cps.monads.FutureAsyncMonad +import scala.concurrent.Future +import scala.concurrent.duration._ +import scala.language.postfixOps +import java.util.concurrent.atomic._ + + + +import munit._ + +class SelectGroupTest extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + def exclusiveProcess(name: String, ch1: Channel[Future,Int, Int], ch2: Channel[Future,Int, Int])(implicit loc:munit.Location) = { + test( s"select group should not run few async processes in parallel ($name)" ){ + + + val group = new SelectGroup[Future,Unit](summon[Gopher[Future]]) + + val inW1 = new AtomicInteger(0) + val inW2 = new AtomicInteger(0) + val commonCounter = new AtomicInteger(0) + val myFirst = new AtomicBoolean(false) + + + group.onReadAsync(ch1){ (a) => + val x1 = inW1.incrementAndGet() + commonCounter.incrementAndGet() + Time.asleep(1 second).map{_ => + assert(inW2.get() == 0) + val x2 = inW1.incrementAndGet() + } + }.onReadAsync(ch2){ (a) => + val x1 = inW2.incrementAndGet() + commonCounter.incrementAndGet() + Time.asleep(1 second).map{ _ => + assert(inW1.get() == 0) + val x2 = inW2.incrementAndGet() + } + } + + ch1.awrite(1) + ch2.awrite(2) + + async{ + group.run() + assert(commonCounter.get()==1) + } + + } + } + + exclusiveProcess("unbuffered-unbuffered", makeChannel[Int](), makeChannel[Int]() ) + exclusiveProcess("buffered-buffered", makeChannel[Int](10), makeChannel[Int](10) ) + exclusiveProcess("promise-promise", makeOnceChannel[Int](), makeOnceChannel[Int]() ) + exclusiveProcess("buffered-unbuffered", makeChannel[Int](10), makeChannel[Int]() ) + + +} diff --git a/shared/src/test/scala/gopher/channels/SelectSimpleSuite.scala b/shared/src/test/scala/gopher/channels/SelectSimpleSuite.scala new file mode 100644 index 00000000..17e08703 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/SelectSimpleSuite.scala @@ -0,0 +1,44 @@ +package gopher.channels + + +import munit._ +import scala.language.postfixOps +import scala.concurrent._ +import scala.concurrent.duration._ + +import cps._ +import gopher._ +import cps.monads.FutureAsyncMonad + +class SelectSimpleSuite extends FunSuite +{ + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("simple select in a loop") { + val ch = makeChannel[Int]() + var sum = 0 + val loop = async { + var done = false + while(!done) { + select { + case x: ch.read => + sum = sum+x + if (x > 100) { + done = true + } + } + } + } + ch.awriteAll(1 to 200) + + async { + await(loop) + assert( sum == (1 to 101).sum) + } + } + + +} diff --git a/shared/src/test/scala/gopher/channels/SelectSuite.scala b/shared/src/test/scala/gopher/channels/SelectSuite.scala new file mode 100644 index 00000000..60200000 --- /dev/null +++ b/shared/src/test/scala/gopher/channels/SelectSuite.scala @@ -0,0 +1,315 @@ +package gopher.channels + + +import munit._ +import scala.language.postfixOps +import scala.concurrent._ +import scala.concurrent.duration._ + +import cps._ +import gopher._ +import cps.monads.FutureAsyncMonad + + +class SelectSuite extends FunSuite +{ + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + + test("basic select with reading syntax sugar") { + + val channel = 
makeChannel[Int](100) + + val producer = channel.awriteAll(1 to 1000) + + async { + + @volatile var sum = 0; + val consumer = select.loop.reading(channel){ i => + sum = sum+i + i < 1000 + }.runAsync() + await(consumer) + + val xsum = (1 to 1000).sum + assert(xsum == sum) + } + } + + + + test("basic select with async reading form oter stream in apply") { + + async{ + val channel1 = makeChannel[Int](100) + val channel2 = makeChannel[Int](100) + + val producer1 = channel1.awriteAll(1 to 1000) + val producer2_1 = channel2.awriteAll(1 to 10) + + + @volatile var sum = 0; + // but when reading instead onRead + // TODO: submit bug to doty + val consumer = select.loop.onRead(channel1) { i1 => + val i2 = channel2.read() + sum = sum+i1 + i2 + (i1 < 1000) + } .runAsync() + + assert(consumer.isCompleted == false, "consumer must not be complete after reading first stream" ) + assert(producer1.isCompleted == false) + + val producer2_2 = channel2.awriteAll(1 to 1000) + + await(consumer) + + assert(consumer.isCompleted) + } + + } + + + + test("basic select write with apply") { + + val channel = makeChannel[Int](1) + + async { + @volatile var x = 1 + @volatile var y = 1 + val producer = select.loop.writing(channel,x) { _ => + var z = x + y + x=y + y=z + if (z > 1000) { + channel.close() + false + } else { + true + } + }.runAsync() + + @volatile var last = 0 + channel.foreach{ i=> + //System.out.printn(i) + last=i + } + + assert(last!=0) + await(producer) // should be completed shortly + + } + + } + + + test("basic select timeout (was idle in 0.99) with apply") { + + @volatile var x = 0 + val selector = select.loop.onTimeout(5 millisecond){ dt => + x = x + 1 + x < 10 + }.runAsync() + + async { + await(selector) + assert(x == 10) + } + + } + + + test("basic compound select with apply") { + + async { + val channel1 = makeChannel[Int](1) + val channel2 = makeChannel[Int](1) + val channel3 = makeChannel[Int](1) + val channel4 = makeChannel[Int](1) + + val producer = channel1.awriteAll(1 to 1000) + + @volatile var x=0 + @volatile var nw=0 + @volatile var q = false + @volatile var ch1s=0 + + val selector = select.loop.reading(channel1) { i => + // read ch1 in selector + channel4.awrite(i) + ch1s=i + true + }.reading(channel2) { i => + //{}; // workarround for https://issues.scala-lang.org/browse/SI-8846 + x=i + //Console.println(s"reading from ch2, i=${i}") + true + }.writing(channel3,x) { x => + //{}; // workarround for https://issues.scala-lang.org/browse/SI-8846 + nw=nw+1 + //Console.println(s"writing ${x} to ch3, nw=${nw}") + true + }.onTimeout(5 milliseconds) { dt => + //Console.println(s"idle, exiting") + //{}; + channel4.close() + q=true + false + }.runAsync() + + + for(c <- channel4) + channel2.write(c) + + await(selector) + assert(q==true) + + } + } + + test("basic compound select with loop select syntax") { + + + val channel1 = makeChannel[Int](1) + val channel2 = makeChannel[Int](1) + val channel3 = makeChannel[Int](1) + val channel4 = makeChannel[Int](1) + + val producer = channel1.awriteAll(1 to 1000) + + @volatile var q = false + + val selector = async { + @volatile var x=0 + @volatile var nw=0 + @volatile var ch1s=0 + + //pending + // for syntax will be next: + select.loop{ + case ir: channel1.read => + channel4.awrite(ir) + ch1s=ir + true + case iw: channel3.write if (iw==(x+1)) => + nw = nw+1 + true + case t: Time.after if t == (5 milliseconds) => q=true + false + } + + } + + val copier = async{ + for(c <- channel4) channel2.write(c) + } + + async{ + await(selector) + + assert(q==true) + } + + } + 
+ + + test("basic select.group with reading syntax sugar") { + + async { + val channel1 = makeChannel[String](1) + val channel2 = makeChannel[String](1) + val selector = select.group[String].onRead(channel1)(x=>x) + .onRead(channel2)(x=>x) + .runAsync() + channel2.awrite("A") + val r = await(selector) + assert(r=="A") + + } + } + + + test("basic select.once with writing syntax sugar") { + async { + val channel1 = makeChannel[Int](100) + val channel2 = makeChannel[Int](100) + @volatile var s:Int = 0 + val selector = (select.group.onWrite(channel1,s){ (q:Int) =>"A"} + .onWrite(channel2,s){s=>"B"} + ).runAsync() + //println("before awaiting selector") + val r = await(selector) + //println("after awaiting selector") + + // hi, Captain Obvious + assert(Set("A","B") contains r ) + channel1.close() + channel2.close() + } + } + + + + test("basic select.once with idle syntax sugar".only) { + async{ + val ch = makeChannel[String](1) + val selector = (select.once[String].onRead(ch)(x=>x) + .onTimeout(5 milliseconds)(t => "IDLE") + ).runAsync() + val r = await(selector) + assert(r=="IDLE") + ch.close() + } + } + + + + test("basic select.foreach with partial-function syntax sugar") { + val info = makeChannel[Long](1) + val quit = makeChannel[Int](2) + @volatile var (x,y)=(0L,1L) + + val writer = async { + select.loop{ + case z:info.write if (z==x) => + x = y + y = y + x + true + case q:quit.read => + false + } + } + + @volatile var sum=0L + val reader = { + //implicit val printCode = cps.macroFlags.PrintCode + //implicit val debugLevel = cps.macroFlags.DebugLevel(20) + async{ + select.loop{ + case z:info.read => sum += z + if (sum > 100000) { + //quit.write(1) + await(quit.awrite(1)) + false + } else { + true + } + } + } + } + + async{ + await(writer) + await(reader) + assert(sum > 100000) + } + } + + + +} diff --git a/shared/src/test/scala/gopher/channels/SelectTimeoutSuite.scala b/shared/src/test/scala/gopher/channels/SelectTimeoutSuite.scala new file mode 100644 index 00000000..fd2fe7de --- /dev/null +++ b/shared/src/test/scala/gopher/channels/SelectTimeoutSuite.scala @@ -0,0 +1,118 @@ +package gopher.channels + +import cps._ +import gopher._ +import munit._ + +import scala.language.postfixOps +import scala.concurrent._ +import scala.concurrent.duration._ + +import cps.monads.FutureAsyncMonad + +class SelectTimeoutSuite extends FunSuite +{ + + import scala.concurrent.ExecutionContext.Implicits.global + + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("select with constant timeout which not fire") { + async { + val ch1 = makeChannel[Int](10) + val r = select.map{ s => + s.apply{ + case x:ch1.read => + //System.err.println(s"readed ${x}") + x + case y: Time.after if (y==500.milliseconds) => + //System.err.println(s"timeout ${y}") + -1 + } + } + val f1 = ch1.awrite(1) + val x = r.read() + assert(x==1) + } + } + + + test("select with constant timeout which fire") { + async { + val ch1 = makeChannel[Int](10) + val r = select.map{ s => + s.apply{ + case x:ch1.read => + //System.err.println(s"readed ${x}") + x + case x:Time.after if (x==500.milliseconds) => + //System.err.println(s"timeout ${x}") + -1 + } + } + val x = r.read() + assert(x == -1) + } + } + + + test("timeout in select.loop") { + async { + val ch1 = makeChannel[Int](10) + val ch2 = makeChannel[Int]() + val chS = makeChannel[String](10) + var s = 0 + select.loop{ + case x: ch1.read => + chS.write("1") + true + case x: ch2.read => + chS.write("2") + true + case x: Time.after if x == (100 millis) => + s += 1 + chS.write("t") + 
(! (s > 2) ) + } + assert(s > 2) + } + } + + + test("timeout in select.fold") { + val ch1 = makeChannel[Int](10) + val f = async { + select.fold(0) { state => + select{ + case x: ch1.read => state+1 + case x: Time.after if (x == 100.milliseconds) => + SelectFold.Done((state+10)) + } + } + } + ch1.awrite(1) + async { + val x = await(f) + assert(x==11) + } + } + + + test("timeout in select.once") { + val ch1 = makeChannel[Int](10) + var x = 0 + async { + select.once{ + case y: ch1.read => //println("ch1 readed") + x=1 + case y: Time.after if y == (100 milliseconds) => + //println("ch2 readed") + x=10 + } + assert(x==10) + } + } + + +} diff --git a/shared/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala b/shared/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala new file mode 100644 index 00000000..030732bd --- /dev/null +++ b/shared/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala @@ -0,0 +1,100 @@ +package gopher.channels + +import cps._ +import gopher._ +import munit._ + +import scala.language.postfixOps +import scala.concurrent._ +import scala.concurrent.duration._ + + +class UnbufferedSelectSuite extends FunSuite +{ + + import cps.monads.FutureAsyncMonad + import scala.concurrent.ExecutionContext.Implicits.global + + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("write without read must block ") { + val channel1 = makeChannel[Int](0) + val w1 = channel1.awrite(1) + + assert(!w1.isCompleted) + + val r1 = channel1.aread() + + async { + await(w1) + await(r1) + val rd = await(r1) + assert(rd==1) + } + + } + + + + test("fold over selector with one-direction flow") { + + val ch = makeChannel[Int](0) + val quit = Promise[Boolean]() + val quitChannel = quit.future.asChannel + val r = async { + select.fold(0){ x => + select{ + case a:ch.read => x+a + case q: quitChannel.read => SelectFold.Done(x) + } + } + } + ch.awriteAll(1 to 10) onComplete { _ => quit success true } + async { + val sum = await(r) + assert(sum==(1 to 10).sum) + } + } + + + test("append for finite unbuffered stream") { + val ch1 = makeChannel[Int](0) + val ch2 = makeChannel[Int](0) + val appended = ch1 append ch2 + var sum = 0 + var prev = 0 + var monotonic = true + val f = async { for(s <- appended) { + // bug in compiler 2.11.7 + //w{assert(prev < s)} + //if (prev >= s) w{assert(false)} + if (prev >= s) monotonic=false + prev = s + sum += s + } } + val a1 = ch1.awriteAll(1 to 10) + val a2 = ch2.awriteAll((1 to 10)map(_*100)) + // it works, but for buffered channeld onComplete can be scheduled before. 
So, <= instead == + + async { + await(a1) + while (sum < 55) { + Time.sleep(50 milliseconds) + } + assert(sum == 55) + + // after this - read from 2 + ch1.close() + + await(a2) + while(sum < 5555) { + Time.sleep(50 milliseconds) + } + assert(sum == 5555) + assert(monotonic) + } + } + + +} diff --git a/shared/src/test/scala/gopher/channels/history/AsyncSelectSuite.scala b/shared/src/test/scala/gopher/channels/history/AsyncSelectSuite.scala new file mode 100644 index 00000000..31a864ab --- /dev/null +++ b/shared/src/test/scala/gopher/channels/history/AsyncSelectSuite.scala @@ -0,0 +1,157 @@ +package gopher.channels.history + +import gopher._ +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.language.postfixOps +import scala.util._ +import cps.monads.FutureAsyncMonad + +import munit._ + +class AsyncSelectSuite extends FunSuite { + + val MAX_N=100 + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("async base: channel write, select read") { + + val channel = makeChannel[Int](10) + + channel.awriteAll(1 to MAX_N) + + var sum = 0; + + /* + val consumer = gopherApi.select.loop.onRead(channel){ a + sum = sum + a + (a:Int, cont:ContRead[Int,Unit]) => sum = sum + a + if (a < MAX_N) { + cont + } else { + Done((),cont.flowTermination) + } + }.go + */ + + val consumer = select.loop.onRead(channel){ a => + sum = sum + a + a < MAX_N + }.runAsync() + + //val consumer = go { + // for(s <- select) { + // s match { + // case `channel` ~> (i:Int) => + // //System.err.println("received:"+i) + // sum = sum + i + // if (i==1000) s.shutdown() + // } + // } + // sum + //} + + consumer map { x => + val xsum = (1 to MAX_N).sum + assert(xsum == sum) + } + + } + + + test("async base: select write, select read".only) { + + val channel = makeChannel[Int](10) + + var sum=0 + var curA=0 + val process = select.loop. + onRead(channel){ a => sum = sum + a + //System.err.println("received:"+a) + a < MAX_N + }.onWrite(channel, curA){ a => + curA = curA + 1 + curA < MAX_N + }.runAsync() + + process map { _ => + assert(curA == MAX_N) + } + + } + + + test("async base: select read, timeout action") { + + val channel = makeChannel[Int](10) + + val consumer = channel.atake(100) + + var i = 1 + var d = 1 + val process = select.loop.onWrite(channel, i) { a => + i=i+1 + i < 1000 + }.onTimeout(100 millisecond){ t => + if (i < 100) { + d=d+1 + true + } else { + false + } + }.runAsync() + + for{rp <- process + rc <- consumer } yield assert(i > 100) + + } + + + test("async base: catch exception in read") { + val ERROR_N = 10 + var lastReaded = 0 + val channel = makeChannel[Int](10) + val process = select.loop. 
+ onRead(channel){ + (a:Int) => lastReaded=a + if (a == ERROR_N) { + throw new IllegalStateException("qqq") + } + true + }.runAsync() + + channel.awriteAll(1 to MAX_N) + + process.transform{ + case Failure(ex: IllegalStateException) => + Success(assert(true)) + case Success(_) => + assert("" == "processs should failed wit IllegalStateException") + Failure(new RuntimeException("fail")) + } + + } + + test("async base: catch exception in idle") { + val process = select.loop.onTimeout(100 milliseconds)( + t => + throw new IllegalStateException("qqq") + ).runAsync() + + process.transform{ + case Failure(ex: IllegalStateException) => + Success(assert(true)) + case Success(_) => + assert("" == "processs should failed wit IllegalStateException") + Failure(new RuntimeException("fail")) + } + + } + + + +} + diff --git a/shared/src/test/scala/gopher/monads/ChannelMonadSuite.scala b/shared/src/test/scala/gopher/monads/ChannelMonadSuite.scala new file mode 100644 index 00000000..7cfc1338 --- /dev/null +++ b/shared/src/test/scala/gopher/monads/ChannelMonadSuite.scala @@ -0,0 +1,117 @@ +package gopher.monadexample + +import cps.* +import gopher.* +import munit.* + +import scala.concurrent.* +import scala.concurrent.duration.* +import scala.collection.SortedSet + +import cps.monads.{given,*} +import gopher.monads.{given,*} + +class ChannelMonadSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("using channel as monad and read inside") { + + + val chX = ReadChannel.fromValues[Future,Int](1,2,3,4,5) + val chY = ReadChannel.fromValues[Future,Int](1,2,3,4,5) + + + val squares: ReadChannel[Future,Int] = async[[X] =>> ReadChannel[Future,X]] { + val x: Int = await(chX) + //println(s"reading from X $x") + val y: Int = chY.read() + //println(s"reading from Y $y") + x*y + } + + + + async[Future] { + val a1 = squares.read() + //println(s"a1==${a1}") + assert(a1 == 1) + val a2 = squares.read() + //println(s"a2==${a2}") + assert(a2 == 4) + val a3 = squares.read() + assert(a3 == 9) + val a4 = squares.read() + assert(a4 == 16) + val a5 = squares.read() + assert(a5 == 25) + } + + } + + + test("using channel with flatMap") { + + val chX = ReadChannel.fromValues(1,2,3,4,5) + + val r = async[[X] =>> ReadChannel[Future,X]] { + val x = await(chX) + val fy = if (x %2 == 0) ReadChannel.empty[Future,Int] else ReadChannel.fromIterable(1 to x) + await(fy)*x + } + + async[Future] { + val seq = r.take(20) + //println(seq) + assert(seq(0)==1) + assert(seq(1)==3) + assert(seq(2)==6) + assert(seq(3)==9) + assert(seq(4)==5) + assert(seq(5)==10) + } + + + } + + + test("sieve inside channel monad") { + + val n = 100 + val r = async[[X] =>> ReadChannel[Future,X]] { + var initial = ReadChannel.fromIterable[Future,Int](2 to n) + val drop = ReadChannel.empty[Future,Int] + var prevs = IndexedSeq.empty[Int] + var done = false + val x = await(initial) + if !prevs.isEmpty then + var pi = 0 + while{ + var p = prevs(pi) + if (x % p == 0) then + val r = await(drop) + pi = pi+1 + p*p < x + } do () + prevs = prevs :+ x + x + } + + async[Future] { + val primes = r.take(20) + //println(s"primes: $primes") + assert(primes(0)==2) + assert(primes(1)==3) + assert(primes(2)==5) + assert(primes(3)==7) + assert(primes(6)==17) + assert(primes(9)==29) + } + + + } + + +} diff --git a/shared/src/test/scala/gopher/monads/ChannelTryMonadSuite.scala b/shared/src/test/scala/gopher/monads/ChannelTryMonadSuite.scala new file mode 100644 index 00000000..421049b1 --- 
/dev/null +++ b/shared/src/test/scala/gopher/monads/ChannelTryMonadSuite.scala @@ -0,0 +1,68 @@ +package gopher.monadexample + +import cps.* +import gopher.* +import munit.* + +import scala.concurrent.* +import scala.concurrent.duration.* +import scala.util.* + +import cps.monads.FutureAsyncMonad +import gopher.monads.given + +class ChannelTryMonadSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + + test("failure inside async with ReadTryChannelCpsMonad") { + + val r = async[[X]=>>ReadChannel[Future,Try[X]]] { + val ch1 = ReadChannel.fromIterable[Future,Int](1 to 10) + val x = await(ch1) + if (x % 3 == 0) then + throw new RuntimeException("AAA") + x + } + + async[Future] { + val a1 = r.read() + val a2 = r.read() + val a3 = r.read() + assert(a1 == Success(1)) + assert(a2 == Success(2)) + assert(a3.isFailure) + } + + } + + test("using try/catch along with ReadTryChannelCpsMonad") { + + val r = async[[X]=>>ReadChannel[Future,Try[X]]] { + val ch1 = ReadChannel.fromIterable[Future,Int](1 to 10) + val x = await(ch1) + try { + if (x % 3 == 0) { + throw (new RuntimeException("AAA")) + } + x + } catch { + case ex: RuntimeException => + 100 + } + } + + async[Future] { + val a1 = r.read() + val a2 = r.read() + val a3 = r.read() + assert(a1 == Success(1)) + assert(a2 == Success(2)) + assert(a3 == Success(100)) + } + + } + +} \ No newline at end of file diff --git a/shared/src/test/scala/gopher/monads/Queens.scala b/shared/src/test/scala/gopher/monads/Queens.scala new file mode 100644 index 00000000..89edc8fc --- /dev/null +++ b/shared/src/test/scala/gopher/monads/Queens.scala @@ -0,0 +1,65 @@ +package gopher.monadexample +import cps.* +import gopher.* +import munit.* + +import scala.concurrent.* +import scala.concurrent.duration.* +import scala.collection.SortedSet + +import cps.monads.FutureAsyncMonad +import gopher.monads.given + + +class QueensSuite extends FunSuite { + + import scala.concurrent.ExecutionContext.Implicits.global + given Gopher[Future] = SharedGopherAPI.apply[Future]() + + type State = Vector[Int] + + extension(queens:State) { + + def isUnderAttack(i:Int, j:Int): Boolean = + queens.zipWithIndex.exists{ (qj,qi) => + qi == i || qj == j || i-j == qi-qj || i+j == qi+qj + } + + def asPairs:Vector[(Int,Int)] = + queens.zipWithIndex.map(_.swap) + + } + + val N = 8 + + def putQueen(state:State): ReadChannel[Future,State] = + val ch = makeChannel[State]() + async[Future] { + val i = state.length + if i < N then + for{ j <- 0 until N if !state.isUnderAttack(i,j) } + ch.write(state appended j) + ch.close() + } + ch + + def solutions(state: State): ReadChannel[Future,State] = + async[[X] =>> ReadChannel[Future,X]] { + if(state.length < N) then + val nextState = await(putQueen(state)) + await(solutions(nextState)) + else + state + } + + + test("two first solution for 8 queens problem") { + async[Future] { + val r = solutions(Vector.empty).take(2) + assert(!r.isEmpty) + println(r.map(_.asPairs)) + } + } + + +} diff --git a/shared/src/test/scala/gopher/stream/BasicGeneratorSuite.scala b/shared/src/test/scala/gopher/stream/BasicGeneratorSuite.scala new file mode 100644 index 00000000..5e49b682 --- /dev/null +++ b/shared/src/test/scala/gopher/stream/BasicGeneratorSuite.scala @@ -0,0 +1,92 @@ +package gopher.stream + +import scala.concurrent.* +import scala.concurrent.ExecutionContext.Implicits.global + +import cps.* +import cps.monads.given + +import gopher.* +import gopher.util.Debug + +import munit.* + 
+ +class BasicGeneratorSuite extends FunSuite { + + val N = 10000 + + given Gopher[Future] = SharedGopherAPI[Future]() + + val inMemoryLog = new Debug.InMemoryLog() + + + summon[Gopher[Future]].setLogFun( Debug.inMemoryLogFun(inMemoryLog) ) + + test("simple loop in gopher ReadChannel") { + + val channel = asyncStream[ReadChannel[Future,Int]] { out => + for(i <- 1 to N) { + out.emit(i) + } + } + + + async[Future] { + val r = channel.fold(0)(_ + _) + assert(r == (1 to N).sum) + } + + } + + test("M small loop in gopher ReadChannel") { + + val M = 1000 + val N = 100 + + val folds: Seq[Future[Int]] = for(i <- 1 to M) yield { + val channel = asyncStream[ReadChannel[Future,Int]] { out => + for(i <- 1 to N) { + out.emit(i) + //println("emitted: "+i) + } + } + async[Future]{ + channel.fold(0)(_ + _) + } + } + + val expected = (1 to N).sum + + + folds.foldLeft(Future.successful(())){ (s,e) => + s.flatMap{ r => + e.map{ x => + assert(x == expected) + } } + } + + + } + + test("exception should break loop in gopher generator") { + val channel = asyncStream[ReadChannel[Future, Int]] { out => + for(i <- 1 to N) { + if (i == N/2) then + throw new RuntimeException("bye") + out.emit(i) + } + } + + val res = async[Future] { + val r = channel.fold(0)(_ + _) + assert(r == (1 to N).sum) + } + + res.failed.map(ex => assert(ex.getMessage()=="bye")) + + } + + + +} \ No newline at end of file diff --git a/shared/src/test/scala/hofasync/TestSharedMin.scala b/shared/src/test/scala/hofasync/TestSharedMin.scala new file mode 100644 index 00000000..c03461b9 --- /dev/null +++ b/shared/src/test/scala/hofasync/TestSharedMin.scala @@ -0,0 +1,27 @@ +package hofasync + +import cps._ +import cps.monads.FutureAsyncMonad +import gopher._ +import scala.concurrent.Future + + + +class TestSharedMin extends munit.FunSuite { + + test("shared-init") { + import scala.concurrent.ExecutionContext.Implicits.global + val gopherApi = SharedGopherAPI.apply[Future]() + val ch = gopherApi.makeChannel[Int](1) + val fw1 = ch.awrite(2) + val fr1 = ch.aread() + //implicit val printCode = cps.macroFlags.PrintCode + async[Future] { + val r1 = await(fr1) + assert( r1 == 2 ) + } + } + +} + + diff --git a/src/main/scala/gopher/ChannelClosedException.scala b/src/main/scala/gopher/ChannelClosedException.scala deleted file mode 100644 index 62c5b770..00000000 --- a/src/main/scala/gopher/ChannelClosedException.scala +++ /dev/null @@ -1,10 +0,0 @@ -package gopher - -/** - * throwed when channel is closed: - * - **/ -class ChannelClosedException extends IllegalStateException("Channel is closed") diff --git a/src/main/scala/gopher/Defers.scala b/src/main/scala/gopher/Defers.scala deleted file mode 100644 index 3d51d12f..00000000 --- a/src/main/scala/gopher/Defers.scala +++ /dev/null @@ -1,163 +0,0 @@ -package gopher - -import scala.annotation.tailrec -import scala.util._ -import scala.util.control._ -import scala.reflect.runtime.universe.{Try => _, _} -import java.util.concurrent.atomic._ - -/** - * Construction Defers: defer/recover is alternative mechanism to exception handling, - * simular to one in Go language. - * - * We use one hidden in go and goScope construct are transformed to withDefers usage with - * help of macroses. - * - * It is also possible to use one unsugared (as in next example), but this is a bit verbose. - * - * - *
- *def parseCsv(fname: String): Either[String, Seq[Seq[Double]]] =
- *  withDefer[Either[String,Seq[Seq[Double]]]]{ d =>
- *    val in = Source.fromFile(fname)
- *    d.defer{ 
- *       var r = d.recover{
- *                 case FileNotFoundException => Left("fileNotFound")
- *               }
- *       if (!r) in.close() 
- *       d.recover {
- *         case ex: Throwable => Left(ex.getMessage)
- *       }
- *    }
- *    val retval:Either[String,Seq[Seq[Double]]] = Right{
- *        for( (line, nLine) <- in.getLines.toSeq zip Stream.from(1) ) yield withDefer[Seq[Double]] { d =>
- *           line.split(",") map { s=> 
- *                                 d.defer{
- *                                  d.recover{
- *                                     case ex: NumberFormatException =>
- *                                       throw new RuntimeException(s"parse error in line \${nLine} file \${fname} ")
- *                                  }
- *                                 }
- *                                 s.toDouble 
- *                               }
- *        }.toSeq
- *      }
- *    retval
- *}
- *
- **/ -class Defers[T] -{ - - /** - * can be used in main block - * (which can be plain or async) - * and store body for defered execution after - * evaluation of main block - **/ - def defer(body: =>Unit): Unit = - { - var prev = rl.get - var next = (()=>body)::prev - while(!rl.compareAndSet(prev,next)) { - prev = rl.get - next = (()=>body)::prev - } - } - - /** - * called after execution of main block, where - * all 'defered' blocks will be executed in one thread - * in LIFO order. - */ - def processResult(x: Try[T]):T = - { - tryProcess(x) match { - case Success(v) => v - case Failure(ex) => - throw ex - } - } - - def tryProcess(x: Try[T]):Try[T] = - { - last = x - unroll(rl getAndSet Nil) - last - } - - /** - * called inside defer blocks, where argument(t) - */ - def recover(f: PartialFunction[Throwable,T]): Boolean = { - var retval = false - for(e <- last.failed if (f.isDefinedAt(e) && !e.isInstanceOf[ControlThrowable])) { - last = Success(f(e)) - retval=true - } - retval - } - - @tailrec - private[this] def unroll(l: List[()=>Unit]):Try[T] = - l match { - case Nil => last - case head::tail => try { - head() - } catch { - case ex: Throwable => - last=Failure(ex) - } - // first component is for defer inside defer - unroll(rl.getAndSet(Nil) ++ tail) - } - - private[this] var last: Try[T] = Failure(Defers.NoResultException()) - - private[this] val rl: AtomicReference[List[()=>Unit]] = new AtomicReference(List()) -} - -object Defers -{ - - class NoResultException extends RuntimeException - - object NoResultException - { - def apply() = new NoResultException() - } - - /** - * same as scala.util.Try with one difference: - *ControlThrowable is catched and mapped to Failure. - */ - def controlTry[T](body: =>T):Try[T] = - { - try { - Success(body) - } catch { - case ex: ControlThrowable => Failure(ex) - case NonFatal(ex) => Failure(ex) - } - } - -} - -/** - * syntax sugar, for calling Defers. - */ -object withDefer -{ - - def apply[A](f: Defers[A] => A):A = - { val d = new Defers[A]() - d.processResult(Defers.controlTry(f(d))) - } - - def asTry[A](f: Defers[A] => A) = - { val d = new Defers[A]() - d.tryProcess(Defers.controlTry(f(d))) - } - -} - diff --git a/src/main/scala/gopher/FlowTermination.scala b/src/main/scala/gopher/FlowTermination.scala deleted file mode 100644 index 143315c0..00000000 --- a/src/main/scala/gopher/FlowTermination.scala +++ /dev/null @@ -1,45 +0,0 @@ -package gopher - -import scala.concurrent._ -import scala.annotation._ - -/** - * FlowTermination[-A] - termination of flow. - * - * Inside each block in select loop or - * select apply (once or forever) we have implicit - * FlowTermination entity, which we can use for - * exiting the loop. - * - *{{{ - * select.forever{ - * case x: info.read => Console.println(s"received from info \$x") - * case x: control.read => implicitly[FlowTermination[Unit]].doExit(()) - * } - *}}} - **/ -trait FlowTermination[-A] -{ - - /** - * terminate current flow with exception. - * Mostly used internally. - */ - def doThrow(e: Throwable): Unit - - /** - * terminate current flow and leave `a` as result of flow. - * have no effect if flow is already completed. - */ - def doExit(a:A): A@unchecked.uncheckedVariance - - /** - * check - if flow is completed. 
- */ - def isCompleted: Boolean - - def throwIfNotCompleted(ex: Throwable): Unit - - -} - diff --git a/src/main/scala/gopher/Gopher.scala b/src/main/scala/gopher/Gopher.scala deleted file mode 100644 index 49ef38e9..00000000 --- a/src/main/scala/gopher/Gopher.scala +++ /dev/null @@ -1,78 +0,0 @@ -package gopher - -import akka.actor._ -import com.typesafe.config._ -import scala.concurrent._ -import gopher.channels._ -import java.util.concurrent.Executors - -/** - * Akka extension which provide gopherApi interface - *@see GopherAPI - **/ -class GopherImpl(system: ExtendedActorSystem) - extends GopherAPI(system, - GopherAPIExtensionHelper.retrieveConfiguredExecutor(system)) - with Extension -{ - -} - -/** - * Factory object for Akka extension - * - *{{{ - * val actorSystem = ActorSystem("myapp") - * val gopherApi = Gopher(actorSystem) - *}}} - **/ -object Gopher extends ExtensionId[GopherImpl] - with ExtensionIdProvider -{ - override def lookup = Gopher - - override def createExtension(system: ExtendedActorSystem) - = new GopherImpl(system) - - override def get(system: ActorSystem): GopherImpl = super.get(system) - -} - -object GopherAPIExtensionHelper -{ - - def retrieveConfiguredExecutor(system: ExtendedActorSystem): ExecutionContext = { - val config = system.settings.config.atKey("gopher") - if (config.hasPath("threadPool")) { - var sType = "fixed" - try { - sType = config.getString("threadPool.size") - } catch { - case ex: ConfigException.Missing => - system.log.warning("gopher initialization, threadPool.type is missign, use default" +sType) - } - sType match { - case "fixed" => - var size = 4; - try { - size = config.getInt("threadPool.size") - } catch { - case ex: ConfigException.Missing => - system.log.warning("gopher initialization, threadPool.size is missing, use default: "+size) - } - ExecutionContext.fromExecutorService( Executors.newFixedThreadPool(size) ) - case "cached" => - ExecutionContext.fromExecutorService( Executors.newCachedThreadPool() ) - case "single" => - ExecutionContext.fromExecutorService( Executors.newSingleThreadExecutor() ) - case "global" => ExecutionContext.global - case _ => throw new IllegalStateException("""Invalid threadPool.type in config, must be one of "fixed", "cached", "single", "global" """) - } - } else { - // use defautl execution context. - ExecutionContext.global - } - } - - -} diff --git a/src/main/scala/gopher/GopherAPI.scala b/src/main/scala/gopher/GopherAPI.scala deleted file mode 100644 index 84e90280..00000000 --- a/src/main/scala/gopher/GopherAPI.scala +++ /dev/null @@ -1,199 +0,0 @@ -package gopher - -import akka.actor._ -import akka.pattern._ -import gopher.channels._ -import gopher.transputers._ -import scala.concurrent.{Channel=>_,_} -import scala.concurrent.duration._ -import scala.language.experimental.macros -import scala.language.postfixOps -import scala.reflect.macros.blackbox.Context -import scala.util._ -import java.util.concurrent.atomic.AtomicLong -import com.typesafe.config._ - -/** - * Api for providing access to channel and selector interfaces. - */ -class GopherAPI(as: ActorSystem, es: ExecutionContext) -{ - - /** - * obtain select factory - * - * {{{ - * goopherApi.select.once[String] { - * case x: a.read => s"\${x} from A" - * case x: b.read => s"\${x} from B" - * case _ => "IDLE" - * } - * }}} - */ - val select: SelectFactory = - new SelectFactory(this) - - /** - * Generic schema for making objects, which requiere gopherAPI for constructions. 
- * - **/ - def make[T](args: Any*): T = macro GopherAPI.makeImpl[T] - - /** - * obtain channel - * - *{{{ - * val channel = gopherApi.makeChannel[Int]() - * channel.awrite(1 to 100) - *}}} - */ - @inline - def makeChannel[A](capacity: Int = 0): Channel[A] = - Channel[A](capacity)(this) - - /** - * create effected input with given thread-policy - */ - def makeEffectedInput[A](in: Input[A], threadingPolicy: ThreadingPolicy = ThreadingPolicy.Single): EffectedInput[A] = - EffectedInput(in,threadingPolicy) - - def makeEffectedOutput[A](out: Output[A], threadingPolicy: ThreadingPolicy = ThreadingPolicy.Single) = - EffectedOutput(out,threadingPolicy) - - def makeEffectedChannel[A](ch: Channel[A], threadingPolicy: ThreadingPolicy = ThreadingPolicy.Single) = - EffectedChannel(ch,threadingPolicy) - - /** - * Represent Scala future as channel from which we can read one value. - *@see gopher.channels.FutureInput - */ - def futureInput[A](future:Future[A]): FutureInput[A] = new FutureInput(future, this) - - /** - * Represent Scala iterable as channel, where all values can be readed in order of iteration. - */ - def iterableInput[A](iterable:Iterable[A]): Input[A] = Input.asInput(iterable, this) - - - /** - * create and start instance of transputer with given recovery policy. - *@see gopher.Transputer - */ - def makeTransputer[T <: Transputer](recoveryPolicy:PartialFunction[Throwable,SupervisorStrategy.Directive]): T = macro GopherAPI.makeTransputerImpl2[T] - - def makeTransputer[T <: Transputer]: T = macro GopherAPI.makeTransputerImpl[T] - - /** - * create transputer which contains n instances of X - * where ports are connected to the appropriate ports of each instance in paraller. - * {{{ - * val persistStep = replicate[PersistTransputer](nDBConnections) - * }}} - */ - def replicate[T<: Transputer](n:Int): Transputer = macro Replicate.replicateImpl[T] - - /** - * actor system which was passed during creation - **/ - def actorSystem: ActorSystem = as - - /** - * execution context used for managing calculation steps in channels engine. - **/ - def executionContext: ExecutionContext = es - - /** - * the configuration of the gopher system. By default is contained under 'gopher' key in top-level config. - **/ - def config: Config = as.settings.config.atKey("gopher") - - lazy val idleTimeout: FiniteDuration = { - val m = try { - config.getInt("idle-detection-tick") - } catch { - case ex: ConfigException.Missing => 100 - } - m.milliseconds - } - - def currentFlow = CurrentFlowTermination - - //private[gopher] val idleDetector = new IdleDetector(this) - - private[gopher] val continuatedProcessorRef: ActorRef = { - val props = Props(classOf[ChannelProcessor], this) - actorSystem.actorOf(props,name="channelProcessor") - } - - private[gopher] val channelSupervisorRef: ActorRef = { - val props = Props(classOf[ChannelSupervisor], this) - actorSystem.actorOf(props,name="channels") - } - - private[gopher] val transputerSupervisorRef: ActorRef = { - val props = Props(classOf[TransputerSupervisor], this) - actorSystem.actorOf(props,name="transputerSupervisor") - } - - private[gopher] def newChannelId: Long = - channelIdCounter.getAndIncrement - - private[gopher] def continue[A](next:Future[Continuated[A]], ft:FlowTermination[A]): Unit = - next.onComplete{ - case Success(cont) => - continuatedProcessorRef ! 
cont - case Failure(ex) => ft.throwIfNotCompleted(ex) - }(executionContext) - - private[this] val channelIdCounter = new AtomicLong(0L) - - -} - -object GopherAPI -{ - - def makeImpl[T : c.WeakTypeTag](c:Context)(args: c.Expr[Any]*): c.Expr[T] = { - import c.universe._ - val wt = weakTypeOf[T] - if (wt.companion =:= NoType) { - c.abort(c.prefix.tree.pos,s"type ${wt.typeSymbol} have no companion") - } - val sym = wt.typeSymbol.companion - val r = q"${sym}.apply[..${wt.typeArgs}](..${args})(${c.prefix})" - c.Expr[T](r) - } - - def makeTransputerImpl[T <: Transputer : c.WeakTypeTag](c:Context):c.Expr[T] = { - import c.universe._ - c.Expr[T](q"${c.prefix}.makeTransputer[${weakTypeOf[T]}](gopher.Transputer.RecoveryPolicy.AlwaysRestart)") - } - - def makeTransputerImpl2[T <: Transputer : c.WeakTypeTag](c:Context)(recoveryPolicy:c.Expr[PartialFunction[Throwable,SupervisorStrategy.Directive]]):c.Expr[T] = { - import c.universe._ - //---------------------------------------------- - // generate incorrect code: see https://issues.scala-lang.org/browse/SI-8953 - //c.Expr[T](q"""{ def factory():${c.weakTypeOf[T]} = new ${c.weakTypeOf[T]} { - // def api = ${c.prefix} - // def recoverFactory = factory - // } - // val retval = factory() - // retval - // } - // """) - //---------------------------------------------- - // so, let's create subclass - val implName = c.freshName(c.symbolOf[T].name) - c.Expr[T](q"""{ - class ${implName} extends ${c.weakTypeOf[T]} { - def api = ${c.prefix} - def recoverFactory = () => new ${implName} - } - val retval = new ${implName} - retval.recoverAppend(${recoveryPolicy}) - retval - } - """) - } - -} diff --git a/src/main/scala/gopher/ThreadingPolicy.scala b/src/main/scala/gopher/ThreadingPolicy.scala deleted file mode 100644 index 8c30413a..00000000 --- a/src/main/scala/gopher/ThreadingPolicy.scala +++ /dev/null @@ -1,10 +0,0 @@ -package gopher - -sealed trait ThreadingPolicy - -object ThreadingPolicy -{ - case object Single extends ThreadingPolicy - case object Multi extends ThreadingPolicy -} - diff --git a/src/main/scala/gopher/Transputer.scala b/src/main/scala/gopher/Transputer.scala deleted file mode 100644 index 20cc4d5e..00000000 --- a/src/main/scala/gopher/Transputer.scala +++ /dev/null @@ -1,448 +0,0 @@ -package gopher - -import scala.language.experimental.macros -import gopher.channels._ -import gopher.util._ -import transputers._ -import scala.concurrent._ -import scala.concurrent.duration._ -import akka.actor._ - - -/** - * Reusable unit of application structure, which consists from - * set of input ports, set of output ports and behaviour - * - * Transputers can be created as elementary behaviour, descibed by select - * statement and then can be combined into larger structures - * - * Transputers can be recovered from execeptions (i.e. transputer can be restarted or resume execution) - * or escalated to parent transputers or root superviser. - * - */ -trait Transputer -{ - - class InPort[A](input:Input[A]) extends Input[A] - { - - override def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]],ft: FlowTermination[B]): Unit = - v.cbread(f,ft) - - def api: gopher.GopherAPI = Transputer.this.api - - def connect(x: Input[A]): Unit = - { v=x } - - def connect(outPort: Transputer#OutPort[A], bufferSize:Int = 1): Unit = - { - val ch = api.makeChannel[A](bufferSize) - v = ch - outPort.v = ch - } - - def <~~<(x: Transputer#OutPort[A]) = connect(x) - - // get other side of port input if this is possible. 
- def outputSide : Option[Output[A]] = - v match { - case out: Output[A] => Some(out) - case _ => None - } - - def *! : Output[A] = outputSide.getOrElse( - throw new IllegalStateException("Can't get output side of port input") - ) - - var v: Input[A] = input - - } - - object InPort - { - @inline def apply[A]():InPort[A] = new InPort(new LazyChannel[A](api)) - } - - class OutPort[A](output:Output[A]) extends Output[A] - { - override def cbwrite[B](f: ContWrite[A,B] => Option[(A, Future[Continuated[B]])], ft: FlowTermination[B]): Unit = - { - v.cbwrite(f, ft) - } - - def api: gopher.GopherAPI = Transputer.this.api - - def connect(x: Output[A]): Unit = - { v=x } - - def connect(inPort: Transputer#InPort[A], bufferSize:Int = 1): Unit = - { - val ch = api.makeChannel[A](bufferSize) - v = ch - inPort.connect(ch) - } - - def >~~> (x: Transputer#InPort[A]) = connect(x) - - def inputSide: Option[Input[A]] = - v match { - case in: Input[A] => Some(in) - case _ => None - } - - def *! :Input[A] = inputSide.getOrElse( - throw new IllegalStateException("Can't get input side of port output "+v) - ) - - var v: Output[A] = output - } - - object OutPort - { - @inline def apply[A]():OutPort[A] = new OutPort(new LazyChannel[A](api)) - } - - def +(p: Transputer) = new ParTransputer(api, Seq(this,p)) - - def start():Future[Unit] = - { - onStart() - api.transputerSupervisorRef ! TransputerSupervisor.Start(this) - flowTermination.future - } - - def goOnce: Future[Unit] - - def stop(): Unit - - /** - * set recover function - **/ - def recover(f: PartialFunction[Throwable,SupervisorStrategy.Directive]): this.type = - { recoveryFunction = f - this - } - - /** - * append recover function to existing - **/ - def recoverAppend(f: PartialFunction[Throwable,SupervisorStrategy.Directive]): this.type = - { recoveryFunction = recoveryFunction orElse f - this - } - - /** - * set failure limit. - * (when number of failures during windowsDuration is bigger than maxFailures, - * TooManyFailures exception is escalated to parent transputer. - **/ - def failureLimit(maxFailures:Int = recoveryLimits.maxFailures, - windowDuration: Duration = recoveryLimits.windowDuration): this.type = - { - recoveryLimits = Transputer.RecoveryLimits(maxFailures, windowDuration) - this - } - - def api: GopherAPI - - // internal API. - - /** - * copyState from previous instance when transputer is restarted. - * can be overriden in subclasses (by default: do nothing) - * - * Note, that port connection is restored before call of copyState - */ - def copyState(prev: Transputer): Unit = {} - - /** - * copy conection from previous instance when transputer is - * restarted. - **/ - def copyPorts(prev: Transputer): Unit = - { - import scala.reflect._ - import scala.reflect.runtime.{universe=>ru} - val mirror = ru.runtimeMirror(this.getClass.getClassLoader) - - def copyVar[T:ClassTag:ru.TypeTag,V:ClassTag](x:T, y: T, varName: String): Unit = - { - val imx = mirror.reflect(x); - val imy = mirror.reflect(y); - val field = ru.typeOf[T].decl(ru.TermName(varName)).asTerm.accessed.asTerm - - val v = imy.reflectField(field).get - imx.reflectField(field).set(v) - } - - def copyPorts[T:ru.TypeTag:ClassTag]:Unit = - { - val List(newIns, prevIns) = List(this, prev) map (ReflectUtil.retrieveVals[T,Transputer](ru)(mirror,_)) - for((x,y) <- newIns zip prevIns) copyVar(x,y,"v") - } - - copyPorts[InPort[_]]; - copyPorts[OutPort[_]]; - } - - - /** - * Used for recover failed instances - */ - def recoverFactory: ()=>Transputer - - /** - * called when transducer is started. 
- */ - protected def onStart() { } - - /** - * called when transducer is restarted. - * - *@param prev - previous (i.e. failed) instance of trnasputer. - */ - protected def onRestart(prev:Transputer) { } - - - /** - * called when transducer is choose to resume durign recovery. - */ - protected def onResume() { } - - /** - * called when failure is escalated. - **/ - protected def onEscalatedFailure(ex: Throwable) { } - - /** - * called when transputer is stopped. - */ - protected def onStop() { } - - private[gopher] def beforeResume() - { - flowTermination = createFlowTermination() - onResume(); - } - - private[gopher] def beforeRestart(prev: Transputer) - { - if (!(prev eq null)) { - recoveryStatistics = prev.recoveryStatistics - recoveryLimits = prev.recoveryLimits - recoveryFunction = prev.recoveryFunction - parent = prev.parent - } - onRestart(prev) - } - - private[gopher] var recoveryStatistics = Transputer.RecoveryStatistics( ) - private[gopher] var recoveryLimits = Transputer.RecoveryLimits( ) - private[gopher] var recoveryFunction: PartialFunction[Throwable, SupervisorStrategy.Directive] = PartialFunction.empty - private[gopher] var parent: Option[Transputer] = None - private[gopher] var flowTermination: PromiseFlowTermination[Unit] = createFlowTermination() - private[gopher] var replicaNumber = 1 - - private[this] def createFlowTermination() = new PromiseFlowTermination[Unit]() { - - override def doThrow(e:Throwable): Unit = - { - onEscalatedFailure(e) - super.doThrow(e) - } - - override def doExit(a:Unit): Unit = - { - super.doExit(()) - onStop() - } - - - } - - /** - * return replica number of current instance, if - * transponder run replicated. - **/ - protected def replica = replicaNumber - - import akka.event.LogSource - implicit def logSource: LogSource[Transputer] = new LogSource[Transputer] { - def genString(t: Transputer) = t.getClass.getName+"/"+t.replica - } - -} - -/** - * mix this trait to ypu transputer for access to akka logging. 
- **/ -trait TransputerLogging -{ - this: Transputer => - - val log = akka.event.Logging(api.actorSystem, this) -} - -object Transputer -{ - - - - case class RecoveryStatistics( - var nFailures: Int = 0, - var windowStart: Long = 0, - var firstFailure: Option[Throwable] = None, - var lastFailure: Option[Throwable] = None - ) { - - def failure(ex: Throwable, recoveryLimits: RecoveryLimits, nanoNow: Long): Boolean = - { - val same = sameWindow(recoveryLimits, nanoNow) - nFailures +=1 - if (firstFailure.isEmpty) { - firstFailure = Some(ex) - } - lastFailure = Some(ex) - return (same && nFailures >= recoveryLimits.maxFailures) - } - - - def sameWindow(recoveryLimits: RecoveryLimits, nanoNow: Long): Boolean = - { - if ((nanoNow - windowStart) > recoveryLimits.windowDuration.toNanos) { - nFailures = 0 - windowStart = nanoNow - firstFailure = None - lastFailure = None - false - } else { - true - } - } - - } - - - case class RecoveryLimits( - var maxFailures: Int = 10, - var windowDuration: Duration = 1 second - ) - - class TooManyFailures(t: Transputer) extends RuntimeException(s"Too many failures for ${t}", t.recoveryStatistics.firstFailure.get) - { - addSuppressed(t.recoveryStatistics.lastFailure.get) - } - - object RecoveryPolicy { - import scala.util.control._ - - val AlwaysRestart: PartialFunction[Throwable,SupervisorStrategy.Directive] = - { case x: TooManyFailures => SupervisorStrategy.Escalate - case NonFatal(ex) => SupervisorStrategy.Restart - } - - val AlwaysEscalate: PartialFunction[Throwable,SupervisorStrategy.Directive] = - { case ex => SupervisorStrategy.Escalate } - - } - - - - -} - -/** - * Transputer, where dehaviour can be described by selector function - * - **/ -trait SelectTransputer extends ForeverSelectorBuilder with Transputer -{ - - /** - * configure loop in selector - */ - def loop(f: PartialFunction[Any,Unit]): Unit = macro SelectorBuilderImpl.loop[Unit] - - - def stop():Unit = stopFlowTermination() - - /** - * When called inside loop - stop execution of selector, from outside - terminate transformer - */ - private[this] def stopFlowTermination(implicit ft:FlowTermination[Unit] = flowTermination): Unit = - ft.doExit(()) - - protected override def onEscalatedFailure(ex: Throwable): Unit = - { - super.onEscalatedFailure(ex) - selector.throwIfNotCompleted(ex) - } - - protected override def onStop(): Unit = - { - super.onStop() - if (!selector.isCompleted) { - selector.doExit(()) - } - } - - def goOnce: Future[Unit] = selectorRun - - private[gopher] override def beforeResume() : Unit = - { - super.beforeResume() - //selector.clear() - selectorInit() - } - - protected var selectorInit: ()=>Unit = - { () => throw new IllegalStateException("selectorInit us not initialized yet") } - -} - -class ParTransputer(override val api: GopherAPI, var childs:Seq[Transputer]) extends Transputer -{ - - childs.foreach(_.parent = Some(this)) - - def goOnce: Future[Unit] = { - implicit val ec: ExecutionContext = api.executionContext - @volatile var inStop = false - def withStopChilds[A](f: Future[A]):Future[A] = - { - f.onComplete{ _ => - if (!inStop) { - inStop = true - stopChilds() - } - } - f - } - withStopChilds( - Future.sequence(childs map( x=> withStopChilds(x.start()) ) ) - ) map (_ => ()) - } - - def stop(): Unit = - { - stopChilds() - } - - override def +(p: Transputer) = new ParTransputer(api, childs :+ p) - - private[this] def stopChilds(): Unit = - for(x <- childs if (!x.flowTermination.isCompleted) ) { - x.flowTermination.doExit(()) - } - - - def recoverFactory: () => Transputer 
= () => new ParTransputer(api,childs) - - private[gopher] override def beforeResume() : Unit = - { - super.beforeResume() - for(ch <- childs) ch.beforeResume() - } - -} - diff --git a/src/main/scala/gopher/channels/ActorBackedChannel.scala b/src/main/scala/gopher/channels/ActorBackedChannel.scala deleted file mode 100644 index f17af532..00000000 --- a/src/main/scala/gopher/channels/ActorBackedChannel.scala +++ /dev/null @@ -1,86 +0,0 @@ -package gopher.channels - - -import akka.actor._ -import akka.pattern._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util._ -import scala.language.experimental.macros -import scala.language.postfixOps -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ - -class ActorBackedChannel[A](futureChannelRef: Future[ActorRef], override val api: GopherAPI) extends Channel[A] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]], flwt: FlowTermination[B] ): Unit = - { - val cont = ContRead(f,this, flwt) - def applyClosed() = - { - f(cont) foreach { f1 => try { - api.continue( f1(ContRead.ChannelClosed), flwt) - } catch { - case ex: Throwable => flwt.doThrow(ex) - } - } - } - implicit val ec = api.executionContext - if (closed) { - if (closedEmpty) { - applyClosed(); - } else { - // TODO: ask timeput on closed channel set in config. - futureChannelRef.foreach{ ref => val f = ref.ask(ClosedChannelRead(cont))(5 seconds) - f.onComplete{ - case Failure(e) => - if (e.isInstanceOf[AskTimeoutException]) { - applyClosed() - } - case Success(ChannelCloseProcessed(0)) => - closedEmpty = true - case _ => // do nothing - } - } - } - } else { - futureChannelRef.foreach( _ ! cont ) - } - } - - private def contRead[B](x:ContRead[A,B]): Unit = - futureChannelRef.foreach( _ ! x )(api.executionContext) - - def cbwrite[B](f: ContWrite[A,B] => Option[(A,Future[Continuated[B]])], flwt: FlowTermination[B] ): Unit = - if (closed) { - flwt.doThrow(new ChannelClosedException()) - } else { - futureChannelRef.foreach( _ ! ContWrite(f,this, flwt) )(api.executionContext) - } - - private def contWrite[B](x:ContWrite[A,B]): Unit = - futureChannelRef.foreach( _ ! x )(api.executionContext) - - //private[this] implicit val ec = api.executionContext - - def isClosed: Boolean = closed - - def close(): Unit = - { - futureChannelRef.foreach( _ ! ChannelClose )(api.executionContext) - closed=true - } - - - override protected def finalize(): Unit = - { - // allow channel actor be grabage collected - futureChannelRef.foreach( _ ! 
ChannelRefDecrement )(api.executionContext) - } - - private var closed = false - private var closedEmpty = false -} - diff --git a/src/main/scala/gopher/channels/BaseBufferedChannelActor.scala b/src/main/scala/gopher/channels/BaseBufferedChannelActor.scala deleted file mode 100644 index cc3b25b3..00000000 --- a/src/main/scala/gopher/channels/BaseBufferedChannelActor.scala +++ /dev/null @@ -1,52 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.language._ -import scala.concurrent._ -import scala.collection.immutable._ -import gopher._ - - -/** - * ChannelActor - actor, which leave - */ -abstract class BaseBufferedChannelActor[A](id:Long, api: GopherAPI) extends ChannelActor[A](id,api) -{ - - def processReaders() : Boolean = - { - var retval = false - while(!readers.isEmpty && nElements > 0) { - val current = readers.head - readers = readers.tail - retval ||= processReader(current) - } - retval - } - - def stopIfEmpty: Boolean = - { - require(closed==true) - if (nElements == 0) { - stopReaders() - } - stopWriters() - if (nElements == 0) { - if (nRefs == 0) { - // here we leave 'closed' channels in actor-system untile they will be - // garbage-collected. TODO: think about actual stop ? - self ! GracefullChannelStop - } - true - } else - false - } - - protected[this] def processReader[B](reader:ContRead[A,B]): Boolean - - - protected[this] def getNElements(): Int = nElements - - protected[this] var nElements=0 - -} diff --git a/src/main/scala/gopher/channels/BufferedChannelActor.scala b/src/main/scala/gopher/channels/BufferedChannelActor.scala deleted file mode 100644 index 8ea92904..00000000 --- a/src/main/scala/gopher/channels/BufferedChannelActor.scala +++ /dev/null @@ -1,111 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.language._ -import scala.concurrent._ -import scala.collection.immutable._ -import gopher._ - - -/** - * ChannelActor - actor, which leave - */ -class BufferedChannelActor[A](id:Long, capacity:Int, api: GopherAPI) extends BaseBufferedChannelActor[A](id,api) -{ - - - protected[this] def onContWrite(cwa: gopher.channels.ContWrite[A, _]): Unit = - { - if (closed) { - cwa.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - } else { - if (nElements==capacity) { - writers = writers :+ cwa - } else { - val prevNElements = nElements - if (processWriter(cwa) && prevNElements==0) { - processReaders() - } - } - } - } - - protected[this] def onContRead(cra: gopher.channels.ContRead[A, _]): Unit = - { - if (nElements==0) { - if (closed) { - processReaderClosed(cra) - } else { - readers = readers :+ cra - } - } else { - val prevNElements = nElements - if (processReader(cra)) { - if (closed) { - stopIfEmpty - } else if (prevNElements==capacity) { - checkWriters - } - } - } - } - - - protected[this] def processReader[B](reader:ContRead[A,B]): Boolean = - reader.function(reader) match { - case Some(f1) => - val readedElement = elementAt(readIndex) - nElements-=1 - readIndex+=1 - readIndex%=capacity - Future{ - val cont = f1(ContRead.In value readedElement ) - api.continue(cont, reader.flowTermination) - }(api.executionContext) - true - case None => - false - } - - - def checkWriters: Boolean = - { - var retval = false - while(!writers.isEmpty && nElements < capacity) { - val current = writers.head - writers = writers.tail - val processed = processWriter(current) - retval ||= processed - } - retval - } - - private[this] def processWriter[B](writer:ContWrite[A,B]): Boolean = - writer.function(writer) match { - case Some((a,cont)) => 
- nElements+=1 - setElementAt(writeIndex,a) - writeIndex+=1 - writeIndex%=capacity - api.continue(cont, writer.flowTermination) - true - case None => - false - } - - - @inline - private[this] def elementAt(i:Int): A = - buffer(i).asInstanceOf[A] - - @inline - private[this] def setElementAt(i:Int, a:A): Unit = - buffer(i) = a.asInstanceOf[AnyRef] - - - // boxed representation of type. - val buffer= new Array[AnyRef](capacity+1) - var readIndex=0 - var writeIndex=0 - -} diff --git a/src/main/scala/gopher/channels/Channel.scala b/src/main/scala/gopher/channels/Channel.scala deleted file mode 100644 index 9cbc4549..00000000 --- a/src/main/scala/gopher/channels/Channel.scala +++ /dev/null @@ -1,53 +0,0 @@ -package gopher.channels - - -import akka.actor._ -import akka.pattern._ -import scala.concurrent._ -import scala.concurrent.duration._ -import gopher._ -import scala.language.experimental.macros -import scala.language.postfixOps -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ - -trait Channel[A] extends InputOutput[A] -{ - - thisChannel => - - def close(): Unit - - // override some operations - - class Filtered(p:A=>Boolean) extends super.Filtered(p) - with Channel[A] - { - def cbwrite[B](f: ContWrite[A,B] => Option[(A,Future[Continuated[B]])],ft: FlowTermination[B]):Unit = - thisChannel.cbwrite(f,ft) - - def close() = thisChannel.close() - } - - override def filter(p:A=>Boolean): Channel[A] = new Filtered(p) - -} - -object Channel -{ - - def apply[A](capacity: Int = 0)(implicit api:GopherAPI):Channel[A] = - { - require(capacity >= 0) - import api._ - val nextId = newChannelId - val futureChannelRef = (channelSupervisorRef.ask( - NewChannel(nextId, capacity) - )(10 seconds) - .asInstanceOf[Future[ActorRef]] - ) - - new ActorBackedChannel[A](futureChannelRef, api) - } - -} diff --git a/src/main/scala/gopher/channels/ChannelActor.scala b/src/main/scala/gopher/channels/ChannelActor.scala deleted file mode 100644 index ef9c67e0..00000000 --- a/src/main/scala/gopher/channels/ChannelActor.scala +++ /dev/null @@ -1,97 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.language._ -import scala.concurrent._ -import scala.collection.immutable._ -import gopher._ - - -/** - * ChannelActor - actor, which leave - */ -abstract class ChannelActor[A](id:Long, api: GopherAPI) extends Actor -{ - - def receive = { - case cw@ContWrite(_,_,ft) => - val cwa = cw.asInstanceOf[ContWrite[A,cw.R]] - onContWrite(cwa) - case cr@ContRead(_,_,ft) => - val cra = cr.asInstanceOf[ContRead[A,cr.R]] - onContRead(cra) - case ccr@ClosedChannelRead(_) => - self ! ccr.cont - sender ! 
ChannelCloseProcessed(getNElements()) - case ChannelClose => - closed=true - stopIfEmpty - case ChannelRefDecrement => - nRefs -= 1 - if (nRefs == 0) { - stopAll - } - case ChannelRefIncrement => - nRefs += 1 - case GracefullChannelStop => - context.stop(self) - } - - protected[this] def onContWrite(cw:ContWrite[A,_]):Unit - - protected[this] def onContRead(cr:ContRead[A,_]):Unit - - protected[this] def getNElements():Int - - protected[this] def processReaderClosed[B](reader:ContRead[A,B]): Boolean = - reader.function(reader) match { - case Some(f1) => api.continue(f1(ContRead.ChannelClosed), reader.flowTermination) - true - case None => false - } - - protected[this] def stopReaders(): Unit = - { - while(!readers.isEmpty) { - val reader = readers.head - val c = reader.asInstanceOf[ContRead[A,reader.R]] - readers = readers.tail - c.function(c) foreach { f1 => - api.continue(f1(ContRead.ChannelClosed), c.flowTermination) - } - } - } - - protected[this] def stopWriters(): Unit = - { - while(!writers.isEmpty) { - val writer = writers.head - val c = writer.asInstanceOf[ContWrite[A,writer.R]] - writers = writers.tail - c.function(c) foreach { - f1 => c.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - } - } - } - - def stopIfEmpty: Boolean - - def stopAll: Unit = - { - if (!closed) { - closed=true - } - if (!stopIfEmpty) { - // stop anyway - self ! GracefullChannelStop - } - } - - protected[this] implicit def ec: ExecutionContext = api.executionContext - - protected[this] var closed=false - var readers = Queue[ContRead[A,_]] () - var writers = Queue[ContWrite[A,_]] () - var nRefs = 1 - -} diff --git a/src/main/scala/gopher/channels/ChannelActorMessage.scala b/src/main/scala/gopher/channels/ChannelActorMessage.scala deleted file mode 100644 index bd35b3f9..00000000 --- a/src/main/scala/gopher/channels/ChannelActorMessage.scala +++ /dev/null @@ -1,45 +0,0 @@ -package gopher.channels - -import scala.language.existentials - -sealed trait ChannelActorMessage - -case object ChannelClose extends ChannelActorMessage - -/** - * this is message wich send to ChannelActor, when we - * know, that channel is closed. In such case, we don't - * konw: is actor stopped or not, So, we say this message - * (instead read) and wait for reply. If reply is not received - * within given timeout: think that channel is-dead. - */ -case class ClosedChannelRead(cont: ContRead[_,_]) extends ChannelActorMessage - -/** - * this message is send, when all references to - * some instance of this channel are unreachable, - * so if we have no other instances (i.e. remote - * channel incarnation), than we must destroy channel. - **/ -case object ChannelRefDecrement extends ChannelActorMessage - -/** - * this message is send, when we create new remote - * reference to channel, backed by this actor. - **/ -case object ChannelRefIncrement extends ChannelActorMessage - -/** - * result of CloseChannelRead, return number of elements - * left to read - */ -case class ChannelCloseProcessed(nElements: Int) extends ChannelActorMessage - -/** - * When we decide to stop channel, do it via special message, - * to process one after messages, which exists now in queue. 
- * - * Note, that channel-stop messages can be send only from ChannelActor - */ -case object GracefullChannelStop extends ChannelActorMessage - diff --git a/src/main/scala/gopher/channels/ChannelProcessor.scala b/src/main/scala/gopher/channels/ChannelProcessor.scala deleted file mode 100644 index 3d81d6af..00000000 --- a/src/main/scala/gopher/channels/ChannelProcessor.scala +++ /dev/null @@ -1,42 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.concurrent._ -import gopher._ - -class ChannelProcessor(api: GopherAPI) extends Actor -{ - - def receive = { - case Done(r,ft) => - if (!ft.isCompleted) { - ft.doExit(r) - } - case sk@Skip(f,ft) => if (!ft.isCompleted) { - try{ - f(sk) match { - case Some(cont) => { - val nowSender = sender - cont.foreach( nowSender ! _ ) - } - case None => /* do nothing */ - } - }catch{ - case ex: Throwable => ft.doThrow(ex) - } - } - case cr@ContRead(f,ch, ft) => - if (!ft.isCompleted) { - ch.cbread[cr.R](f,ft) - } - case cw@ContWrite(f,ch, ft) => - if (!ft.isCompleted) { - ch.cbwrite[cw.R]( f , ft) - } - case Never => /* do nothing */ - } - - - implicit val ec: ExecutionContext = api.executionContext - -} diff --git a/src/main/scala/gopher/channels/ChannelSupervisor.scala b/src/main/scala/gopher/channels/ChannelSupervisor.scala deleted file mode 100644 index 162e3439..00000000 --- a/src/main/scala/gopher/channels/ChannelSupervisor.scala +++ /dev/null @@ -1,26 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.concurrent._ -import gopher._ - -case class NewChannel(id: Long, capacity: Int) -case class CloseChannel(id: Long) - -class ChannelSupervisor(api: GopherAPI) extends Actor -{ - - def receive = { - case NewChannel(id,capacity) => - val actorClass = capacity match { - case 0 => classOf[UnbufferedChannelActor[_]] - case Int.MaxValue => classOf[GrowingBufferedChannelActor[_]] - case _ => classOf[BufferedChannelActor[_]] - } - val props = Props(actorClass,id, capacity, api) - sender ! context.actorOf(props, name=id.toString) - case CloseChannel(id) => - context.actorSelection(id.toString) ! ChannelClose - } - -} diff --git a/src/main/scala/gopher/channels/Continuated.scala b/src/main/scala/gopher/channels/Continuated.scala deleted file mode 100644 index e1abf34f..00000000 --- a/src/main/scala/gopher/channels/Continuated.scala +++ /dev/null @@ -1,143 +0,0 @@ -package gopher.channels; - -import scala.language._ -import scala.concurrent._ -import java.util.concurrent.atomic.AtomicBoolean -import gopher._ - -/** - * represent continuated computation from A to B. - */ -sealed trait Continuated[+A] -{ - type R = X forSome { type X <: A @annotation.unchecked.uncheckedVariance } -} - -sealed trait FlowContinuated[A] extends Continuated[A] -{ - def flowTermination: FlowTermination[A] -} - -sealed trait ReadyTestResult[+A] - -object ReadyTestResult -{ - sealed trait NotReady[+A] extends ReadyTestResult[A] - case object WillBeCheckedLater extends NotReady[Nothing] - case class CheckOther[A,B](other:A=>ReadyTestResult[B]) extends NotReady[B] - - case class Ready[A](value:A) extends ReadyTestResult[A] - -} - - - -case class Done[A](result:A, override val flowTermination: FlowTermination[A]) extends FlowContinuated[A] - -/** - * read A and compute B as result. 
- */ -case class ContRead[A,B]( - function: ContRead[A,B] => - Option[ - ContRead.In[A] => Future[Continuated[B]] - ], - channel: Input[A], - override val flowTermination: FlowTermination[B]) extends FlowContinuated[B] -{ - type El = A - type F = ContRead[A,B]=>Option[ContRead.In[A] => Future[Continuated[B]]] -} - -object ContRead -{ - - - sealed trait In[+A] - case class Value[+A](a:A) extends In[A] - case object Skip extends In[Nothing] - case object ChannelClosed extends In[Nothing] - case class Failure(ex:Throwable) extends In[Nothing] - - - object In - { - def value[A](a:A) = ContRead.Value(a) - def failure(ex:Throwable) = ContRead.Failure(ex) - def channelClosed = ContRead.ChannelClosed - def skip = ContRead.Skip - } - - @inline - def liftInValue[A,B](prev: ContRead[A,B])(f: Value[A] => Future[Continuated[B]] ): In[A] => Future[Continuated[B]] = - { - case v@Value(a) => f(v) - case Skip => Future successful prev - case ChannelClosed => prev.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - Never.future - case Failure(ex) => prev.flowTermination.doThrow(ex) - Never.future - } - - @inline - def liftIn[A,B](prev: ContRead[A,B])(f: A => Future[Continuated[B]] ): In[A] => Future[Continuated[B]] = - { - // liftInValue(prev)(f(_.a)) - // we do ilining by hands instead. - case Value(a) => f(a) - case Skip => Future successful prev - case ChannelClosed => prev.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - Never.future - case Failure(ex) => prev.flowTermination.doThrow(ex) - Never.future - } - - - def chainIn[A,B](prev: ContRead[A,B])(fn: (In[A], In[A] => Future[Continuated[B]]) => Future[Continuated[B]] ): - Option[In[A] => Future[Continuated[B]]] = - prev.function(prev) map (f1 => liftInValue(prev) { v => fn(v,f1) } ) - - type Aux[A,B] = ContRead[A,B]{ type El=A - type S=B - type F = ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]] - } - - type AuxF[A,B] = ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]] -} - - -/** - * write A and compute B as result - */ -case class ContWrite[A,B](function: ContWrite[A,B] => Option[(A,Future[Continuated[B]])], channel: Output[A], override val flowTermination: FlowTermination[B]) extends FlowContinuated[B] -{ - type El = A - type F = ContWrite[A,B]=>Option[(A,Future[Continuated[B]])] -} - -object ContWrite -{ - type Aux[A,B] = ContWrite[A,B] - type AuxF[A,B] = ContWrite[A,B]=>Option[(A,Future[Continuated[B]])] -} - -/** - * skip (i.e. do some operation not related to reading or writing.) 
- */ -case class Skip[A](function: Skip[A] => Option[Future[Continuated[A]]], override val flowTermination: FlowTermination[A]) extends FlowContinuated[A] - -object Skip -{ - type AuxF[A] = Skip[A]=>Option[Future[Continuated[A]]] -} - -/** - * never means the end of conversation - */ -case object Never extends Continuated[Nothing] -{ - val future = Future successful Never -} - - - diff --git a/src/main/scala/gopher/channels/CurrentFlowTermination.scala b/src/main/scala/gopher/channels/CurrentFlowTermination.scala deleted file mode 100644 index d0f14b3c..00000000 --- a/src/main/scala/gopher/channels/CurrentFlowTermination.scala +++ /dev/null @@ -1,46 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.whitebox.Context -import scala.reflect.api._ -import gopher._ -import scala.concurrent._ -import scala.annotation._ - -object CurrentFlowTermination -{ - - - @compileTimeOnly("exit must be used only inside goScope or selector callbacks") - def exit[A](a: A): A = ??? - - def exitDelayed[A](a: A): A = - macro exitImpl[A] - - - def doThrow(e: Throwable): Unit = - macro doThrowImpl - - - def exitImpl[A](c:Context)(a: c.Expr[A])(implicit wtt: c.WeakTypeTag[A]): c.Expr[A]= - { - import c.universe._ - c.Expr[A](q""" - implicitly[FlowTermination[${wtt}]].doExit(${a}) - """) - } - - def doThrowImpl(c:Context)(e: c.Expr[Throwable]): c.Expr[Unit]= - { - import c.universe._ - c.Expr[Unit](q"implicitly[FlowTermination[Any]].doThrow(${e})") - } - - def shutdownImpl(c:Context)(): c.Expr[Unit] = - { - import c.universe._ - exitImpl[Unit](c)(c.Expr[Unit](q"()")) - } - - -} diff --git a/src/main/scala/gopher/channels/DuppedInput.scala b/src/main/scala/gopher/channels/DuppedInput.scala deleted file mode 100644 index 39563223..00000000 --- a/src/main/scala/gopher/channels/DuppedInput.scala +++ /dev/null @@ -1,42 +0,0 @@ -package gopher.channels - -import gopher._ -import scala.annotation._ -import scala.concurrent._ -import scala.util._ -import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.atomic.AtomicInteger -import async.Async._ - - - -class DuppedInput[A](origin:Input[A]) -{ - - def pair = (sink1, sink2) - - val sink1 = api.makeChannel[A](1) - val sink2 = api.makeChannel[A](1) - - // can't use macroses, so unroll by hands. 
- private val selector = api.select.forever; - selector.readingWithFlowTerminationAsync(origin, - (ec:ExecutionContext, ft: FlowTermination[Unit], a: A) => async{ - val f1 = sink1.awrite(a) - val f2 = sink2.awrite(a) - await(f1) - await(f2) - (); - }(ec) ) - selector.go.failed.foreach{ - case ex: ChannelClosedException => - sink1.close() - sink2.close() - } - - def api = origin.api - private implicit def ec:ExecutionContext = api.executionContext - - - -} diff --git a/src/main/scala/gopher/channels/EffectedChannel.scala b/src/main/scala/gopher/channels/EffectedChannel.scala deleted file mode 100644 index 489937c9..00000000 --- a/src/main/scala/gopher/channels/EffectedChannel.scala +++ /dev/null @@ -1,70 +0,0 @@ -package gopher.channels - - -import gopher._ -import gopher.util._ -import scala.concurrent._ - -trait EffectedChannel[A] extends Channel[A] with Effected[Channel[A]] -{ - def asInput(): EffectedInput[A] - def asOutput(): EffectedOutput[A] -} - - -object EffectedChannel -{ - def apply[A](in: Channel[A], policy: ThreadingPolicy): EffectedChannel[A] = - policy match { - case ThreadingPolicy.Single => new SinglethreadedEffectedChannel(in) - case ThreadingPolicy.Multi => new MultithreadedEffectedChannel(in) - } -} - - -class SinglethreadedEffectedChannel[A](ch:Channel[A]) extends SinglethreadedEffected[Channel[A]](ch) - with EffectedChannel[A] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]],ft: FlowTermination[B]): Unit = v.cbread(f,ft) - - def cbwrite[B](f: ContWrite[A,B] => Option[ - (A,Future[Continuated[B]]) - ], - ft: FlowTermination[B]): Unit = v.cbwrite(f,ft) - - def close() = v.close() - - def asInput() = api.makeEffectedInput(v, ThreadingPolicy.Single) - - def asOutput() = api.makeEffectedOutput(v, ThreadingPolicy.Single) - - def api: GopherAPI = v.api - - //override def filter(p:A=>Boolean):Channel[A] = new SinglethreadedEffectedChannel(v.filter(p)) - -} - -class MultithreadedEffectedChannel[A](ch:Channel[A]) extends MultithreadedEffected[Channel[A]](ch) - with EffectedChannel[A] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]],ft: FlowTermination[B]): Unit = v.get().cbread(f,ft) - - def cbwrite[B](f: ContWrite[A,B] => Option[ - (A,Future[Continuated[B]]) - ], - ft: FlowTermination[B]): Unit = v.get().cbwrite(f,ft) - - def close() = v.get().close() - - def asInput() = api.makeEffectedInput(v.get(), ThreadingPolicy.Multi) - - def asOutput() = api.makeEffectedOutput(v.get(), ThreadingPolicy.Multi) - - - def api: GopherAPI = v.get().api - -} - - diff --git a/src/main/scala/gopher/channels/EffectedInput.scala b/src/main/scala/gopher/channels/EffectedInput.scala deleted file mode 100644 index 2be8d4bb..00000000 --- a/src/main/scala/gopher/channels/EffectedInput.scala +++ /dev/null @@ -1,36 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.util._ -import scala.concurrent._ - -import scala.collection.mutable.{HashSet => MutableHashSet} -import java.util.concurrent.{ConcurrentHashMap=>JavaConcurrentHashMap} - -trait EffectedInput[A] extends Input[A] with Effected[Input[A]] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]],ft: FlowTermination[B]): Unit = { - val sv = current - sv.cbread((cr:ContRead[A,B]) => if (sv==current) f(cr.copy(channel=this)) else None, ft) - } - - def api: GopherAPI = current.api - -} - - -object EffectedInput -{ - def apply[A](in: Input[A], policy: ThreadingPolicy): EffectedInput[A] = - policy match { - case 
ThreadingPolicy.Single => new SinglethreadedEffectedInput(in) - case ThreadingPolicy.Multi => new MultithreadedEffectedInput(in) - } -} - -class SinglethreadedEffectedInput[A](in:Input[A]) extends SinglethreadedEffected[Input[A]](in) - with EffectedInput[A] - -class MultithreadedEffectedInput[A](in:Input[A]) extends MultithreadedEffected[Input[A]](in) - with EffectedInput[A] diff --git a/src/main/scala/gopher/channels/EffectedOutput.scala b/src/main/scala/gopher/channels/EffectedOutput.scala deleted file mode 100644 index e4e654b5..00000000 --- a/src/main/scala/gopher/channels/EffectedOutput.scala +++ /dev/null @@ -1,38 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.util._ -import scala.concurrent._ - -trait EffectedOutput[A] extends Effected[Output[A]] with Output[A] -{ - - def cbwrite[B](f: ContWrite[A,B] => Option[ - (A,Future[Continuated[B]]) - ], - ft: FlowTermination[B]): Unit = { - val sv = current - sv.cbwrite[B](cw => if (current eq sv) f(cw.copy(channel=this)) else None,ft) - } - - def api: GopherAPI = current.api - -} - -object EffectedOutput -{ - def apply[A](in: Output[A], policy: ThreadingPolicy): EffectedOutput[A] = - policy match { - case ThreadingPolicy.Single => new SinglethreadedEffectedOutput(in) - case ThreadingPolicy.Multi => new MultithreadedEffectedOutput(in) - } -} - -class SinglethreadedEffectedOutput[A](out:Output[A]) extends SinglethreadedEffected[Output[A]](out) - with EffectedOutput[A] - -class MultithreadedEffectedOutput[A](out:Output[A]) extends MultithreadedEffected[Output[A]](out) - with EffectedOutput[A] - - - diff --git a/src/main/scala/gopher/channels/FoldSelectorBuilder.scala b/src/main/scala/gopher/channels/FoldSelectorBuilder.scala deleted file mode 100644 index cab5b22e..00000000 --- a/src/main/scala/gopher/channels/FoldSelectorBuilder.scala +++ /dev/null @@ -1,695 +0,0 @@ -package gopher.channels - -import java.util.concurrent.atomic.AtomicInteger - -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ -import java.util.function.{BiConsumer => JBiConsumer} - -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer - - -abstract class FoldSelectorBuilder[T](nCases:Int) extends SelectorBuilder[T] -{ - - type R = T - - type HandleFunction[A] = (ExecutionContext, FlowTermination[T],A) => Future[T] - - - def reading[A](ch: Input[A])(f: A=>T): FoldSelectorBuilder[T] = - macro SelectorBuilder.readingImpl[A,T,FoldSelectorBuilder[T]] - - - def readingWithFlowTerminationAsync[A](ch: Input[A], - f: (ExecutionContext, FlowTermination[T], A) => Future[T]):this.type = - { - if (ch.isInstanceOf[FoldSelectorEffectedInput[_,_]]) { - // this can be generate before reading call instead. 
- // (todo: pass read generator) - val ech = ch.asInstanceOf[FoldSelectorEffectedInput[A,T]] - val i = ech.index - handleFunctions(i)=f - inputIndices.put(i,ech.current) - effectedInputs(i)=ech - //val dispatchRead = createReadDispatcher(ech) - //withReader[A](ech,normalizedEffectedReader1) - selector.addReader[A](ech.current,normalizedDispatchReader[A]) - this - }else{ - withReader[A](ch, normalizedPlainReader(f,ch)) - } - } - - def writing[A](ch: Output[A],x:A)(f: A=>T): FoldSelectorBuilder[T] = - macro SelectorBuilder.writingImpl[A,T,FoldSelectorBuilder[T]] - - - @inline - def writingWithFlowTerminationAsync[A](ch:Output[A], x: =>A, - f: (ExecutionContext, FlowTermination[T], A) => Future[T]): this.type = { - if (ch.isInstanceOf[FoldSelectorEffectedOutput[_,_]]) { - val ech = ch.asInstanceOf[FoldSelectorEffectedOutput[A,T]] - val i = ech.index - handleFunctions(i)=f - handleOutputVars(i) = (()=>x) - outputIndices.put(i,ech.current) - effectedOutputs(i)=ech - val dispathWrite = normalizedDispatchWriter[A] - selector.addWriter(ech.current,dispathWrite) - this - }else { - withWriter[A](ch, normalizedWriter(f,x,ch)) - } - } - - def timeout(t:FiniteDuration)(f: FiniteDuration => T): FoldSelectorBuilder[T] = - macro SelectorBuilder.timeoutImpl[T,FoldSelectorBuilder[T]] - - @inline - def timeoutWithFlowTerminationAsync(t:FiniteDuration, - f: (ExecutionContext, FlowTermination[T], FiniteDuration) => Future[T] - ): this.type = - withTimeout(t){ sk => Some(f(ec,sk.flowTermination,t) map Function.const(sk) ) } - - - def idle(body:T): FoldSelectorBuilder[T] = - macro SelectorBuilder.idleImpl[T,FoldSelectorBuilder[T]] - - - val inputIndices: IntIndexedCounterReverse[Input[_]] = new IntIndexedCounterReverse(nCases) - val outputIndices: IntIndexedCounterReverse[Output[_]] = new IntIndexedCounterReverse(nCases) - val handleFunctions: Array[HandleFunction[_]] = new Array(nCases) - val handleOutputVars: Array[ () => _ ] = new Array(nCases) - val effectedInputs: Array[FoldSelectorEffectedInput[_,T]] = new Array(nCases) - val effectedOutputs: Array[FoldSelectorEffectedOutput[_,T]] = new Array(nCases) - - def reregisterInputIndices(): Unit = - { - inputIndices.foreachIndex{(i,cr) => - if (cr.counter <= 0) { - cr.counter += 1 - val input = inputIndices.get(i).get.value - val typedInput: Input[input.read] = input.asInstanceOf[Input[input.read]] - val reader = normalizedDispatchReader[input.read] - val fun = selector.lockedRead(reader,typedInput,selector) - typedInput.cbread(fun,selector) - } - } - } - - def reregisterOutputIndices():Unit = - { - outputIndices.foreachIndex{(i,cw) => - if (cw.counter <= 0) { - cw.counter += 1 - val output = outputIndices.get(i).get.value - val typedOutput:Output[output.write] = output.asInstanceOf[Output[output.write]] - val writer = normalizedDispatchWriter[output.write] - val fun = selector.lockedWrite(writer,typedOutput,selector) - typedOutput.cbwrite(fun,selector) - } - } - } - - def reregisterIndices():Unit = - { - reregisterInputIndices() - reregisterOutputIndices() - } - - def normalizedPlainReader[A](f:HandleFunction[A], ch:Input[A]):ContRead.AuxF[A,T]= - { - def nf(prev:ContRead[A,T]):Option[ContRead.In[A]=>Future[Continuated[T]]] = Some{ - case ContRead.Value(a) => f(ec,selector,a) map { _ => ContRead[A,T](nf,ch,selector) } - case ContRead.Skip => { Future successful ContRead[A,T](nf,ch,selector) } - case ContRead.ChannelClosed => prev.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - Never.future - case ContRead.Failure(ex) => 
prev.flowTermination.throwIfNotCompleted(ex) - Never.future - } - nf - } - - def normalizedDispatchReader[A]:ContRead.AuxF[A,T]= { - // return never, becouse next step is generated via reregisterInputIndi - def nf(prev: ContRead[A, T]): Option[ContRead.In[A] => Future[Continuated[T]]] = { - val ch = prev.channel match { - case fe: FoldSelectorEffectedInput[_,_] => - System.err.println(s"normalizedEffectedReader:fromEffected ${fe.current} ${fe.index} fe=${fe} locked=${selector.isLocked}") - fe.current - case _ => prev.channel - } - val i = inputIndices.refIndexOf(ch) - //System.err.println(s"normalizedEffectedReader ch=$ch i=$i locked=${selector.isLocked}") - if (i == -1) - None - else { - inputIndices.values(i).counter -= 1 - val ff = handleFunctions(i).asInstanceOf[HandleFunction[A]] - Some { - case ContRead.Value(a) => ff(ec, selector, a) map { _ => reregisterIndices(); Never } - case ContRead.Skip => { - reregisterIndices() - Future successful Never - } - case ContRead.ChannelClosed => prev.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - Never.future - case ContRead.Failure(ex) => prev.flowTermination.throwIfNotCompleted(ex) - Never.future - } - } - } - nf - } - - - - - def normalizedDispatchWriter[A]:ContWrite.AuxF[A,T] = - { - prev => { - val i = outputIndices.refIndexOf(prev.channel) - if (i == -1) - None - else { - outputIndices.values(i).counter -= 1 - val ff = handleFunctions(i).asInstanceOf[HandleFunction[A]] - val xn = handleOutputVars(i).asInstanceOf[()=>A].apply() - Some((xn,ff(ec,prev.flowTermination,xn) map { _ => reregisterIndices(); Never } )) - } - } - } - - def normalizedWriter[A](f:HandleFunction[A],x: =>A, ch:Output[A]):ContWrite.AuxF[A,T]= { - def nf(prev: ContWrite[A, T]): Option[(A, Future[Continuated[T]])] = { - val xn = x - Some(xn, f(ec, prev.flowTermination, xn) map (_ => ContWrite(nf, ch, this.selector))) - } - nf - } - - - def beforeRefresh(): Unit = - { - // inputIndices.clear() - // outputIndices.clear() - } - - - -} - -/** - * Short name for use in fold signature - **/ -class FoldSelect[T](sf:SelectFactory, nCases: Int) extends FoldSelectorBuilder[T](nCases) -{ - override def api = sf.api -} - - -class FoldSelectorBuilderImpl(override val c:Context) extends SelectorBuilderImpl(c) -{ - import c.universe._ - - - /** - *``` - * selector.afold(s0) { (s, selector) => - * selector match { - * case x1: in1.read => f1 - * case x2: in2.read => f2 - * case x3: out3.write if (x3==v) => f3 - * case _ => f4 - * } - * } - *``` - * will be transformed to - *{{{ - * var s = s0 - * val bn = new FoldSelector(3) - * bn.reading(in1)(x1 => f1 map {s=_; s }) - * bn.reading(in2)(x2 => f2 map {s=_; s }) - * bn.writing(out3,v)(x2 => f2 map {s=_; s}) - * bn.idle(f4 map {s=_; s}) - *}}} - * - * also supported partial function syntax: - * - *{{{ - * selector.afold((0,1)){ - * case ((x,y),s) => s match { - * case x1: in1.read => f1 - * case x2: in2.read => f2 - * case x3: out3.write if (x3==v) => f3 - * case _ => f4 - * } - *}}} - * will be transformed to: - *{{{ - * var s = s0 - * val bn = new FoldSelector(3) - * bn.reading(in1)(x1 => async{ val x = s._1; - * val y = s._2; - * s = f1; writeBarrier; s} }) - * bn.reading(in2)(x2 => { val x = s._1; - * val y = s._2; - * s=f2; s} }) - * bn.writing(out3,v[x/s._1;y/s._2]) - * (x2 => s=f2; s}) - *}}} - * - * Using channels as part of fold state: - *{{{ - * selector.afold(ch){ case (ch,s) => - * s match { - * case x: ch.read => generated.write(x) - * ch.filter(_ % x == 0) - * } - * } - *}}} - * will be transformed to - 
*{{{ - * var s = ch - * val bn = new FoldSelector - * //val ef = new FoldSelectorEffectedInput(()=>s) - * bn.readingEffected(0)(x => async{ generated.write(x) - * s.filter(_ % x == 0)}) - *}}} - **/ - def afold[S:c.WeakTypeTag](s:c.Expr[S])(op:c.Expr[(S,FoldSelect[S])=>S]):c.Expr[Future[S]] = - { - val foldParse = parseFold(op) - val sName = foldParse.stateVal.name - val sNameStable = TermName(c.freshName("s")) - val bn = TermName(c.freshName("fold")) - val ncases = foldParse.selectCases.map(preTransformCaseDef(foldParse,bn,_,sNameStable)) - val tree = Block( - atPos(s.tree.pos)(q"var $sName = ${s}") :: - (q"val $sNameStable = $sName") :: - q"val ${bn} = new FoldSelect[${weakTypeOf[S]}](${c.prefix},${ncases.length})":: - wrapInEffected(foldParse,bn,transformSelectMatch(bn,ncases)), - q"${bn}.go" - ) - c.Expr[Future[S]](tree) - } - - def fold[S:c.WeakTypeTag](s:c.Expr[S])(op:c.Expr[(S,FoldSelect[S])=>S]):c.Expr[S] = - c.Expr[S](q"scala.async.Async.await(${afold(s)(op).tree})") - - sealed trait SelectRole - { - def active: Boolean - def generateRefresh(selector:TermName, state:TermName, i:Int): Option[c.Tree] - } - - object SelectRole { - - - - case object NoParticipate extends SelectRole { - def active = false - def generateRefresh(selector:TermName, state: TermName,i:Int) = None - } - - case object Read extends SelectRole { - def active = true - def generateRefresh(selector:TermName, state: TermName,i:Int) = - Some(q"$selector.inputIndices.put(${if (i<0) 0 else i},${genProj(state,i)})") - } - - case object Write extends SelectRole - { - def active = true - def generateRefresh(selector:TermName, state: TermName,i:Int) = - Some(q"$selector.outputIndices.put(${if (i<0) 0 else i},${genProj(state,i)})") - } - - def genProj(state:TermName, i:Int) = if (i == -1) q"$state" else q"""$state.${TermName("_"+(i+1))}""" - - } - - - case class FoldParseProjection( - sym: c.Symbol, - selectRole: SelectRole - ) - - case class FoldParse( - stateVal: ValDef, - stateSelectRole: SelectRole, - projections: List[FoldParseProjection], - selectValName: c.TermName, - selectCases: List[CaseDef] - ) { - lazy val projectionsBySym: Map[c.Symbol,(FoldParseProjection,Int)] = - projections.zipWithIndex.foldLeft(Map[c.Symbol,(FoldParseProjection,Int)]()) { (s,e) => - s.updated(e._1.sym,e) - } - } - - def withProjAssignments(fp:FoldParse, patSymbol: Symbol, body:c.Tree):c.Tree = - { - val stateName=fp.stateVal.name - val projAssignments = (fp.projections.zipWithIndex) map { - case (FoldParseProjection(sym,usedInSelect),i) => - val pf = TermName("_" + (i+1).toString) - q"val ${sym.name.toTermName} = $stateName.$pf" - } - val projectedSymbols = fp.projections.map(_.sym).toSet - val nbody = cleanIdentsSubstEffected(fp,body,projectedSymbols + fp.stateVal.symbol + patSymbol) - if (projAssignments.isEmpty) - nbody - else { - Block(projAssignments,cleanIdentsSubstEffected(fp,nbody,projectedSymbols)) - } - } - - private def cleanIdentsSubstEffected(fp: FoldParse,tree:c.Tree,symbols:Set[Symbol]):Tree = - { - val tr = new Transformer { - override def transform(tree:c.Tree):c.Tree = - tree match { - case Ident(s) => if (symbols.contains(tree.symbol)) { - // create new tree without associated symbol. - //(typer wil reassociate one). 
- atPos(tree.pos)(Ident(s)) - } else { - super.transform(tree) - } - case ValDef(m,s,rhs,lhs) => if (symbols.contains(tree.symbol)) { - atPos(tree.pos)(ValDef(m,s,rhs,lhs)) - super.transform(tree) - } else { - super.transform(tree) - } - case _ => super.transform(tree) - } - } - tr.transform(tree) - } - - def substProj(foldParse:FoldParse, newName: c.TermName, body:c.Tree, substEffected: Boolean, debug: Boolean):c.Tree = - { - val projections = foldParse.projections - val stateSymbol = foldParse.stateVal.symbol - val pi = projections.map(_.sym).zipWithIndex.toMap - //val sName = stateSymbol.name.toTermName - val sName = newName - val transformer = new Transformer() { - override def transform(tree:Tree):Tree = - tree match { - case Ident(name) => pi.get(tree.symbol) match { - case Some(n) => - if (substEffected && projections(n).selectRole.active) { - val proj = makeEffectedName(projections(n).sym) - atPos(tree.pos)(q"${proj}") - } else { - val proj = TermName("_"+(n+1).toString) - atPos(tree.pos)(q"${sName}.${proj}") - } - case None => - if (tree.symbol eq stateSymbol) { - if (substEffected && foldParse.stateSelectRole.active) { - val en = makeEffectedName(stateSymbol) - atPos(tree.pos)(Ident(en)) - }else{ - atPos(tree.pos)(Ident(sName)) - } - } else { - super.transform(tree) - } - } - case t@Typed(expr,tpt) => - tpt match { - case tptt: TypeTree => - tptt.original match { - case Select(base,name) => - //tptt.setOriginal(tranform(tptt.original)) - Typed(expr,transform(tptt.original)) - //val trOriginal = transform(tptt.original) - //Typed(expr,internal.setOriginal(tptt,trOriginal)) - //Typed(expr,tq"${sName}.read") - case _ => - super.transform(tree) - } - case _ => - super.transform(tree) - } - - case _ => super.transform(tree) - } - } - return transformer.transform(body) - } - - def preTransformCaseDefBody(fp:FoldParse, foldSelect: TermName, patSymbol: Symbol, body:c.Tree):c.Tree = - { - val sName = fp.stateVal.name - val tmpName = TermName(c.freshName("tmp")) - val refresh = refreshEffected(fp, foldSelect) - val statements = List( - q"val $tmpName = ${withProjAssignments(fp,patSymbol,body)}", - q"$sName = $tmpName" - ) ++ - refresh ++ List( - q"$sName" - ) - q"{..$statements}" - } - - - def beforeRefresh(foldSelect: TermName):c.Tree = - q"${foldSelect}.beforeRefresh()" - - def refreshEffected(fp:FoldParse, foldSelect:TermName):List[c.Tree] = - { - if (fp.stateSelectRole.active) { - beforeRefresh(foldSelect):: - fp.stateSelectRole.generateRefresh(foldSelect, fp.stateVal.name,-1).get::Nil - }else{ - val r = fp.projections.zipWithIndex.filter(_._1.selectRole.active).flatMap{ case (proj,i) => - proj.selectRole.generateRefresh(foldSelect,fp.stateVal.name,i) - } - if (r.nonEmpty) { - beforeRefresh(foldSelect)::r - }else{ - List() - } - } - } - - def makeEffectedName(sym:Symbol):TermName = - { - TermName(sym.name+"$eff") - } - - def preTransformCaseDef(fp:FoldParse, foldSelect: TermName, cd:CaseDef,stableName:TermName):CaseDef = - { - val patSymbol = cd.pat.symbol - val (pat, guard) = cd.pat match { - case Bind(name,t) => - fp.projections.indexWhere(_.sym.name == name) match { - case -1 => (cd.pat, cd.guard) - case idx => - // TODO: move parsing of rw-select to ASTUtil and - // eliminate code duplication with SelectorBuilder - t match { - case Typed(_,tp:TypeTree) => - val tpoa = if (tp.original.isEmpty) tp else tp.original - val tpo = MacroUtil.skipAnnotation(c)(tpoa) - tpo match { - case Select(ch,TypeName("read")) => - //TODO (low priority): implement shadowing instead abort - 
c.abort(cd.pat.pos,"Symbol in pattern shadow symbol in state") - case Select(ch,TypeName("write")) => - val newName = TermName(c.freshName("wrt")) - val newPat = atPos(cd.pat.pos)(Bind(newName,t)) - if (!cd.guard.isEmpty) { - c.abort(cd.pos,"guard must be empty"); - } - val sName = fp.stateVal.name.toTermName - val proj = TermName("_"+(idx+1)) - val newGuard = q"${newName} == $sName.$proj" - (newPat,newGuard) - case _ => - //TODO: implement read/write syntax - c.abort(cd.pat.pos,"read/write is required we have "+ - MacroUtil.shortString(c)(t)) - } - case _ => - c.abort(cd.pat.pos,"x:channel.read or x:channel.write form is required") - } - } - case Ident(TermName("_")) => (cd.pat, cd.guard) - case _ => c.abort(cd.pat.pos,"expected Bind or Default in pattern, have:"+cd.pat) - } - //val spat = substProj(fp,stableName,pat,true) - val symName = fp.stateVal.symbol.name.toTermName - atPos(cd.pos)(CaseDef(substProj(fp,stableName,pat,true,false), - substProj(fp,symName,guard,false,false), - preTransformCaseDefBody(fp,foldSelect,patSymbol,cd.body))) - } - - def parseFold[S](op: c.Expr[(S,FoldSelect[S])=>S]): FoldParse = - { - op.tree match { - case Function(List(x,y),Match(choice,cases)) => - val ValDef(_,yName,_,_) = y - if (choice.symbol != y.symbol) { - if (cases.length == 1) { - cases.head match { - case CaseDef(Apply(TypeTree(), - List(Apply(TypeTree(),params),Bind(sel,_))), - guard, - Match(Ident(choice1),cases1)) => - if (sel == choice1) { - val selectSymbols = retrieveSelectChannels(cases1) - FoldParse( - stateVal = x, - stateSelectRole = selectSymbols.getOrElse(x.symbol,SelectRole.NoParticipate), - projections = params map { x=> val sym = x.symbol - FoldParseProjection(sym,selectSymbols.getOrElse(sym,SelectRole.NoParticipate)) - }, - selectValName = sel.toTermName, - selectCases = cases1 - ) - } else { - c.abort(op.tree.pos,"expected shap like {case (a,s) => s match { ... 
} }") - } - case _ => - c.abort(op.tree.pos,"match agains selector in pf is expected") - } - } else { - c.abort(op.tree.pos,"partial function in fold must have one case") - } - } else { - val selectorName = choice match { - case Ident(sel) => sel - } - if (selectorName == yName) { - val selectSymbols = retrieveSelectChannels(cases) - FoldParse( - stateVal = x, - stateSelectRole = selectSymbols.getOrElse(x.symbol,SelectRole.NoParticipate), - projections = List(), - selectValName = selectorName.toTermName, - selectCases = cases - ) - } else { - c.abort(op.tree.pos,"expected choice over selector in fold") - } - } - // TODO: check that 'choice' == 'y' - case Function(params,something) => - c.abort(op.tree.pos,"match is expected in select.fold, we have: "+MacroUtil.shortString(c)(op.tree)) - case _ => - c.abort(op.tree.pos,"inline function is expected in select.fold, we have: "+MacroUtil.shortString(c)(op.tree)) - } - } - - private def retrieveSelectChannels(cases:List[CaseDef]): Map[c.Symbol,SelectRole] = - { - val s0=Map[c.Symbol,SelectRole]() - cases.foldLeft(s0){ (s,e) => - acceptSelectCaseDefPattern(e, - onRead = { in => s.updated(in.symbol,SelectRole.Read) }, - onWrite = { out => s.updated(out.symbol,SelectRole.Write) }, - onSelectTimeout => s, onIdle => s) - } - } - - //TODO: generalize and merge with parsing in SelectorBuilderImpl - def acceptSelectCaseDefPattern[A](caseDef:CaseDef,onRead: Tree => A, onWrite: Tree => A, - onSelectTimeout: Tree => A, onIdle: Tree => A):A = - { - caseDef.pat match { - case Bind(name,t) => - val termName = name.toTermName - t match { - case Typed(_,tp:TypeTree) => - val tpoa = if (tp.original.isEmpty) tp else tp.original - val tpo = MacroUtil.skipAnnotation(c)(tpoa) - tpo match { - case Select(ch,TypeName("read")) => onRead(ch) - case Select(ch,TypeName("write")) => onWrite(ch) - case Select(select,TypeName("timeout")) => onSelectTimeout(select) - case _ => - if (caseDef.guard.isEmpty) { - c.abort(tp.pos, "row caseDef:"+showRaw(caseDef) ); - c.abort(tp.pos, "match pattern in select without guard must be in form x:channel.write or x:channel.read"); - } else { - parseGuardInSelectorCaseDef(termName, caseDef.guard) match { - case q"scala.async.Async.await[${t}](${readed}.aread):${t1}" => - onRead(readed) - case q"gopher.goasync.AsyncWrapper.await[${t}](${readed}.aread):${t1}" => - onRead(readed) - case q"scala.async.Async.await[${t}](${ch}.awrite($expression)):${t1}" => - onWrite(ch) - case q"gopher.goasync.AsyncWrapper.await[${t}](${ch}.awrite($expression)):${t1}" => - onWrite(ch) - case x@_ => - c.abort(tp.pos, "can't parse match guard: "+x); - } - } - } - case _ => - c.abort(caseDef.pat.pos,"x:channel.read or x:channel.write form is required") - } - case Ident(TermName("_")) => onIdle(caseDef.pat) - case _ => - c.abort(caseDef.pat.pos,"bind in pattern is expected") - } - } - - private def wrapInEffected(foldParse:FoldParse, foldSelect: TermName, wrapped:List[c.Tree]):List[c.Tree] = - { - val stateValName = foldParse.stateVal.name - if (foldParse.stateSelectRole.active) { - genEffectedDef(foldParse.stateVal.symbol, foldSelect, 0 , q"()=>${stateValName}")::wrapped - } else if (foldParse.projections.nonEmpty && foldParse.projections.exists(_.selectRole.active)) { - foldParse.projections.zipWithIndex.filter(_._1.selectRole.active).map{ - case (p,i) => val funName = TermName("_"+(i+1)) - genEffectedDef(p.sym,foldSelect, i, q"()=>${stateValName}.${funName}") - } ++ wrapped - } else - wrapped - } - - private def genEffectedDef(sym:Symbol, foldSelect: TermName, 
index: Int, expr:c.Tree):c.Tree = - { - val constructorName = if (sym.typeSignature <:< c.weakTypeOf[Channel[_]]) { - "FoldSelectorEffectedChannel" - } else if (sym.typeSignature <:< c.weakTypeOf[Input[_]]) { - "FoldSelectorEffectedInput" - } else if (sym.typeSignature <:< c.weakTypeOf[Output[_]]) { - "FoldSelectorEffectedOutput" - } else { - c.abort(sym.pos,s"$sym expected type must be Channel or Input or Output") - } - val constructor=TermName(constructorName) - val effectedName = makeEffectedName(sym) - q"val $effectedName = gopher.channels.${constructor}(${foldSelect},${index},${expr})" - } - -} - -object FoldSelectorBuilder -{ - - - - - -} - - diff --git a/src/main/scala/gopher/channels/FoldSelectorEffected.scala b/src/main/scala/gopher/channels/FoldSelectorEffected.scala deleted file mode 100644 index b073b7fc..00000000 --- a/src/main/scala/gopher/channels/FoldSelectorEffected.scala +++ /dev/null @@ -1,99 +0,0 @@ -package gopher.channels - - -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ -import java.util.function.{BiConsumer => JBiConsumer} - -import scala.collection.mutable -import scala.ref.WeakReference - - -/** - * effected input inside fold. We know, that exists only one call of - * reading(FoldSelectorInput), generated in our fold statement. Than - * - * TODO: eliminate this class, instead refactoer SelectedReaderImpl to customize - * generation of reader statement. - */ -trait FoldSelectorEffectedInput[A,B] extends Input[A] -{ - def current: Input[A] - val foldSelector: FoldSelect[B] - val index: Int - - - def cbread[C](f: ContRead[A,C] => Option[ContRead.In[A] => Future[Continuated[C]]],ft: FlowTermination[C]): Unit = { - //currently will be never called, - current.cbread(f,ft) - } - -} - -object FoldSelectorEffectedInput{ - def apply[A,B](foldSelect: FoldSelect[B], index:Int, chFun: ()=>Input[A]): FoldSelectorEffectedInput[A,B] - = new CFoldSelectorEffectedInput[A,B](foldSelect,index,chFun) -} - -class CFoldSelectorEffectedInput[A,B](val foldSelector:FoldSelect[B], val index:Int, chFun: ()=>Input[A]) extends FoldSelectorEffectedInput[A,B] -{ - val api = chFun().api - def current() = chFun() -} - - -trait FoldSelectorEffectedOutput[A,B] extends Output[A] -{ - - def current: Output[A] - def foldSelector: FoldSelect[B] - def index: Int - - override def cbwrite[C](f: (ContWrite[A, C]) => Option[(A, Future[Continuated[C]])], ft: FlowTermination[C]): Unit = - { - current.cbwrite(f,ft) - } - -} - -object FoldSelectorEffectedOutput -{ - def apply[A,B](foldSelect: FoldSelect[B], index: Int, chFun: ()=>Output[A]):FoldSelectorEffectedOutput[A,B]= - new CFoldSelectorEffectedOutput(foldSelect, index, chFun) -} - -class CFoldSelectorEffectedOutput[A,B](val foldSelector: FoldSelect[B], val index: Int, chFun:()=>Output[A]) extends FoldSelectorEffectedOutput[A,B] -{ - val api = chFun().api - override def current = chFun() -} - -trait FoldSelectorEffectedChannel[A,B] extends FoldSelectorEffectedInput[A,B] with FoldSelectorEffectedOutput[A,B] -{ - - override def current: Channel[A] - - val foldSelector: FoldSelect[B] - val index: Int - - -} - -object FoldSelectorEffectedChannel -{ - def apply[A,B](foldSelect:FoldSelect[B], index:Int, chFun:()=>Channel[A]):FoldSelectorEffectedChannel[A,B]= - new CFoldSelectorEffectedChannel(foldSelect,index,chFun) -} - -class 
CFoldSelectorEffectedChannel[A,B](val foldSelector: FoldSelect[B], val index:Int,chFun:()=>Channel[A]) extends FoldSelectorEffectedChannel[A,B] -{ - override val api = chFun().api - override def current(): Channel[A] = chFun() -} diff --git a/src/main/scala/gopher/channels/ForeverSelectorBuilder.scala b/src/main/scala/gopher/channels/ForeverSelectorBuilder.scala deleted file mode 100644 index 25be613a..00000000 --- a/src/main/scala/gopher/channels/ForeverSelectorBuilder.scala +++ /dev/null @@ -1,111 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ - - -/** - * Builder for 'forever' selector. Can be obtained as `gopherApi.select.forever`. - **/ -trait ForeverSelectorBuilder extends SelectorBuilder[Unit] -{ - - - def reading[A](ch: Input[A])(f: A=>Unit): ForeverSelectorBuilder = - macro SelectorBuilder.readingImpl[A,Unit,ForeverSelectorBuilder] - // internal error in compiler when using this.type as S - - - def readingWithFlowTerminationAsync[A](ch: Input[A], f: (ExecutionContext, FlowTermination[Unit], A) => Future[Unit] ): this.type = - { - lazy val cont = ContRead(normalized, ch, selector) - def normalized(_cont:ContRead[A,Unit]):Option[ContRead.In[A]=>Future[Continuated[Unit]]] = - Some(ContRead.liftIn(_cont)(a=>f(ec,selector,a) map Function.const(cont))) - withReader[A](ch, normalized) - } - - def writing[A](ch: Output[A], x: A)(f: A => Unit): ForeverSelectorBuilder = - macro SelectorBuilder.writingImpl[A,Unit,ForeverSelectorBuilder] - - @inline - def writingWithFlowTerminationAsync[A](ch:Output[A], x: =>A, f: (ExecutionContext, FlowTermination[Unit], A) => Future[Unit] ): ForeverSelectorBuilder = - withWriter[A](ch, { cw => Some(x,f(ec,cw.flowTermination, x) map Function.const(cw)) } ) - - def timeout(t:FiniteDuration)(f: FiniteDuration => Unit): ForeverSelectorBuilder = - macro SelectorBuilder.timeoutImpl[Unit,ForeverSelectorBuilder] - - @inline - def timeoutWithFlowTerminationAsync(t:FiniteDuration, - f: (ExecutionContext, FlowTermination[Unit], FiniteDuration) => Future[Unit] ): this.type = - withTimeout(t){ sk => Some(f(ec,sk.flowTermination,t) map Function.const(sk)) } - - - def idle(body:Unit): ForeverSelectorBuilder = - macro SelectorBuilder.idleImpl[Unit,ForeverSelectorBuilder] - - - /** - * provide syntax for running select loop inside go (or async) block - * example of usage: - * - *{{{ - * go { - * ..... - * for(s <- gopherApi.select.forever) - * s match { - * case x: ch1.read => do something with x - * case q: chq.read => implicitly[FlowTermination[Unit]].doExit(()) - * case y: ch2.write if (y=expr) => do something with y - * case _ => do somethig when idle. - * } - *}}} - * - * Note, that you can use implicit instance of [FlowTermination[Unit]] to stop loop. - **/ - def foreach(f:Any=>Unit):Unit = - macro SelectorBuilderImpl.foreach[Unit] - - - - /** - * provide syntax for running select loop as async operation. - * - *{{{ - * val receiver = gopherApi.select.forever{ - * case x: channel.read => Console.println(s"received:\$x") - * } - *}}} - */ - def apply(f: PartialFunction[Any,Unit]): Future[Unit] = - macro SelectorBuilderImpl.apply[Unit] - - - def inputBuilder[B]() = new InputSelectorBuilder[B](api) - - /** - * provide syntax for creating output channels. 
- *{{{ - * - * val multiplexed = for(s <- gopherApi.select.forever) yield - * s match { - * case x: channelA => s"A:${x}" - * case x: channelB => s"B:${x}" - * } - * - *}}} - **/ - def map[B](f:Any=>B):Input[B] = macro SelectorBuilderImpl.map[B] - - def input[B](f:PartialFunction[Any,B]):Input[B] = - macro SelectorBuilderImpl.input[B] - -} - - - diff --git a/src/main/scala/gopher/channels/FutureInput.scala b/src/main/scala/gopher/channels/FutureInput.scala deleted file mode 100644 index 7816effe..00000000 --- a/src/main/scala/gopher/channels/FutureInput.scala +++ /dev/null @@ -1,65 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import scala.util._ -import java.util.concurrent.ConcurrentLinkedQueue -import gopher._ - -/** - * Future[A], represented as input which produce a value when completed, after this - * closes. If evaluation of feature is unsuccessful (i.e. failure), than appropriative - * exception is thrown during reading. - * - * - * Can be obtained from gopherApi. - * - *{{{ - * import gopherApi._ - * - * val myInput = futureInput(future) - * select.forever{ - * case x: myInput.read => Console.println(s"we receive value from future: \${x}") - * implicitly[FlowTermination[Unit]].doExit(()) - * case x: myChannel.read => Console.println(s"value from channel: \${x}") - * } - *}}} - * - * Also it is possiblt to direclty read from future in case guard: - *{{{ - * select.forever{ - * case x: T if (x==future.read) => Console.println(s"we receive value from future: \${x}") - * case x: T if (x==channel.read) => Console.println(s"value from channel: \${x}") - * } - *}}} - * - */ -class FutureInput[A](future: Future[A], override val api: GopherAPI) extends Input[A] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]], flwt: FlowTermination[B] ): Unit = - { - future.onComplete{ r => - for (f1 <- f(ContRead(f,this,flwt))) { - if (closed) - f1(ContRead.In.channelClosed) - else { - closed = true - r match { - case Success(x) => f1(ContRead.In value x) - case Failure(ex) => f1(ContRead.In failure ex) - } - } - } - }(api.executionContext) - } - - def input: Input[A] = this - - @volatile private[this] var closed: Boolean = false - -} - - diff --git a/src/main/scala/gopher/channels/GopherAPIProvider.scala b/src/main/scala/gopher/channels/GopherAPIProvider.scala deleted file mode 100644 index 2e1d434c..00000000 --- a/src/main/scala/gopher/channels/GopherAPIProvider.scala +++ /dev/null @@ -1,8 +0,0 @@ -package gopher.channels - -import gopher._ - -trait GopherAPIProvider -{ - def api: GopherAPI -} diff --git a/src/main/scala/gopher/channels/GrowingBufferedChannel.scala b/src/main/scala/gopher/channels/GrowingBufferedChannel.scala deleted file mode 100644 index 4155e115..00000000 --- a/src/main/scala/gopher/channels/GrowingBufferedChannel.scala +++ /dev/null @@ -1,79 +0,0 @@ -package gopher.channels - -import akka.actor._ -import scala.language._ -import scala.concurrent._ -import scala.collection.immutable._ -import gopher._ - -class ChannelOverflowException extends RuntimeException - -/** - * ChannelActor - actor, which leave - */ -class GrowingBufferedChannelActor[A](id:Long, limit:Int, api: GopherAPI) extends BaseBufferedChannelActor[A](id,api) -{ - - - protected[this] def onContWrite(cwa: gopher.channels.ContWrite[A, _]): Unit = - { - if (closed) { - cwa.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - } else { - 
val prevNElements = nElements - if (processWriter(cwa) && prevNElements==0) { - processReaders() - } - } - } - - protected[this] def onContRead(cra: gopher.channels.ContRead[A, _]): Unit = - { - if (nElements==0) { - if (closed) { - processReaderClosed(cra) - } else { - readers = readers :+ cra - } - } else { - val prevNElements = nElements - if (processReader(cra)) { - if (closed) { - stopIfEmpty - } - } - } - } - - - protected[this] def processReader[B](reader:ContRead[A,B]): Boolean = - reader.function(reader) match { - case Some(f1) => - val readedElement = buffer.head.asInstanceOf[A] - buffer = buffer.tail - nElements-=1 - Future{ - val cont = f1(ContRead.In value readedElement ) - api.continue(cont, reader.flowTermination) - }(api.executionContext) - true - case None => - false - } - - - private[this] def processWriter[B](writer:ContWrite[A,B]): Boolean = - writer.function(writer) match { - case Some((a,cont)) => - nElements+=1 - buffer = buffer :+ a - api.continue(cont, writer.flowTermination) - true - case None => - false - } - - - var buffer= Queue[Any]() - -} diff --git a/src/main/scala/gopher/channels/Input.scala b/src/main/scala/gopher/channels/Input.scala deleted file mode 100644 index 86d27202..00000000 --- a/src/main/scala/gopher/channels/Input.scala +++ /dev/null @@ -1,508 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.language.experimental.macros -import scala.language.reflectiveCalls -import scala.reflect.macros.blackbox.Context -import gopher._ -import gopher.util._ -import java.util.concurrent.atomic._ - -import gopher.channels.ContRead.In - -/** - * Entity, from which we can read objects of type A. - * - * - */ -trait Input[A] extends GopherAPIProvider -{ - - thisInput => - - type <~ = A - type read = A - - case class Read(value:A) - - // TODO: use closed in selector. - - /** - * apply f, when input will be ready and send result to API processor - */ - def cbread[B](f: - ContRead[A,B]=>Option[ - ContRead.In[A]=>Future[Continuated[B]] - ], - ft: FlowTermination[B]): Unit - - - /** - * async version of read. Immediatly return future, which will contains result of read or failur with StreamClosedException - * in case of stream is closed. - */ - def aread:Future[A] = { - val ft = PromiseFlowTermination[A]() - cbread[A](cont => Some(ContRead.liftIn(cont) { - a => Future.successful(Done(a,ft)) - }), ft) - ft.future - } - - /** - * instance of gopher API - */ - def api: GopherAPI - - /** - * read object from channel. Must be situated inside async/go/action block. - */ - def read:A = macro InputMacro.read[A] - - /** - * synonym for read. - */ - def ? : A = macro InputMacro.read[A] - - /** - * return feature which contains sequence from first `n` elements. - */ - def atake(n:Int):Future[IndexedSeq[A]] = - { - if (n==0) { - Future successful IndexedSeq() - } else { - val ft = PromiseFlowTermination[IndexedSeq[A]] - @volatile var i = 0; - @volatile var r: IndexedSeq[A] = IndexedSeq() - def takeFun(cont:ContRead[A,IndexedSeq[A]]):Option[ContRead.In[A]=>Future[Continuated[IndexedSeq[A]]]] = - Some{ - ContRead.liftIn(cont) { a => - i += 1 - r = r :+ a - if (i f each time when new object is arrived. Ended when input closes. - * - * must be inside go/async/action block. 
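// A minimal usage sketch (not part of these sources) of the asynchronous read operations
// documented above, against the 0.99.x API. The channel parameter and the surrounding
// method are hypothetical; an ExecutionContext is assumed for composing the futures.
import scala.concurrent.{ExecutionContext, Future}
import gopher.channels.Input

def consume(ch: Input[Int])(implicit ec: ExecutionContext): Future[Unit] =
  for {
    first    <- ch.aread                              // async read of one element
    firstTen <- ch.atake(10)                          // future of the next ten elements
    _        <- ch.aforeach(x => println(s"got $x"))  // applied to each element until the input closes
  } yield ()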
- */ - def foreach(f: A=>Unit): Unit = macro InputMacro.foreachImpl[A] - - def aforeach(f: A=>Unit): Future[Unit] = macro InputMacro.aforeachImpl[A] - - class Filtered(p: A=>Boolean) extends Input[A] { - - def cbread[B](f:ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]], ft: FlowTermination[B]): Unit = - thisInput.cbread[B]({ cont => - f(cont) map { f1 => - { case v@ContRead.Value(a) => - if (p(a)) { - f1(v) - } else { - f1(ContRead.Skip) - Future successful cont - } - case v@_ => f1(v) - } - } }, ft) - - def api = thisInput.api - - } - - def filter(p: A=>Boolean): Input[A] = new Filtered(p) - - def withFilter(p: A=>Boolean): Input[A] = filter(p) - - def map[B](g: A=>B): Input[B] = - new Input[B] { - - def cbread[C](f: ContRead[B,C] => Option[ContRead.In[B]=>Future[Continuated[C]]], ft: FlowTermination[C] ): Unit = - { - def mf(cont:ContRead[A,C]):Option[ContRead.In[A]=>Future[Continuated[C]]] = - { val contA = ContRead(f,this,cont.flowTermination) - f(contA) map (f1 => { case v@ContRead.Value(a) => f1(ContRead.Value(g(a))) - case ContRead.Skip => f1(ContRead.Skip) - Future successful cont - case ContRead.ChannelClosed => f1(ContRead.ChannelClosed) - case ContRead.Failure(ex) => f1(ContRead.Failure(ex)) - } ) - } - thisInput.cbread(mf,ft) - } - - def api = thisInput.api - - } - - - def zip[B](x: Iterable[B]): Input[(A,B)] = zip(Input.asInput(x,api)) - - def zip[B](x: Input[B]): Input[(A,B)] = new ZippedInput(api,this,x) - - def flatMapOp[B](g: A=>Input[B])(op:(Input[B],Input[B])=>Input[B]):Input[B] = new Input[B] { - - def cbread[C](f: ContRead[B,C] => Option[ContRead.In[B]=>Future[Continuated[C]]], ft: FlowTermination[C] ): Unit = - { - def mf(cont:ContRead[A,C]):Option[ContRead.In[A]=>Future[Continuated[C]]] = - { val contA = ContRead(f,this,cont.flowTermination) - f(contA) map { f1 => { - case v@ContRead.Value(a) => Future successful ContRead(f,op(g(a),this),cont.flowTermination) - case ContRead.Skip => f1(ContRead.Skip) - Future successful cont - case ContRead.ChannelClosed => f1(ContRead.ChannelClosed) - case ContRead.Failure(ex) => f1(ContRead.Failure(ex)) - }}} - thisInput.cbread(mf,ft) - } - - def api = thisInput.api - } - - def flatMap[B](g: A=>Input[B]):Input[B] = flatMapOp(g)( _ or _) - - def seq = new { - def flatMap[B](g: A=>Input[B]):Input[B] = flatMapOp(g)( _ append _ ) - } - - /** - * return input merged with 'other'. - * (i.e. non-determenistics choice) - **/ - def |(other:Input[A]):Input[A] = new OrInput(this,other) - - /** - * synonim for non-deteremenistics choice. - **/ - def or(other:Input[A]):Input[A] = new OrInput(this,other) - - /** - * when the first channel is exhaused, read from second. 
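// A small sketch (not part of these sources) showing how the combinators above compose:
// `|` merges two inputs non-deterministically, `filter`/`map` transform the stream, and
// `append` drains the second input only after the first is exhausted. The channel
// parameters are hypothetical.
import gopher.channels.Input

def evensAsText(fast: Input[Int], slow: Input[Int]): Input[String] =
  (fast | slow)              // non-deterministic merge of the two inputs
    .filter(_ % 2 == 0)      // keep even values only
    .map(n => s"even: $n")   // transform each element

def concatenated(a: Input[Int], b: Input[Int]): Input[Int] =
  a append b                 // read from b once a is exhausted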
- **/ - def append(other:Input[A]):Input[A] = new Input[A] { - - def cbread[C](f: ContRead[A,C] => Option[ContRead.In[A]=>Future[Continuated[C]]], ft: FlowTermination[C] ): Unit = - { - def mf(cont:ContRead[A,C]):Option[ContRead.In[A]=>Future[Continuated[C]]] = - { val contA = ContRead(f,this,cont.flowTermination) - f(contA) map (f1 => { case v@ContRead.Value(a) => f1(ContRead.Value(a)) - case ContRead.Skip => f1(ContRead.Skip) - Future successful cont - case ContRead.ChannelClosed => f1(ContRead.Skip) - Future successful ContRead(f,other,cont.flowTermination) - case ContRead.Failure(ex) => f1(ContRead.Failure(ex)) - }) - } - thisInput.cbread(mf,ft) - } - - def api = thisInput.api - - } - - def prepend(a:A):Input[A] = new Input[A] { - - val aReaded = new AtomicBoolean(false) - - def cbread[C](f: ContRead[A,C] => Option[ContRead.In[A]=>Future[Continuated[C]]], ft: FlowTermination[C] ): Unit = - { - f(ContRead(f,this,ft)) map { f1 => - if (aReaded.compareAndSet(false,true)) { - f1(ContRead.Value(a)) - } else { - api.continuatedProcessorRef ! ContRead(f,thisInput,ft) - f1(ContRead.Skip) - } - } - } - - def api = thisInput.api - - } - - - /** - * return pair of inputs `(ready, timeouts)`, such that when you read from `ready` you receive element from `this` - * and if during reading you wait more than specified `timeout`, than timeout message is appear in `timeouts` - * - *``` - * val (inReady, inTimeouts) = in withInputTimeouts (10 seconds) - * select.forever { - * case x: inReady.read => Console.println(s"received value \${value}") - * case x: inTimeouts.read => Console.println(s"timeout occured") - * } - *``` - **/ - def withInputTimeouts(timeout: FiniteDuration): (Input[A],Input[FiniteDuration]) = - new InputWithTimeouts(this,timeout).pair - - /** - * duplicate input - */ - def dup(): (Input[A],Input[A]) = - (new DuppedInput(this)).pair - - def async = new { - - def foreach(f: A=> Unit):Future[Unit] = macro InputMacro.aforeachImpl[A] - - @inline - def foreachSync(f: A=>Unit): Future[Unit] = thisInput.foreachSync(f) - - @inline - def foreachAsync(f: A=>Future[Unit])(implicit ec:ExecutionContext): Future[Unit] = - thisInput.foreachAsync(f)(ec) - - } - - def foreachSync(f: A=>Unit): Future[Unit] = - { - val ft = PromiseFlowTermination[Unit] - lazy val contForeach = ContRead(applyF,this,ft) - def applyF(cont:ContRead[A,Unit]):Option[ContRead.In[A]=>Future[Continuated[Unit]]] = - Some( (in:ContRead.In[A]) => - in match { - case ContRead.ChannelClosed => Future successful Done((),ft) - case x => ContRead.liftIn(cont){ x => f(x) - Future successful contForeach - }(x) - } - ) - cbread(applyF, ft) - ft.future - } - - def foreachAsync(f: A=>Future[Unit])(implicit ec:ExecutionContext): Future[Unit] = - { - val ft = PromiseFlowTermination[Unit] - def applyF(cont:ContRead[A,Unit]):Option[ContRead.In[A]=>Future[Continuated[Unit]]] = - Some{ - case ContRead.ChannelClosed => Future successful Done((),ft) - case in => - ContRead.liftIn(cont){ x => f(x) map ( _ => ContRead(applyF, this, ft) ) }(in) - } - cbread(applyF,ft) - ft.future - } - - def flatFold(fun:(Input[A],A)=>Input[A]):Input[A] = new Input[A] { - - val current = new AtomicReference[Input[A]](thisInput) - - def cbread[C](f: ContRead[A,C] => Option[ContRead.In[A]=>Future[Continuated[C]]], ft: FlowTermination[C] ): Unit = - { - def mf(cont:ContRead[A,C]):Option[ContRead.In[A]=>Future[Continuated[C]]] = - f(ContRead(f,this,ft)) map { next => - { case ContRead.Value(a) => - var changed = false - while(!changed) { - var prev = current.get - var next = 
fun(prev,a) - changed = current.compareAndSet(prev,next) - } - next(ContRead.Value(a)) - // fp-version. - // next(ContRead.Skip) - //ContRead(f, one(a) append (fun(this,a) flatFold fun),ft) - case v@_ => next(v) - } } - current.get.cbread(mf,ft) - } - - def api = thisInput.api - - } - - /** - * async incarnation of fold. Fold return future, which successed when channel is closed. - *Operations withing fold applyed on result on each other, starting with s0. - *``` - * val fsum = ch.afold(0){ (s, n) => s+n } - *``` - * Here in fsum will be future with value: sum of all elements in channel until one has been closed. - **/ - def afold[S,B](s0:S)(f:(S,A)=>S): Future[S] = macro InputMacro.afoldImpl[A,S] - - /** - * fold opeations, available inside async bloc. - *``` - * go { - * val sum = ch.fold(0){ (s,n) => s+n } - * } - *``` - */ - def fold[S,B](s0:S)(f:(S,A)=>S): S = macro InputMacro.foldImpl[A,S] - - - - def afoldSync[S,B](s0:S)(f:(S,A)=>S): Future[S] = - { - val ft = PromiseFlowTermination[S] - var s = s0 - def applyF(cont:ContRead[A,S]):Option[ContRead.In[A]=>Future[Continuated[S]]] = - { - val contFold = ContRead(applyF,this,ft) - Some{ - case ContRead.ChannelClosed => Future successful Done(s,ft) - case ContRead.Value(a) => s = f(s,a) - Future successful contFold - case ContRead.Skip => Future successful contFold - case ContRead.Failure(ex) => Future failed ex - } - } - cbread(applyF,ft) - ft.future - } - - def afoldAsync[S,B](s0:S)(f:(S,A)=>Future[S])(implicit ec:ExecutionContext): Future[S] = - { - val ft = PromiseFlowTermination[S] - var s = s0 - def applyF(cont:ContRead[A,S]):Option[ContRead.In[A]=>Future[Continuated[S]]] = - { - Some{ - case ContRead.ChannelClosed => Future successful Done(s,ft) - case ContRead.Value(a) => f(s,a) map { x => - s = x - ContRead(applyF,this,ft) - } - case ContRead.Skip => Future successful ContRead(applyF,this,ft) - case ContRead.Failure(ex) => Future failed ex - } - } - cbread(applyF,ft) - ft.future - } - -} - -object Input -{ - def asInput[A](iterable:Iterable[A], api: GopherAPI): Input[A] = new IterableInput(iterable.iterator, api) - - class IterableInput[A](it: Iterator[A], override val api: GopherAPI) extends Input[A] - { - - def cbread[B](f:ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]], ft: FlowTermination[B]): Unit = - f(ContRead(f,this,ft)) map (f1 => { val next = this.synchronized { - if (it.hasNext) - ContRead.Value(it.next) - else - ContRead.ChannelClosed - } - api.continue(f1(next),ft) - } - ) - } - - def closed[A](implicit gopherApi: GopherAPI): Input[A] = new Input[A] { - - def cbread[B](f:ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]], ft: FlowTermination[B]): Unit = - f(ContRead(f,this,ft)) map (f1 => f1(ContRead.ChannelClosed)) - - def api = gopherApi - } - - def one[A](a:A)(implicit gopherApi: GopherAPI): Input[A] = new Input[A] { - - val readed: AtomicBoolean = new AtomicBoolean(false) - - def cbread[B](f:ContRead[A,B]=>Option[ContRead.In[A]=>Future[Continuated[B]]], ft: FlowTermination[B]): Unit = - f(ContRead(f,this,ft)) map (f1 => f1( - if (readed.compareAndSet(false,true)) { - ContRead.Value(a) - }else{ - ContRead.ChannelClosed - })) - - def api = gopherApi - } - - - def zero[A](implicit gopherAPI: GopherAPI):Input[A] = new Input[A] { - - /** - * will eat f without a trace (i.e. 
f will be never called) - */ - override def cbread[B](f: (ContRead[A, B]) => Option[(In[A]) => Future[Continuated[B]]], ft: FlowTermination[B]): Unit = {} - - /** - * instance of gopher API - */ - override def api: GopherAPI = gopherAPI - } - -} - - - -object InputMacro -{ - - def read[A](c:Context):c.Expr[A] = - { - import c.universe._ - c.Expr[A](q"{scala.async.Async.await(${c.prefix}.aread)}") - } - - def foreachImpl[A](c:Context)(f:c.Expr[A=>Unit]): c.Expr[Unit] = - { - import c.universe._ - c.Expr[Unit](q"scala.async.Async.await(${aforeachImpl(c)(f)})") - } - - - def aforeachImpl[A](c:Context)(f:c.Expr[A=>Unit]): c.Expr[Future[Unit]] = - { - import c.universe._ - f.tree match { - case Function(valdefs,body) => - if (MacroUtil.hasAwait(c)(body)) { - // TODO: add support for flow-termination (?) - val nbody = q"scala.async.Async.async(${body})" - val nfunction = atPos(f.tree.pos)(Function(valdefs,nbody)) - val ntree = q"${c.prefix}.foreachAsync(${nfunction})" - c.Expr[Future[Unit]](c.untypecheck(ntree)) - } else { - c.Expr[Future[Unit]](q"${c.prefix}.foreachSync(${f.tree})") - } - case _ => c.abort(c.enclosingPosition,"function expected") - } - } - - def foldImpl[A,S](c:Context)(s0:c.Expr[S])(f:c.Expr[(S,A)=>S]): c.Expr[S] = - { - import c.universe._ - c.Expr[S](q"scala.async.Async.await(${afoldImpl(c)(s0)(f)})") - } - - def afoldImpl[A,S](c:Context)(s0:c.Expr[S])(f:c.Expr[(S,A)=>S]): c.Expr[Future[S]] = - { - import c.universe._ - f.tree match { - case Function(valdefs,body) => - if (MacroUtil.hasAwait(c)(body)) { - val nbody = atPos(body.pos)(q"scala.async.Async.async(${body})") - val nfunction = atPos(f.tree.pos)(Function(valdefs,nbody)) - val ntree = q"${c.prefix}.afoldAsync(${s0.tree})(${nfunction})" - c.Expr[Future[S]](c.untypecheck(ntree)) - } else { - c.Expr[Future[S]](q"${c.prefix}.afoldSync(${s0.tree})(${f.tree})") - } - } - } - - -} diff --git a/src/main/scala/gopher/channels/InputOutput.scala b/src/main/scala/gopher/channels/InputOutput.scala deleted file mode 100644 index 69849fda..00000000 --- a/src/main/scala/gopher/channels/InputOutput.scala +++ /dev/null @@ -1,5 +0,0 @@ -package gopher.channels - -import gopher._ - -trait InputOutput[A] extends Input[A] with Output[A] diff --git a/src/main/scala/gopher/channels/InputSelectorBuilder.scala b/src/main/scala/gopher/channels/InputSelectorBuilder.scala deleted file mode 100644 index 33f81143..00000000 --- a/src/main/scala/gopher/channels/InputSelectorBuilder.scala +++ /dev/null @@ -1,98 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.whitebox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ - - -/** - * Builder for 'input' selector. Can be obtained as `gopherApi.select.input`. - * or map of forever selector. 
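// A sketch (not part of these sources) of the "map of forever selector" form mentioned
// above: the mapped selector is itself an Input that multiplexes its source channels.
// Names are hypothetical, and an implicit GopherAPI instance is assumed to be in scope.
import gopher._
import gopher.channels.Input

def multiplex(a: Input[Int], b: Input[Int])(implicit gopherApi: GopherAPI): Input[String] =
  gopherApi.select.forever.map {
    case x: a.read => s"A:$x"   // a value arrived on a
    case y: b.read => s"B:$y"   // a value arrived on b
  }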
- * - * - */ -class InputSelectorBuilder[T](override val api: GopherAPI) extends SelectorBuilder[T@uncheckedVariance] - with Input[T] -{ - - val proxy = api.makeChannel[T]() - - def reading[A](ch: Input[A])(f: A=>T): InputSelectorBuilder[T] = - macro SelectorBuilder.readingImpl[A,T,InputSelectorBuilder[T]] - - @inline - def readingWithFlowTerminationAsync[A](ch: Input[A], f: (ExecutionContext, FlowTermination[T], A) => Future[T] ): InputSelectorBuilder[T] = - { - def normalized(_cont:ContRead[A,T]):Option[ContRead.In[A]=>Future[Continuated[T]]] = - Some(ContRead.liftIn(_cont)(a=>f(ec,selector,a) flatMap { - proxy.awrite(_) - } map Function.const(ContRead(normalized,ch,selector)))) - withReader[A](ch,normalized) - } - - /** - * write x to channel if possible - */ - def writing[A](ch: Output[A], x: A)(f: A=>T): InputSelectorBuilder[T] = - macro SelectorBuilder.writingImpl[A,T,InputSelectorBuilder[T]] - - @inline - def writingWithFlowTerminationAsync[A](ch:Output[A], x: =>A, f: (ExecutionContext, FlowTermination[T], A) => Future[T] ): this.type = - withWriter[A](ch, { cw => Some(x,f(ec,cw.flowTermination,x) flatMap { - x=>proxy.awrite(x) - } map Function.const(cw)) }) - -/* - def idle(body: T): InputSelectorBuilder[T] = - macro SelectorBuilder.idleImpl[T,InputSelectorBuilder[T]] - - @inline - def idleWithFlowTerminationAsync(f: (ExecutionContext, FlowTermination[T]) => Future[T] ): this.type = - withIdle{ sk => Some(f(ec,sk.flowTermination) flatMap(x => - proxy.awrite(x)) map(Function.const(sk)) ) } -*/ - - def timeout(t:FiniteDuration)(f: FiniteDuration => T): InputSelectorBuilder[T] = - macro SelectorBuilder.timeoutImpl[T,InputSelectorBuilder[T]] - - @inline - def timeoutWithFlowTerminationAsync(t:FiniteDuration, - f: (ExecutionContext, FlowTermination[T], FiniteDuration) => Future[T] ): this.type = - withTimeout(t){ sk => Some(f(ec,sk.flowTermination,t) flatMap( x => - proxy.awrite(x)) map(Function.const(sk)) ) } - - - def foreach(f:Any=>T):T = - macro SelectorBuilderImpl.foreach[T] - - def apply(f: PartialFunction[Any,T]): Future[T] = - macro SelectorBuilderImpl.apply[T] - - // input methods - def cbread[B](f: - ContRead[T,B]=>Option[ - ContRead.In[T]=>Future[Continuated[B]] - ], - ft: FlowTermination[B]): Unit = proxy.cbread(f,ft) - - def started: InputSelectorBuilder[T] = { go; this } - - // - override val selector = new Selector[T](api) { - override def doExit(a: T): T = - { - proxy.awrite(a) onComplete { - _ => proxy.close() - } - super.doExit(a) - } - } - -} - - diff --git a/src/main/scala/gopher/channels/InputWithTimeouts.scala b/src/main/scala/gopher/channels/InputWithTimeouts.scala deleted file mode 100644 index bcbd7769..00000000 --- a/src/main/scala/gopher/channels/InputWithTimeouts.scala +++ /dev/null @@ -1,52 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util._ -import gopher._ - - -/** - * Wrap `origin` input into input, which produce 'timeout' value into `timeouts` channel - * when reading from wrapped channel take more time than `timeout` . 
- * - *@see InputChannel.withInputTimeouts - */ -class InputWithTimeouts[A](origin: Input[A], timeout: FiniteDuration) -{ - - def pair: (Input[A],Input[FiniteDuration]) = (wrapped, timeouts) - - val timeouts = origin.api.makeChannel[FiniteDuration]() - - val wrapped = new Input[A] { - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A]=>Future[Continuated[B]]], ft: FlowTermination[B]): Unit = - { - val c = api.actorSystem.scheduler.scheduleOnce(timeout){ - timeouts.awrite(timeout) - }(api.executionContext) - def fIn(cont: ContRead[A,B]):Option[ContRead.In[A]=>Future[Continuated[B]]] = - { - f(ContRead(f,this,ft)) map { f1 => - c.cancel() - in => in match { - case ContRead.Skip => Future successful ContRead(f,this,ft) - case ContRead.ChannelClosed => - timeouts.close() - f1(ContRead.ChannelClosed) - case x@_ => f1(x) - } - } - } - - origin.cbread(fIn,ft) - } - - def api = origin.api - - } - - -} - diff --git a/src/main/scala/gopher/channels/LazyChannel.scala b/src/main/scala/gopher/channels/LazyChannel.scala deleted file mode 100644 index 76100ff8..00000000 --- a/src/main/scala/gopher/channels/LazyChannel.scala +++ /dev/null @@ -1,24 +0,0 @@ -package gopher.channels - - -import scala.concurrent._ -import scala.concurrent.duration._ -import gopher._ - -/** - * lazy channel, which created during first input/output operations. - * (used in transputers as default value for In/Out Ports) - */ -class LazyChannel[A](override val api: GopherAPI) extends Input[A] with Output[A] -{ - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]], flwt: FlowTermination[B] ): Unit = - origin.cbread(f,flwt) - - def cbwrite[B](f: ContWrite[A,B] => Option[(A,Future[Continuated[B]])], flwt: FlowTermination[B] ): Unit = - origin.cbwrite(f,flwt) - - lazy val origin = api.makeChannel[A]() - -} - diff --git a/src/main/scala/gopher/channels/OnceSelectorBuilder.scala b/src/main/scala/gopher/channels/OnceSelectorBuilder.scala deleted file mode 100644 index 40b008ae..00000000 --- a/src/main/scala/gopher/channels/OnceSelectorBuilder.scala +++ /dev/null @@ -1,61 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.whitebox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ - - -/** - * Builder for 'once' selector. Can be obtained as `gopherApi.select.once`. 
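// A sketch (not part of these sources) of the 'once' selector described above: exactly one
// clause fires, and the returned Future completes with that clause's value. Channel names
// are hypothetical; an implicit GopherAPI instance is assumed to be in scope.
import scala.concurrent.Future
import gopher._
import gopher.channels.Input

def firstOf(a: Input[Int], b: Input[Int])(implicit gopherApi: GopherAPI): Future[String] =
  gopherApi.select.once[String] {
    case x: a.read => s"from a: $x"   // whichever input becomes ready first is chosen
    case y: b.read => s"from b: $y"
  }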
- */ -trait OnceSelectorBuilder[T] extends SelectorBuilder[T@uncheckedVariance] -{ - - def reading[A](ch: Input[A])(f: A=>T): OnceSelectorBuilder[T] = - macro SelectorBuilder.readingImpl[A,T,OnceSelectorBuilder[T]] - - @inline - def readingWithFlowTerminationAsync[A](ch: Input[A], f: (ExecutionContext, FlowTermination[T], A) => Future[T] ): OnceSelectorBuilder[T] = - withReader[A](ch, { cr => Some(ContRead.liftIn(cr)(a => - f(ec,cr.flowTermination,a) map ( Done(_,cr.flowTermination)) - ) ) - } ) - - /** - * write x to channel if possible - */ - def writing[A](ch: Output[A], x: A)(f: A=>T): OnceSelectorBuilder[T] = - macro SelectorBuilder.writingImpl[A,T,OnceSelectorBuilder[T]] - - @inline - def writingWithFlowTerminationAsync[A](ch:Output[A], x: =>A, f: (ExecutionContext, FlowTermination[T], A) => Future[T] ): this.type = - withWriter[A](ch, { cw => Some(x,f(ec,cw.flowTermination,x) map(x => Done(x,cw.flowTermination)) ) } ) - - def timeout(t:FiniteDuration)(f: FiniteDuration => T): OnceSelectorBuilder[T] = - macro SelectorBuilder.timeoutImpl[T,OnceSelectorBuilder[T]] - - - @inline - def timeoutWithFlowTerminationAsync(t:FiniteDuration, - f: (ExecutionContext, FlowTermination[T], FiniteDuration) => Future[T] ): this.type = - withTimeout(t){ sk => Some(f(ec,sk.flowTermination,t).map(x => Done(x,sk.flowTermination)) ) } - - - def idle(body: T): OnceSelectorBuilder[T] = - macro SelectorBuilder.idleImpl[T,OnceSelectorBuilder[T]] - - - def foreach(f:Any=>T):T = - macro SelectorBuilderImpl.foreach[T] - - def apply(f: PartialFunction[Any,T]): Future[T] = - macro SelectorBuilderImpl.apply[T] - -} - - diff --git a/src/main/scala/gopher/channels/OneTimeChannel.scala b/src/main/scala/gopher/channels/OneTimeChannel.scala deleted file mode 100644 index 4cc801bc..00000000 --- a/src/main/scala/gopher/channels/OneTimeChannel.scala +++ /dev/null @@ -1,60 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import gopher._ -import java.util.concurrent.atomic._ - -/** - * channel, in which only one message can be written, - * after which it is automatically closed - * - * Writer is not waiting for reader to start. 
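// A sketch (not part of these sources) of the one-shot channel described above: the first
// write succeeds without waiting for a reader, and the channel closes itself afterwards.
// The surrounding method is hypothetical.
import scala.concurrent.Future
import gopher._
import gopher.channels.OneTimeChannel

def oneShot()(implicit api: GopherAPI): Future[Int] = {
  val ch = OneTimeChannel[Int]()   // picks up the implicit GopherAPI
  ch.awrite(42)                    // the single value; a later write would fail
  ch.aread                         // future of that value
}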
- */ -class OneTimeChannel[T](override val api:GopherAPI) extends Channel[T] -{ - private[this] val p = Promise[T]() - private[this] val readed = new AtomicBoolean(false) - - def future = p.future - def promise = p - - def cbread[B](f: ContRead[T,B] => Option[ContRead.In[T] => Future[Continuated[B]]],ft: FlowTermination[B]): Unit = - { - p.future.foreach{ a => - f(ContRead(f,this,ft)) foreach { g => - if (readed.compareAndSet(false,true)) { - api.continue(g(ContRead.Value(a)),ft) - } else{ - api.continue(g(ContRead.Skip),ft) - } - } - }(api.executionContext) - } - - def cbwrite[B](f: ContWrite[T,B] => Option[(T, Future[Continuated[B]])],ft: FlowTermination[B]): Unit = - { - if (p.isCompleted) { - ft.doThrow(new ChannelClosedException()) - } else { - f(ContWrite(f,this,ft)) foreach { case (a, next) => - if (!p.trySuccess(a)) { - ft.doThrow(throw new ChannelClosedException()) - } - api.continue(next,ft) - } - } - } - - - def close(): Unit = - p failure new ChannelClosedException() - -} - -object OneTimeChannel -{ - - def apply[A]()(implicit api:GopherAPI): OneTimeChannel[A] = - new OneTimeChannel[A](api) - -} diff --git a/src/main/scala/gopher/channels/OrInput.scala b/src/main/scala/gopher/channels/OrInput.scala deleted file mode 100644 index 481506f6..00000000 --- a/src/main/scala/gopher/channels/OrInput.scala +++ /dev/null @@ -1,62 +0,0 @@ -package gopher.channels - -import scala.annotation._ -import scala.concurrent._ -import scala.util._ -import java.util.concurrent.atomic.AtomicBoolean - -import gopher._ - -/** - * Input, which combine two other inputs. - * - * can be created with '|' operator. - * - * {{{ - * val x = read(x|y) - * }}} - */ -class OrInput[A](x:Input[A],y:Input[A]) extends Input[A] -{ - - - def cbread[B](f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]], flwt: FlowTermination[B] ): Unit = - { - val cBegin = new AtomicBoolean(false) - val cEnd = new AtomicBoolean(false) - - @tailrec - def cf(cont:ContRead[A,B]): Option[ContRead.In[A]=>Future[Continuated[B]]] = - { - if (cBegin.compareAndSet(false,true)) { - f(cont) match { - case sf1@Some(f1) => cEnd.set(true) - sf1 - case None => cBegin.set(false) - None - } - } else if (cEnd.get()) { - None - } else { - // own spin-lock: wait until second instance will completely processed. - // this is near impossible situation: when both inputs and outputs - // raise signal at the same time, A start check aviability of handler, - // B enter the section when aviability checking is not finished. - while(cBegin.get() && !cEnd.get()) { - // TODO: slip tick ? - Thread.`yield`(); - } - if (cEnd.get()) None else cf(cont) - } - } - x.cbread(cf, flwt) - y.cbread(cf, flwt) - } - - // | is left-associative, so (x|y|z|v).api better be v.api, - def api = y.api - - override def toString() = s"(${x}|${y})" - -} - diff --git a/src/main/scala/gopher/channels/Output.scala b/src/main/scala/gopher/channels/Output.scala deleted file mode 100644 index b46120e5..00000000 --- a/src/main/scala/gopher/channels/Output.scala +++ /dev/null @@ -1,144 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.async.Async._ -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ - -/** - * Entity, where we can write objects of type A. - * - */ -trait Output[A] extends GopherAPIProvider -{ - - type ~> = A - type writeExp[X] = A - type write = A - - - /** - * apply f and send result to channels processor. 
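// A sketch (not part of these sources) of writing through this trait's asynchronous
// operations: awrite sends one value, awriteAll writes a whole collection. The output
// parameter is hypothetical; an ExecutionContext is assumed for sequencing the futures.
import scala.concurrent.{ExecutionContext, Future}
import gopher.channels.Output

def produce(out: Output[Int])(implicit ec: ExecutionContext): Future[Unit] =
  for {
    _ <- out.awrite(1)                 // async write of a single value
    _ <- out.awriteAll(List(2, 3, 4))  // write each element of the collection in order
  } yield ()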
- */ - def cbwrite[B](f: ContWrite[A,B] => Option[ - (A,Future[Continuated[B]]) - ], - ft: FlowTermination[B]): Unit - - def api: GopherAPI - - def awrite(a:A):Future[A] = - { - val ft = PromiseFlowTermination[A]() - cbwrite[A]( cont => { - Some((a,Future.successful(Done(a,ft)))) - }, - ft - ) - ft.future - } - - /** - * 'blocking' write of 'a' to channel. - * Note, that this method can be called only inside - * 'go' or 'async' blocks. - **/ - def write(a:A):A = macro Output.writeImpl[A] - - /** - * shortcut for blocking write. - */ - def <~ (a:A):Output[A] = macro Output.writeWithBuilderImpl[A] - - /** - * shortcut for blocking write. - */ - def !(a:A):Unit = macro Output.writeImpl[A] - - - def awriteAll[C <: Iterable[A]](c:C):Future[Unit] = - { - if (c.isEmpty) { - Future successful (()) - } else { - val ft = PromiseFlowTermination[Unit] - val it = c.iterator - def f(cont:ContWrite[A,Unit]):Option[(A,Future[Continuated[Unit]])]= - { - val n = it.next() - if (it.hasNext) { - Some((n,Future successful ContWrite(f,this,ft))) - } else { - Some((n, Future successful Done((), ft) )) - } - } - cbwrite(f,ft) - ft.future - } - } - - def writeAll[C <: Iterable[A]](it:C):Unit = macro Output.writeAllImpl[A,C] - - def unfold[S](s:S)(f:S=>(S,A)):Unit = - { - var ca=f(s) - val fs = api.select.forever - fs.writingWithFlowTerminationAsync[A](this,ca._2,(ec,ft,a) => Future successful {ca=f(ca._1)} ) - fs.go - } - - /** - *provide pair from Output and Input `(ready, timeouts)` such that writing to `ready` - * will case writing to `output` and if it was not completed during ``timeout` than - * appropriative duration will be availabe in `timeouts` input. - * - *``` - *val (chReady, chTimeouts) = ch withOutputTimeouts (5 seconds) - *select.forever { - * case x: chReady.write if (x==somethingToWrite) => - * Console.println(s" \${x} send") - * case t: chTimeouts.read => - * Console.println(s"timeout during writing") - *} - *``` - **/ - def withOutputTimeouts(timeout: FiniteDuration): (Output[A],Input[FiniteDuration]) = - new OutputWithTimeouts(this, timeout).pair - -} - -object Output -{ - - def writeImpl[A](c:Context)(a:c.Expr[A]):c.Expr[A] = - { - import c.universe._ - c.Expr[A](q"scala.async.Async.await(${c.prefix}.awrite(${a}))") - } - - def writeAllImpl[A,C](c:Context)(it:c.Expr[C]):c.Expr[Unit] = - { - import c.universe._ - c.Expr[Unit](q"scala.async.Async.await(${c.prefix}.writeAll(${it}))") - } - - - def writeWithBuilderImpl[A](c:Context)(a:c.Expr[A]):c.Expr[Output[A]] = - { - import c.universe._ - val retval = c.Expr[Output[A]]( - q"""{ - val prefix = ${c.prefix} - scala.async.Async.await{prefix.awrite(${a});{}} - prefix - } - """ - ) - retval - } - - -} diff --git a/src/main/scala/gopher/channels/OutputWithTimeouts.scala b/src/main/scala/gopher/channels/OutputWithTimeouts.scala deleted file mode 100644 index fa587711..00000000 --- a/src/main/scala/gopher/channels/OutputWithTimeouts.scala +++ /dev/null @@ -1,54 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.async.Async._ -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ - -/** - * - * - */ -class OutputWithTimeouts[A](origin: Output[A], timeout: FiniteDuration) -{ - - - def pair:(Output[A],Input[FiniteDuration]) = (wrapped, timeouts) - - val timeouts: Channel[FiniteDuration] = api.makeChannel[FiniteDuration]() - - val wrapped: Output[A] = new Output[A] { - - def cbwrite[B](f: ContWrite[A,B] => 
Option[(A,Future[Continuated[B]])], - ft: FlowTermination[B]): Unit = - { - val c = api.actorSystem.scheduler.scheduleOnce(timeout){ - timeouts.awrite(timeout) - }(api.executionContext) - def fIn(cont: ContWrite[A,B]):Option[(A,Future[Continuated[B]])] = - { - try { - f(ContWrite(f,this,ft)) map { case (a,next) => - c.cancel() - (a,next) - } - } catch { - case ex:ChannelClosedException => timeouts.close() - throw ex - } - } - - origin.cbwrite(fIn,ft) - } - - def api = origin.api - - } - - def api = origin.api - -} - diff --git a/src/main/scala/gopher/channels/PromiseFlowTermination.scala b/src/main/scala/gopher/channels/PromiseFlowTermination.scala deleted file mode 100644 index a50cb949..00000000 --- a/src/main/scala/gopher/channels/PromiseFlowTermination.scala +++ /dev/null @@ -1,50 +0,0 @@ -package gopher.channels - -import scala.language.postfixOps -import scala.concurrent._ -import scala.util._ -import gopher._ - -trait PromiseFlowTermination[A] extends FlowTermination[A] -{ - - def doThrow(e: Throwable): Unit = - { - if (isCompleted) { - import ExecutionContext.Implicits.global - p.future.onComplete{ - case Success(x) => - // success was before throw, ignoring. - case Failure(prevEx) => - //prevEx.printStackTrace(); - } - } else { - p failure e - } - } - - def doExit(a: A): A = - { - p trySuccess a - a - } - - def future = - p future - - def isCompleted = p.isCompleted - - def throwIfNotCompleted(ex: Throwable):Unit = - p.tryFailure(ex) - - def completeWith(other: Future[A]): Unit = - p.completeWith(other) - - private[this] val p = Promise[A]() - -} - -object PromiseFlowTermination -{ - def apply[A]() = new PromiseFlowTermination[A]() {} -} diff --git a/src/main/scala/gopher/channels/Selector.scala b/src/main/scala/gopher/channels/Selector.scala deleted file mode 100644 index b5f8d332..00000000 --- a/src/main/scala/gopher/channels/Selector.scala +++ /dev/null @@ -1,267 +0,0 @@ -package gopher.channels - -import gopher._ -import akka.actor._ -import akka.pattern._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.language.postfixOps -import scala.util._ -import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.atomic.AtomicLong -import java.util.concurrent.ConcurrentLinkedQueue - - -class Selector[A](api: GopherAPI) extends PromiseFlowTermination[A] -{ - - thisSelector => - - def addReader[E](ch:Input[E],f: ContRead[E,A] => Option[ContRead.In[E]=>Future[Continuated[A]]]): Unit = - { - waiters add makeLocked(ContRead(f, ch, this)) - } - - def addWriter[E](ch:Output[E], f: ContWrite[E,A] => Option[(E,Future[Continuated[A]])]): Unit = - { - waiters add makeLocked(ContWrite(f,ch,this)) - } - - def addTimeout(timeout:FiniteDuration, f: Skip[A] => Option[Future[Continuated[A]]]):Unit = - { - if (!timeoutRecord.isDefined) { - timeoutRecord.lastNOperations = nOperations.get - timeoutRecord.timeout = timeout - timeoutRecord.waiter = makeLocked(Skip(f,this)) - } else { - throw new IllegalStateException("select must have only one timeout entry") - } - } - - def run:Future[A] = - { - sendWaits() - if (timeoutRecord.isDefined) { - scheduleTimeout() - } - future - } - - - private[channels] def lockedRead[E](f:ContRead.AuxF[E,A],ch:Input[E],ft:FlowTermination[A]):ContRead.AuxF[E,A]= - { - def f1(cont:ContRead[E,A]): Option[ContRead.In[E]=>Future[Continuated[A]]] = - tryLocked(f(ContRead(f,ch,ft)),cont,"read") map { q => - in => unlockAfter( - try { - q(in) - } catch { - case e: Throwable => ft.doThrow(e) - Future successful Never - }, - ft,"read") - } 
- f1 - } - - private[channels] def lockedWrite[E](f:ContWrite.AuxF[E,A],ch:Output[E],ft:FlowTermination[A]):ContWrite.AuxF[E,A]= - { (cont) => - tryLocked(f(ContWrite(f,ch,ft)), cont, "write") map { - case (a,future) => - (a,unlockAfter(future, ft ,"write")) - } - } - - private[channels] def lockedSkip(f:Skip.AuxF[A],ft:FlowTermination[A]):Skip.AuxF[A]= - { cont => - tryLocked(f(Skip(f,ft)), cont , "skip") map { - unlockAfter(_,ft,"skip") - } - } - - private[channels] def makeLocked(block: Continuated[A]): Continuated[A] = - { - block match { - case cr@ContRead( f, ch, ft) => ContRead(lockedRead(f,ch,ft),ch,ft) - case cw@ContWrite(f, ch, ft) => ContWrite(lockedWrite(f,ch,ft),ch,ft) - case sk@Skip(f,ft) => Skip(lockedSkip(f,ft),ft) - case dn@Done(f,ft) => dn - case Never => Never - } - } - - - @inline - private[this] def tryLocked[X](body: => Option[X], cont: FlowContinuated[A], dstr: String):Option[X] = - if (tryLock()) { - try { - body match { - case None => mustUnlock(dstr,cont.flowTermination) - waiters add cont - None - case sx@Some(x) => - nOperations.incrementAndGet() - sx - } - }catch{ - case ex: Throwable => - unlock(dstr) - cont.flowTermination.doThrow(ex) - None - } - } else { - toWaiters(cont) - None - } - - - @inline - private[channels] def unlockAfter(f:Future[Continuated[A]], flowTermination: FlowTermination[A], dstr: String): Future[Continuated[A]] = - f.transform( - next => { - if (mustUnlock(dstr,flowTermination)) { - if (timeoutRecord.isDefined) - scheduleTimeout() - makeLocked(next) - } else Never - }, - ex => { mustUnlock( dstr, flowTermination); ex } - ) - - private[this] def toWaiters(cont:Continuated[A]):Unit= - { - waiters.add(cont) - if (!lockFlag.get()) { - // possible, when we call waiters.put locked, but then in other thread it was - // unlocked and queue cleaned before waiters modify one. - sendWaits() - } - } - - - private[this] def scheduleTimeout():Unit = - { - - def tickOperation():Unit = - { - if (!isCompleted && timeoutRecord.isDefined) { - val currentNOperations = nOperations.get() - if (currentNOperations == timeoutRecord.lastNOperations) { - // fire - timeoutRecord.waiter match { - // TODO: add timeout field to skip - case sk@Skip(f,ft) => f(sk) foreach { futureNext => - futureNext.onComplete { - case Success(next) => if (!isCompleted) { - next match { - case sk@Skip(f,ft) if (ft eq this) => timeoutRecord.waiter = sk - case other => - timeoutRecord.waiter = Never - api.continuatedProcessorRef ! other - } - } - case Failure(ex) => if (!isCompleted) ft.doThrow(ex) - } - } - case other => api.continuatedProcessorRef ! other - } - } - } - } - - if (timeoutRecord.isDefined) { - // TODO: make CAS - timeoutRecord.lastNOperations = nOperations.get() - val scheduler = api.actorSystem.scheduler - scheduler.scheduleOnce(timeoutRecord.timeout)(tickOperation) - } - - } - - def isLocked: Boolean = lockFlag.get() - - private[this] def tryLock(): Boolean = lockFlag.compareAndSet(false,true) - - private[this] def unlock(debugFrom: String): Boolean = - { - val retval = lockFlag.compareAndSet(true,false) - //if (retval) { - sendWaits() - //} - retval - } - - private[this] def mustUnlock(debugFrom: String, ft: FlowTermination[_]): Boolean = - { - if (!unlock(debugFrom)) { - try { - throw new IllegalStateException("other fiber occypied select 'lock'") - }catch{ - //!!! 
- case ex: Exception => ft.doThrow(ex) - ex.printStackTrace() - } - false - } else true - } - - - - private[this] def sendWaits(waiters: ConcurrentLinkedQueue[Continuated[A]] = waiters): Unit = - { - // concurrent structure fpr priority queue - var skips = List[Continuated[A]]() - var nSend = 0 - while(!waiters.isEmpty && !lockFlag.get()) { - val c = waiters.poll - if (!(c eq null)) { - nSend = nSend + 1 - c match { - case sk@Skip(_,_) => - skips = c.asInstanceOf[Continuated[A]]::skips - case _ => - processor ! c - } - } - } - if (!lockFlag.get) { - //planIdle - //TODO: plan instead direct send. - for(c <- skips) { - (processor.ask(c)(10 seconds)).foreach(x => - waiters.add(x.asInstanceOf[Continuated[A]]) - ) - } - } - } - - private[this] val log = api.actorSystem.log - - // false when unlocked, true otherwise. - private[this] val lockFlag: AtomicBoolean = new AtomicBoolean(false) - - // number of operations, increased during each lock/unlock. - // used for idle and timeout detection - private[channels] val nOperations = new AtomicLong(); - - private[this] val waiters: ConcurrentLinkedQueue[Continuated[A]] = new ConcurrentLinkedQueue() - - private[this] class TimeoutRecord( - var lastNOperations: Long, - var timeout: FiniteDuration, - var waiter: Continuated[A] - ) { - def isDefined:Boolean = (waiter != Never) - } - - private[this] val timeoutRecord: TimeoutRecord = new TimeoutRecord(0L,0 milliseconds, Never) - - private[this] val processor = api.continuatedProcessorRef - - private[this] implicit val executionContext: ExecutionContext = api.executionContext - - -} - - - diff --git a/src/main/scala/gopher/channels/SelectorArguments.scala b/src/main/scala/gopher/channels/SelectorArguments.scala deleted file mode 100644 index d95abea0..00000000 --- a/src/main/scala/gopher/channels/SelectorArguments.scala +++ /dev/null @@ -1,109 +0,0 @@ -package gopher.channels - -import gopher._ -import scala.concurrent._ - -sealed trait ReadSelectorArgument[A,B] -{ - def normalizedFun: ContRead[A,B] => Option[ContRead.In[A]=>Future[Continuated[B]]] -} - -case class AsyncFullReadSelectorArgument[A,B]( - f: ContRead[A,B] => Option[ContRead.In[A]=>Future[Continuated[B]]] - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = f -} - -case class AsyncNoOptionReadSelectorArgument[A,B]( - f: ContRead[A,B] => (ContRead.In[A]=>Future[Continuated[B]]) - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = ( cont => Some(f(cont)) ) -} - -case class AsyncNoGenReadSelectorArgument[A,B]( - f: ContRead[A,B] => (A=>Future[Continuated[B]]) - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = ( cont => Some(ContRead.liftIn(cont)(f(cont))) ) -} - -case class AsyncPairReadSelectorArgument[A,B]( - f: (A, ContRead[A,B]) => Future[Continuated[B]] - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = ( c => Some(ContRead.liftIn(c)(f(_,c))) ) -} - -case class SyncReadSelectorArgument[A,B]( - f: ContRead[A,B] => (ContRead.In[A] => Continuated[B]) - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = ( cont => Some( gen => Future successful f(cont)(gen) ) ) -} - -case class SyncPairReadSelectorArgument[A,B]( - f: (A, ContRead[A,B]) => Continuated[B] - ) extends ReadSelectorArgument[A,B] -{ - def normalizedFun = ( c => Some(ContRead.liftIn(c)(a => Future successful f(a,c))) ) -} - -sealed trait WriteSelectorArgument[A,B] -{ - def normalizedFun: ContWrite[A,B] => Option[(A,Future[Continuated[B]])] -} - -case class AsyncFullWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => 
Option[(A,Future[Continuated[B]])] - ) extends WriteSelectorArgument[A,B] -{ - def normalizedFun = f -} - -case class AsyncNoOptWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => (A,Future[Continuated[B]]) - ) extends WriteSelectorArgument[A,B] -{ - def normalizedFun = (c => Some(f(c))) -} - -case class SyncWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => (A,Continuated[B]) - ) extends WriteSelectorArgument[A,B] -{ - def normalizedFun = {c => - val (a, next) = f(c) - Some((a,Future successful next)) - } - -} - -sealed trait SkipSelectorArgument[A] -{ - def normalizedFun: Skip[A] => Option[Future[Continuated[A]]] -} - -case class AsyncFullSkipSelectorArgument[A]( - f: Skip[A] => Option[Future[Continuated[A]]] - ) extends SkipSelectorArgument[A] -{ - def normalizedFun = f -} - -case class AsyncNoOptSkipSelectorArgument[A]( - f: Skip[A] => Future[Continuated[A]] - ) extends SkipSelectorArgument[A] -{ - def normalizedFun = { c => Some(f(c)) } -} - -case class SyncSelectorArgument[A]( - f: Skip[A] => Continuated[A] - ) extends SkipSelectorArgument[A] -{ - def normalizedFun = { c => Some(Future successful f(c)) } -} - - diff --git a/src/main/scala/gopher/channels/SelectorBuilder.scala b/src/main/scala/gopher/channels/SelectorBuilder.scala deleted file mode 100644 index 10777bd4..00000000 --- a/src/main/scala/gopher/channels/SelectorBuilder.scala +++ /dev/null @@ -1,499 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import gopher.goasync._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ - -trait SelectorBuilder[A] -{ - - type timeout = FiniteDuration - - def api: GopherAPI - - def onRead[E](ch:Input[E])(arg: ReadSelectorArgument[E,A]): this.type = - { - selector.addReader(ch,arg.normalizedFun) - this - } - - def onWrite[E](ch:Output[E])(arg: WriteSelectorArgument[E,A]): this.type = - { - selector.addWriter(ch,arg.normalizedFun) - this - } - - def onIdle(arg: SkipSelectorArgument[A]): this.type = - { - withTimeout(api.idleTimeout)(arg.normalizedFun) - this - } - - def onTimeout(t:FiniteDuration)(arg: SkipSelectorArgument[A]): this.type = - withTimeout(t)(arg.normalizedFun) - - @inline - def withReader[B](ch:Input[B], f: ContRead[B,A] => Option[ContRead.In[B]=>Future[Continuated[A]]]): this.type = - { - selector.addReader(ch,f) - this - } - - @inline - def withWriter[B](ch:Output[B], f: ContWrite[B,A] => Option[(B,Future[Continuated[A]])] ): this.type = - { - selector.addWriter(ch,f) - this - } - - @inline - def withIdle(f: Skip[A] => Option[Future[Continuated[A]]]):this.type = - { - withTimeout(api.idleTimeout)(f) - } - - @inline - def withTimeout(t:FiniteDuration)(f: Skip[A] => Option[Future[Continuated[A]]]):this.type = - { - selector.addTimeout(t,f) - this - } - - final def go: Future[A] = selectorRun - - // for call from SelectorTransforment wich have another 'go' - def selectorRun: Future[A] = selector.run - - implicit def ec: ExecutionContext = api.executionContext - - private[gopher] val selector=new Selector[A](api) - - // used for reading from future - @inline - def futureInput[A](f:Future[A]):FutureInput[A]=api.futureInput(f) - -} - - -class SelectorBuilderImpl(val c: Context) extends ASTUtilImpl -{ - - import c.universe._ - - def foreach[T](f:c.Expr[Any=>T]):c.Expr[T] = - { - val builder = f.tree match { - case Function(forvals,Match(choice,cases)) => - // TOD: check that forvals and choice 
are same - foreachBuildMatch(cases) - // TODO: think, are we need syntax with common-expr ? - //case Function(forvals,Block(commonExpr,Match(choice,cases))) => - // foreachBuildMatch(forvals,choice,cases, commonExpr) - case Function(a,b) => - c.abort(f.tree.pos, "match expected in gopher select loop, have: ${MacroUtil.shortString(b)} "); - case _ => { - c.abort(f.tree.pos, "match expected in gopher select loop, have: ${MacroUtil.shortString(f.tree)}"); - } - } - c.Expr[T](MacroUtil.cleanUntypecheck(c)(q"gopher.goasync.AsyncWrapper.await(${builder}.go)")) - } - - def foreachBuildMatch(cases:List[c.universe.CaseDef]):c.Tree = - { - import c.universe._ - val bn = TermName(c.freshName) - val calls = transformSelectMatch(bn,cases) - q"""..${q"val ${bn} = ${c.prefix}" :: calls}""" - } - - def transformSelectMatch(bn: c.universe.TermName, cases:List[c.universe.CaseDef]):List[c.Tree] = - { - import c.universe._ - cases map { cs => - cs.pat match { - case Bind(ident, t) => foreachTransformReadWriteTimeoutCaseDef(bn,cs) - case Ident(TermName("_")) => foreachTransformIdleCaseDef(bn,cs) - case _ => c.abort(cs.pat.pos,"expected Bind or Default in pattern, have:"+cs.pat) - } - } - } - - def foreachTransformReadWriteTimeoutCaseDef(builderName:c.TermName, - caseDef: c.universe.CaseDef):c.Tree= - { - - val symbolsToErase = Set(caseDef.pat.symbol, caseDef.pat.symbol.owner) - - // when we split cassDef on few functions, than sometines, symbols - // entries in identifier tree are not cleared. - // So, we 'reset' symbols which belong to caseDef which will be erased by macros - // //TODO: check, may be will be better to use scala-compiler internal API and changeOwner instead. - // yet one alternative - untypedef 'up' term - def clearCaseDefOwner(oldName:c.Name, newName: c.TermName, tree:Tree):Tree = - { - val oldTermName = oldName.toTermName - - def changeName(name: c.TermName):c.TermName = - if (name==oldTermName) newName else name - - def ownerWillBeErased(sym:Symbol):Boolean = - symbolsToErase.contains(sym) - - class ClearTransformer extends Transformer { - - var insideMustBeErased: Boolean = false - - override def transform(tree:Tree): Tree = - { - tree match { - case Typed(ident@Ident(`oldTermName`),_) => if (ident.symbol!=null && ownerWillBeErased(ident.symbol)) - atPos(tree.pos)(Ident(newName)) - else - super.transform(tree) - case ident@Ident(`oldTermName`) => if (ident.symbol!=null && ownerWillBeErased(ident.symbol)) - atPos(tree.pos)(Ident(newName)) - else - super.transform(tree) - case _ => - if (tree.symbol != null && tree.symbol != NoSymbol) { - if (ownerWillBeErased(tree.symbol)) { - var prevMustBeErased = insideMustBeErased - insideMustBeErased = true - try { - val (done, rtree) = doClear(tree) - insideMustBeErased = prevMustBeErased - if (done) { - rtree - } else { - super.transform(tree) - } - }catch{ - case ex: Exception => - System.err.println(s"ex, tree.symbol=${tree.symbol}") - ex.printStackTrace() - throw ex - } - } else super.transform(tree) - } else { - if (false && insideMustBeErased) { - val (done, rtree) = doClear(tree) - if (done) rtree else super.transform(rtree) - } else - super.transform(tree) - } - } - } - - def doClear(tree: c.Tree):(Boolean, c.Tree) = - { - tree match { - case Ident(name:TermName) => - (true, atPos(tree.pos)(Ident(changeName(name)))) - case Bind(name:TermName,body) => - (true, atPos(tree.pos)(Bind(changeName(name),transform(body))) ) - case ValDef(mods,name,tpt,rhs) => - (true, atPos(tree.pos)(ValDef(mods,changeName(name),transform(tpt),transform(rhs)))) - case 
Select(Ident(name:TermName),proj) => - (true, atPos(tree.pos)(Select(Ident(changeName(name)),proj)) ) - case _ => - // (false, tree) - throw new IllegalStateException("unexpected shapr") - c.abort(tree.pos,"""Unexpected shape for tree with caseDef owner, which erased by macro, - please, fire bug-report to scala-gopher, raw="""+showRaw(tree)) - } - } - - } - val transformer = new ClearTransformer() - transformer.transform(tree) - } - - def retrieveOriginal(tp:Tree):Tree = - tp match { - case tpt: TypeTree => if (tpt.original.isEmpty) tpt else tpt.original - case _ => tp - } - - def unUnapplyPattern(x:Tree):Tree = - x match { - case Bind(name, UnApply(_,List(t@Typed(_,_))) ) => Bind(name,t) - case _ => x - } - - val retval = unUnapplyPattern(caseDef.pat) match { - case Bind(name,Typed(_,tp)) => - val termName = name.toTermName - // when debug problems on later compilation steps, you can create freshName and see visually: - // is oldName steel leaked to later compilation phases. - //val newName = c.freshName(termName) - val newName = termName - val tpoa = clearCaseDefOwner(name, newName, retrieveOriginal(tp)) - val tpo = MacroUtil.skipAnnotation(c)( tpoa ) - val param = ValDef(Modifiers(Flag.PARAM), newName, tpoa ,EmptyTree) - val body = clearCaseDefOwner(name,newName,caseDef.body) - tpo match { - case Select(ch,TypeName("read")) => - if (!caseDef.guard.isEmpty) { - c.abort(caseDef.guard.pos,"guard is not supported in read in select case") - } - val reading = q"${builderName}.reading(${ch}){ ${param} => ${body} }" - atPos(caseDef.pat.pos)(reading) - case Select(ch,TypeName("write")) => - val expression = if (!caseDef.guard.isEmpty) { - parseGuardInSelectorCaseDef(termName,caseDef.guard) - } else { - atPos(caseDef.pat.pos)(Ident(termName)) - } - val writing = q"${builderName}.writing(${ch},${expression})(${param} => ${body} )" - atPos(caseDef.pat.pos)(writing) - case Select(select,TypeName("timeout")) => - val expression = if (!caseDef.guard.isEmpty) { - parseGuardInSelectorCaseDef(termName,caseDef.guard) - } else { - atPos(caseDef.pat.pos)(q"implicitly[akka.util.Timeout].duration") - } - val timeout = q"${builderName}.timeout(${expression})(${param} => ${body} )" - atPos(caseDef.pat.pos)(timeout) - case _ => - if (caseDef.guard.isEmpty) { - c.abort(tp.pos, "row caseDef:"+showRaw(caseDef) ); - c.abort(tp.pos, "match pattern in select without guard must be in form x:channel.write or x:channel.read"); - } else { - parseGuardInSelectorCaseDef(termName, caseDef.guard) match { - case q"scala.async.Async.await[${t}](${readed}.aread):${t1}" => - // here is 'reverse' of out read macros - val channel = readed match { - case q"gopher.`package`.FutureWithRead[${t2}](${future})" => - q"${builderName}.futureInput(${future})" - case _ => - if (readed.tpe <:< typeOf[gopher.channels.Input[_]]) { - readed - } else if (readed.tpe <:< typeOf[gopher.`package`.FutureWithRead[_]]) { - q"${builderName}.futureInput(${readed}.aread)" - } else { - c.abort(readed.pos,"reading in select pattern guide must be channel or future, we have:"+readed.tpe) - } - } - q"${builderName}.reading(${channel})(${param} => ${body} )" - case q"scala.async.Async.await[${t}](${ch}.awrite($expression)):${t1}" => - q"${builderName}.writing(${ch},${expression})(${param} => ${body} )" - case x@_ => - c.abort(tp.pos, "can't parse match guard: "+x); - } - - } - } - case Bind(name,x) => - val rawToShow = x match { - case Typed(_,tp) => - MacroUtil.shortString(c)(tp) - case _ => - MacroUtil.shortString(c)(x) - } - c.abort(caseDef.pat.pos, s"match 
must be in form x:channel.write or x:channel.read, have: ${rawToShow}"); - case _ => - c.abort(caseDef.pat.pos, "match must be in form x:channel.write or x:channel.read or x:select.timeout"); - } - - retval - - } - - def foreachTransformIdleCaseDef(builderName:c.TermName, caseDef: c.universe.CaseDef):c.Tree= - { - if (!caseDef.guard.isEmpty) { - c.abort(caseDef.guard.pos,"guard is not supported in select case") - } - q"${builderName}.timeout(${builderName}.api.idleTimeout)( _ => ${caseDef.body})" - } - - def mapBuildMatch[T:c.WeakTypeTag](cases:List[c.universe.CaseDef]):c.Tree = - { - val bn = TermName(c.freshName) - val calls = transformSelectMatch(bn,cases) - q"""..${q"val ${bn} = ${c.prefix}.inputBuilder[${weakTypeOf[T]}]()" :: calls}""" - } - - def map[T:c.WeakTypeTag](f:c.Expr[Any=>T]):c.Expr[Input[T]] = - { - val builder = f.tree match { - case Function(forvals,Match(choice,cases)) => - mapBuildMatch[T](cases) - case Function(a,b) => - c.abort(f.tree.pos, "match expected in gopher select map, have: ${MacroUtil.shortString(b)} "); - case _ => - c.abort(f.tree.pos, "match expected in gopher select map, have: ${MacroUtil.shortString(f.tree)}"); - - } - c.Expr[Input[T]](MacroUtil.cleanUntypecheck(c)(q"${builder}.started")) - } - - def builder[T](f:c.Expr[PartialFunction[Any,T]]):c.Tree = - { - f.tree match { - case q"{case ..$cases}" => - foreachBuildMatch(cases) - case _ => c.abort(f.tree.pos,"expected partial function with syntax case ... =>, have ${MacroUtil.shortString(f.tree)}"); - } - } - - def apply[T](f:c.Expr[PartialFunction[Any,T]]):c.Expr[Future[T]] = - { - val b = builder[T](f) - c.Expr[Future[T]](c.untypecheck(q"${b}.go")) - } - - /** - * processor: loop => just add waiters to this selector. - */ - def loop[T](f:c.Expr[PartialFunction[Any,T]]):c.Expr[Unit] = - { - val b = builder[T](f) - c.Expr[Unit](c.untypecheck(q"{selectorInit = ()=>${b}; selectorInit()}")) - } - - def input[T:c.WeakTypeTag](f:c.Expr[PartialFunction[Any,T]]):c.Expr[Input[T]] = - { - val builder = f.tree match { - case q"{case ..$cases}" => - mapBuildMatch[T](cases) - case _ => c.abort(f.tree.pos,"expected partial function with syntax case ... 
=>, have ${MacroUtil.shortString(f.tree)}"); - } - c.Expr[Input[T]](MacroUtil.cleanUntypecheck(c)(q"${builder}.started")) - } - -} - -object SelectorBuilder -{ - - def readingImpl[A,B:c.WeakTypeTag,S](c:Context)(ch:c.Expr[Input[A]])(f:c.Expr[A=>B]):c.Expr[S] = - { - import c.universe._ - f.tree match { - case Function(valdefs, body) => - buildAsyncCall[B,S](c)(valdefs,body, - { (nvaldefs, nbody) => - q"""${c.prefix}.readingWithFlowTerminationAsync(${ch}, - ${Function(nvaldefs,nbody)} - ) - """ - }) - case _ => c.abort(c.enclosingPosition,"argument of reading.apply must be function") - } - } - - def writingImpl[A,T:c.WeakTypeTag,S](c:Context)(ch:c.Expr[Output[A]],x:c.Expr[A])(f:c.Expr[A=>T]):c.Expr[S] = - { - import c.universe._ - f.tree match { - case Function(valdefs, body) => - val retval = buildAsyncCall[T,S](c)(valdefs,body, - { (nvaldefs, nbody) => - q"""${c.prefix}.writingWithFlowTerminationAsync(${ch},${x}, - ${Function(nvaldefs,nbody)} - ) - """ - }) - retval - case _ => c.abort(c.enclosingPosition,"second argument of writing must have shape Function(x,y)") - } - } - - def transformDelayedMacroses[T:c.WeakTypeTag](c:Context)(block:c.Tree):c.Tree = - { - import c.universe._ - val transformer = new Transformer { - override def transform(tree:Tree): Tree = - tree match { - case Apply(TypeApply(Select(obj,TermName("implicitly")),List(objType)), args) => - // unresolve implicit references of specific type - if (!(obj.tpe eq null) && obj.tpe =:= typeOf[Predef.type] && - objType.tpe <:< typeOf[FlowTermination[Nothing]] - ) { - TypeApply(Select(obj,TermName("implicitly")),List(objType)) - } else { - super.transform(tree) - } - case Apply(TypeApply(Select(obj,member),objType), args) => - if (!(obj.tpe eq null) && obj.tpe =:= typeOf[CurrentFlowTermination.type] ) { - member match { - case TermName("exit") => - Apply(TypeApply(Select(obj,TermName("exitDelayed")),objType), args) - case _ => super.transform(tree) - } - } else { - super.transform(tree) - } - case Apply(Select(obj,member), args) => - if (!(obj.tpe eq null) && obj.tpe =:= typeOf[CurrentFlowTermination.type] ) { - member match { - case TermName("exit") => - Apply(Select(obj,TermName("exitDelayed")),args) - case _ => super.transform(tree) - } - } else { - super.transform(tree) - } - case _ => - super.transform(tree) - } - } - transformer.transform(block) - } - - def buildAsyncCall[T:c.WeakTypeTag,S](c:Context)(valdefs: List[c.universe.ValDef], body: c.Tree, - lastFun: (List[c.universe.ValDef], c.Tree) => c.Tree): c.Expr[S] = - { - import c.universe._ - val Seq(ft, ft1, ec, ec1) = Seq("ft","ft","ec","ec1") map (x => TermName(c.freshName(x))) - val ftParam = ValDef(Modifiers(Flag.PARAM),ft,tq"gopher.FlowTermination[${weakTypeOf[T]}]",EmptyTree) - val ecParam = ValDef(Modifiers(Flag.PARAM),ec,tq"scala.concurrent.ExecutionContext",EmptyTree) - val nvaldefs = ecParam::ftParam::valdefs - val asyncBody = GoAsync.transformAsyncBody[T](c)(body) - val nbody = q"""{ - implicit val ${ft1} = ${ft} - implicit val ${ec1} = ${ec} - gopher.goasync.AsyncWrapper.async(${transformDelayedMacroses[T](c)(asyncBody)})(${ec}) - } - """ - val newTree = lastFun(nvaldefs,nbody) - // untypecheck is necessory: otherwise exception in async internals - c.Expr[S](MacroUtil.cleanUntypecheck(c)(newTree)) - } - - def idleImpl[T:c.WeakTypeTag,S](c:Context)(body:c.Expr[T]):c.Expr[S] = - { - import c.universe._ - c.Expr[S](q"${c.prefix}.timeout(${c.prefix}.api.idleTimeout)(_ => ${body})") - } - - def 
timeoutImpl[T:c.WeakTypeTag,S](c:Context)(t:c.Expr[FiniteDuration])(f:c.Expr[FiniteDuration=>T]):c.Expr[S] = - { - import c.universe._ - f.tree match { - case Function(valdefs, body) => - val r = SelectorBuilder.buildAsyncCall[T,S](c)(valdefs,body, - { (nvaldefs, nbody) => - q"""${c.prefix}.timeoutWithFlowTerminationAsync(${t}, - ${Function(nvaldefs,nbody)} - ) - """ - }) - r - case _ => c.abort(c.enclosingPosition,"second argument of timeout must have shape Function(x,y)") - } - } - - - -} - - - diff --git a/src/main/scala/gopher/channels/SelectorFactory.scala b/src/main/scala/gopher/channels/SelectorFactory.scala deleted file mode 100644 index bbed25da..00000000 --- a/src/main/scala/gopher/channels/SelectorFactory.scala +++ /dev/null @@ -1,82 +0,0 @@ -package gopher.channels - -import scala.language.experimental.macros -import scala.reflect.macros.whitebox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.annotation.unchecked._ - - -/** - * Factory for select instantiation. - * Can be obtained via gopherAPI - * - * {{{ - * val selector = gopherApi.select.forever - * for(s <- selector) ... - * }}} - */ -class SelectFactory(val api: GopherAPI) -{ - - selectFactory => - - type timeout = FiniteDuration - - trait SelectFactoryApi - { - def api = selectFactory.api - } - - /** - * forever builder. - *@see ForeverSelectorBuilder - */ - def forever: ForeverSelectorBuilder = new ForeverSelectorBuilder with SelectFactoryApi {} - - /** - * once builder, where case clause return type is `T` - */ - def once[T]: OnceSelectorBuilder[T] = new OnceSelectorBuilder[T] with SelectFactoryApi {} - - def inputBuilder[T]() = new InputSelectorBuilder[T](api) - - /** - * generic selector builder - */ - def loop[A]: SelectorBuilder[A] = new SelectorBuilder[A] with SelectFactoryApi {} - - /** - * afold - asynchronious version of fold. 
- * {{{ - * select.afold((0,1)) { (s,(x,y)) => - * s match { - * case x: out.write => (y,x+y) - * case q: quit.read => select.exit((x,y)) - * } - * } - * }}} - */ - def afold[S](s:S)(op:(S,Any)=>S):Future[S] = macro FoldSelectorBuilderImpl.afold[S] - - def fold[S](s:S)(op:(S,Any)=>S):S = macro FoldSelectorBuilderImpl.fold[S] - - def map[B](f:Any=>B):Input[B] = macro SelectorBuilderImpl.map[B] - - def input[B](f:PartialFunction[Any,B]):Input[B] = - macro SelectorBuilderImpl.input[B] - - def amap[B](f:PartialFunction[Any,B]):Input[B] = - macro SelectorBuilderImpl.input[B] - - // - - def exit[A](a:A):A = macro CurrentFlowTermination.exitImpl[A] - - def shutdown():Unit = macro CurrentFlowTermination.shutdownImpl - -} - diff --git a/src/main/scala/gopher/channels/UnbufferedChannelActor.scala b/src/main/scala/gopher/channels/UnbufferedChannelActor.scala deleted file mode 100644 index 2d1ca542..00000000 --- a/src/main/scala/gopher/channels/UnbufferedChannelActor.scala +++ /dev/null @@ -1,134 +0,0 @@ -package gopher.channels - -import akka.actor._ - -import scala.language._ -import scala.concurrent._ -import scala.collection.immutable._ -import gopher._ - -import scala.util.control.NonFatal - - -/** - * Actor backend for channel - */ -class UnbufferedChannelActor[A](id:Long, unused:Int, api: GopherAPI) extends ChannelActor[A](id,api) -{ - - protected[this] def onContWrite(cw:ContWrite[A,_]):Unit = - { - if (closed) { - cw.flowTermination.throwIfNotCompleted(new ChannelClosedException()) - } else if (!processReaders(cw)) { - writers = writers :+ cw - } - } - - - protected[this] def onContRead(cr:ContRead[A,_]):Unit = - { - if (closed) { - processReaderClosed(cr) - } else if (!processWriters(cr)) { - readers = readers :+ cr; - } - } - - protected[this] def getNElements():Int = 0 - - - def processReaders(w: ContWrite[A,_]) : Boolean = - { - var done = false - while(!(done || readers.isEmpty)) { - val current = readers.head - readers = readers.tail - done = processReader(current,w) - } - done - } - - private[this] def processReader[B,C](reader:ContRead[A,B],writer:ContWrite[A,C]): Boolean = - reader.function(reader) match { - case Some(f1) => - writer.function(writer) match { - case Some((a,wcont)) => - Future{ - val cont = f1(ContRead.In value a) - api.continue(cont, reader.flowTermination) - }(api.executionContext) - api.continue(wcont, writer.flowTermination) - true - case None => - val cont = f1(ContRead.Skip) - api.continue(cont, reader.flowTermination) - false - } - case None => - false - } - - def processWriters[C](reader:ContRead[A,C]): Boolean = - { - if (writers.isEmpty) { - false - } else { - val r = try { - reader.function(reader) - } catch { - case NonFatal(ex) => ex.printStackTrace() - throw ex - } - r match { - case Some(f1) => - var done = false - while(!writers.isEmpty && !done) { - val current = writers.head - writers = writers.tail - done = processWriter(current,f1,reader) - } - if (!done) { - f1(ContRead.Skip) - } - done - case None => - false - } - } - } - - private[this] def processWriter[B,C](writer:ContWrite[A,B], - f1:ContRead.In[A]=>Future[Continuated[C]], - reader:ContRead[A,C]): Boolean = - writer.function(writer) match { - case Some((a,wcont)) => - Future { - val rcont = f1(ContRead.In value a) - api.continue(rcont,reader.flowTermination) - } - api.continue(wcont,writer.flowTermination) - true - case None => - false - } - - - def stopIfEmpty: Boolean = - { - require(closed==true) - stopReaders() - stopWriters() - if (nRefs == 0) { - // here we leave 'closed' channels in 
actor-system untile they will be - // garbage-collected. TODO: think about actual stop ? - self ! GracefullChannelStop - } - true - } - - - private[this] implicit def ec: ExecutionContext = api.executionContext - - -} diff --git a/src/main/scala/gopher/channels/ZippedInput.scala b/src/main/scala/gopher/channels/ZippedInput.scala deleted file mode 100644 index a0982f04..00000000 --- a/src/main/scala/gopher/channels/ZippedInput.scala +++ /dev/null @@ -1,116 +0,0 @@ -package gopher.channels - -import scala.concurrent._ -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import scala.util._ -import java.util.concurrent.ConcurrentLinkedQueue -import gopher._ - - -class ZippedInput[A,B](override val api: GopherAPI, inputA: Input[A], inputB: Input[B]) extends Input[(A,B)] -{ - - import ZippedInput._ - - val pairs = new ConcurrentLinkedQueue[(A,B)]() - val readers = new ConcurrentLinkedQueue[ContRead[(A,B),_]] - - - def cbread[C](f: (ContRead[(A,B),C] => Option[ContRead.In[(A,B)] => Future[Continuated[C]]]), flwt: FlowTermination[C] ): Unit = - { - if (!pairs.isEmpty) { - implicit val ec = api.executionContext - f(ContRead(f,this,flwt)) match { - case Some(f1) => - val ready = pairs.poll(); - val in = if (! (ready eq null) ) { - ContRead.Value(ready) - } else { - // unfortunelly, somebody has been eat our pair between !empty and poll() - ContRead.Skip - } - api.continue(f1(in), flwt) - case None => /* do nothing */ - } - } else { - readers.add(ContRead(f,this,flwt)) - val s = new State[A,B] - inputA.cbread[C](cont => Some(ContRead.liftIn(cont)(a => { - val toFire = s.synchronized{ - s.oa=Some(a) - s.ob.isDefined - } - fireAttempt(toFire, s) - } )) - , flwt) - inputB.cbread[C](cont => - Some(ContRead.liftIn(cont)(b => { - val toFire = s.synchronized{ - s.ob = Some(b) - s.oa.isDefined - } - fireAttempt(toFire,s) - } )) - , flwt) - } - - - def fireAttempt(toFire: Boolean, s:State[A,B]):Future[Continuated[C]] = - { - if (toFire) { - s match { - case State(Some(a),Some(b)) => - val pair = (a,b) - val cont = readers.poll().asInstanceOf[ContRead[(A,B),({type R})#R]] - // existencial type not allow cont.function(cont) - if (cont eq null) { - pairs.add(pair) - } else { - implicit val ec = api.executionContext - cont.function(cont) match { - case Some(f1) => - api.continue(f1(ContRead.Value(pair)),cont.flowTermination) - case None => - pairs.add(pair) - } - } - case _ => throw new IllegalStateException("Impossible: fully-filled state is a precondition"); - } - } - // always return never, since real continuated we passed after f1 from readers queue was executed. - // note, that we can't return it direct here, becouse type of readers head continuation can be - // other than C, as in next scenario: - // 1. Reader R1 call cbread and start to collect (a1,b1) (readers <- R1) - // 2. Reader R2 call cbread and start to collect (a2,b2) (readers <- R2) - // 3. (a1,b1) collected, but R1 is locked. (pairs <- (a1,a2), readers -> drop R1) - // in such case fireAttempt for R1 will process R2 (wich can have different C in FlowTermination[C]) - Future successful Never - } - - - } - - -} - -object ZippedInput -{ - - // can't be case class: compiler error when annotating variables. 
- // see https://issues.scala-lang.org/browse/SI-8873 - class State[A,B] - { - @volatile var oa:Option[A] = None - @volatile var ob:Option[B] = None - } - - object State - { - def unapply[A,B](s:State[A,B]) = Some((s.oa,s.ob)) - } - -} - - diff --git a/src/main/scala/gopher/channels/package.scala b/src/main/scala/gopher/channels/package.scala deleted file mode 100644 index 6a11c4ba..00000000 --- a/src/main/scala/gopher/channels/package.scala +++ /dev/null @@ -1,27 +0,0 @@ - -package gopher - -/** - * - * == Overview == - * - * - * - * - * == Internals == - * - * Core entity is [[Continuated]] which provide iteratee-like structure for reading and writing. - * Instance of [[Continuated]] represent one step of computations and leave in queue inside [[ChannelProcessor]] or [[ChannelActor]] - * [[Selector]] transform [[Continuated]] to executed exclusive with each other within one selector. Also we have [[IdleDetector]] which - * determinate idle selectors and activer appropriative actions. - * - */ -package object channels { - - -} - diff --git a/src/main/scala/gopher/goasync/AsyncApply.scala b/src/main/scala/gopher/goasync/AsyncApply.scala deleted file mode 100644 index 1ebbbd48..00000000 --- a/src/main/scala/gopher/goasync/AsyncApply.scala +++ /dev/null @@ -1,144 +0,0 @@ -package gopher.goasync - -import scala.language.experimental.macros -import scala.language.reflectiveCalls -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.annotation.unchecked._ - - - -object AsyncApply -{ - - - def impl1[A:c.WeakTypeTag,B:c.WeakTypeTag,C:c.WeakTypeTag](c:Context)(hof:c.Expr[Function[Function[A,B],C]])(nf:c.Expr[Function[A,Future[B]]]): - c.Expr[Future[C]] = - { - import c.universe._ - val nhof = transformHof[A,B](c)(hof.tree,List()) - val retval = c.Expr[Future[C]](q"${nhof}(${nf})") - retval - } - - def apply1i[A,B,C](hof:Function[Function[A,B],C])(nf:Function[A,Future[B]],implicitParams:Any*):Future[C] = macro AsyncApply.impl1i[A,B,C] - - - def impl1i[A:c.WeakTypeTag,B:c.WeakTypeTag,C:c.WeakTypeTag](c:Context)(hof:c.Expr[Function[Function[A,B],C]])(nf:c.Expr[Function[A,Future[B]]],implicitParams:c.Expr[Any]*): c.Expr[Future[C]] = - { - import c.universe._ - val nhof = transformHof[A,B](c)(hof.tree,implicitParams.map(_.tree)) - val retval = (q"${nhof}(${nf})") - c.Expr[Future[C]](retval) - } - - def transformHof[A:c.WeakTypeTag,B:c.WeakTypeTag](c:Context)(hof:c.Tree,imps:Seq[c.Tree]):c.Tree = { - import c.universe.{Function=>_,_} - hof match { - case q"${p}.$h" => - val ah = genAsyncName(c)(h,hof.pos) - q"${p}.${ah}" - case q"${p}.$h[..$w]" => - val ah = genAsyncName(c)(h,hof.pos) - q"${p}.${ah}[..$w]" - case q"($fp)=>$res($fp1)" if (fp.symbol == fp1.symbol) => - val nested = transformHof[A,B](c)(res,imps) - val (paramName, paramDef) = createAsyncParam[A,B](c)(fp) - val mfp2 = appendImplicitExecutionContext(c)(imps) - val transformed = q"($paramDef)=>$nested($paramName)(..$mfp2)" - c.typecheck(transformed) - case q"($fp)=>$res($fp1)(..$fp2)" if (fp.symbol == fp1.symbol) => - // ..fp2 is a list of implicit params. 
- val nested = transformHof[A,B](c)(res,imps) - val (paramName, paramDef) = createAsyncParam[A,B](c)(fp) - val mfp2 = appendImplicitExecutionContext(c)(fp2) - val r = q"($paramDef)=>$nested($paramName)(..$mfp2)" - r - case q"($fp)=>$res[$w1,$w2]($fp1)($fp2)" => - c.abort(hof.pos,"A1"+hof) - case q"($fp:$ft)=>$a" => - c.abort(hof.pos,"a="+a) - case q"{ ..$stats }" => - try { - val nstats = transformLast(c){ - t => transformHof[A,B](c)(t,imps) - }(stats) - q"{ ..$nstats }" - } catch { - case ex: Throwable => - System.err.println(s"error during transforming ${stats}") - ex.printStackTrace() - throw ex - } - case _ => c.abort(hof.pos,"hof match failed:"+hof+"\n raw:"+showRaw(hof)) - } - } - - def createAsyncParam[A:c.WeakTypeTag,B:c.WeakTypeTag](c:Context)(fp:c.Tree):(c.TermName,c.Tree) = - { - import c.universe._ - // TODO: check that fp is ident and get fp as name. - val nname = TermName(c.freshName()) - val paramType = tq"Function[${c.weakTypeOf[A]},Future[${c.weakTypeOf[B]}]]" - (nname,ValDef(Modifiers(Flag.PARAM),nname,paramType,EmptyTree)) - } - - def inferImplicitExecutionContext(c:Context)():c.Tree = - { - val ect = c.weakTypeOf[scala.concurrent.ExecutionContext] - c.inferImplicitValue(ect, silent=false) - } - - - def appendImplicitExecutionContext(c:Context)(paramList:Seq[c.Tree]):Seq[c.Tree] = - { - val t = inferImplicitExecutionContext(c)() - paramList.find(_.symbol == t.symbol) match { - case None => paramList :+ t - case Some(v) => paramList - } - } - -/* - def appendImplicitExecutionContextParam(c:Context)(paramList:List[c.Tree]):List[c.Tree]= - { - // check that paramList not contains ec. - // (note, input must be typed - paramList.find{ x => - x match { - case ValDef(m,pn,pt,pv) => - m.hasFlag(Flag.IMPLICIT) && pt =:= c.weakTypeOf[scala.concurrent.ExecutionContext] - case _ => false - } - } match { - case None => - val pName = TermName(c.freshName("ec")) - val pType = c.weakTypeOf[scala.concurrent.ExecutionContext] - ValDef(Modifiers(Flag.PARAM|Flag.IMPLICIT),pName,paramType,EmptyTree) - - } - } -*/ - - def genAsyncName(c:Context)(h:c.TermName,pos:c.Position):c.TermName = - { - import c.universe._ - h match { - case TermName(hname) => - TermName(hname+"Async") - case _ => - c.abort(pos,"ident expected for hight order function") - } - } - - def transformLast(c:Context)(f:c.Tree=>c.Tree)(block: List[c.Tree]):List[c.Tree] = - block match { - case Nil => Nil - case r::Nil => f(r)::Nil - case h::q => h::transformLast(c)(f)(q) - } - -} diff --git a/src/main/scala/gopher/goasync/AsyncIterable.scala b/src/main/scala/gopher/goasync/AsyncIterable.scala deleted file mode 100644 index fb7f5b05..00000000 --- a/src/main/scala/gopher/goasync/AsyncIterable.scala +++ /dev/null @@ -1,34 +0,0 @@ -package gopher.goasync - -import scala.concurrent._ -import scala.async.Async._ -import scala.collection.generic._ - - -class AsyncIterable[T](val x:Iterable[T]) //extends AnyVal [implementation restriction, [scala-2.11.8] -{ - - - def foreachAsync[U](f: T => Future[U])(implicit ec:ExecutionContext): Future[Unit] = - async{ - val it = x.iterator - while(it.hasNext) { - await(f(it.next)) - } - } - - - def mapAsync[U,Z](f: T => Future[U])(implicit bf: CanBuildFrom[_,U,Z], ec:ExecutionContext): Future[Z] = - async { - val builder = bf.apply() - val it = x.iterator - while(it.hasNext) { - val v = it.next - builder += await(f(v)) - } - builder.result() - } - -} - - diff --git a/src/main/scala/gopher/goasync/AsyncOption.scala b/src/main/scala/gopher/goasync/AsyncOption.scala deleted file mode 100644 index 
cffa207e..00000000 --- a/src/main/scala/gopher/goasync/AsyncOption.scala +++ /dev/null @@ -1,63 +0,0 @@ -package gopher.goasync - -import scala.concurrent._ -import scala.async.Async._ -import scala.collection.generic._ - - -class AsyncOption[T](val x:Option[T]) extends AnyVal -{ - - - def foreachAsync[U](f: T => Future[U])(implicit ec:ExecutionContext): Future[Unit] = - { - if (x.isDefined) { - f(x.get) map (_ => ()) - } else { - Future successful (()) - } - } - - - def mapAsync[U](f: T => Future[U])(implicit ec:ExecutionContext): Future[Option[U]] = - { - if (x.isDefined) { - f(x.get) map (x => Some(x)) - } else { - Future successful None - } - } - - def flatMapAsync[U](f: T => Future[Option[U]])(implicit ec:ExecutionContext): Future[Option[U]] = - { - if (x.isDefined) { - f(x.get) - } else { - Future successful None - } - } - - def filterAsync(f: T=>Future[Boolean])(implicit ec:ExecutionContext): Future[Option[T]] = - { - if (x.isDefined) { - f(x.get) map { r => - if (r) x else None - } - } else { - Future successful None - } - } - - def filterNotAsync(f: T=>Future[Boolean])(implicit ec:ExecutionContext): Future[Option[T]] = - { - if (x.isDefined) { - f(x.get) map { r => if (r) None else x } - } else { - Future successful None - } - } - - -} - - diff --git a/src/main/scala/gopher/goasync/AsyncWrapper.scala b/src/main/scala/gopher/goasync/AsyncWrapper.scala deleted file mode 100644 index ce9c2c11..00000000 --- a/src/main/scala/gopher/goasync/AsyncWrapper.scala +++ /dev/null @@ -1,35 +0,0 @@ -package gopher.goasync - -import scala.language.experimental.macros -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import scala.concurrent._ - -object AsyncWrapper -{ - - def async[T](x:T)(implicit ec:ExecutionContext):Future[T] = macro asyncImpl[T] - - def await[T](x:Future[T]):T = macro awaitImpl[T] - - def postWrap[T](x:T):T = macro postWrapImpl[T] - - def asyncImpl[T](c:Context)(x:c.Expr[T])(ec:c.Expr[ExecutionContext]):c.Expr[Future[T]] = - { - import c.universe._ - c.Expr[Future[T]](q"gopher.goasync.AsyncWrapper.postWrap(scala.async.Async.async(${x})(${ec}))") - } - - def awaitImpl[T](c:Context)(x:c.Expr[Future[T]]):c.Expr[T] = - { - import c.universe._ - c.Expr[T](q"gopher.goasync.AsyncWrapper.postWrap(scala.async.Async.await(${x}))") - } - - def postWrapImpl[T](c:Context)(x:c.Expr[T]):c.Expr[T]= - { - import c.universe._ - x - } - -} diff --git a/src/main/scala/gopher/goasync/GoAsync.scala b/src/main/scala/gopher/goasync/GoAsync.scala deleted file mode 100644 index 4ab1fb4c..00000000 --- a/src/main/scala/gopher/goasync/GoAsync.scala +++ /dev/null @@ -1,285 +0,0 @@ -package gopher.goasync - -import scala.language.experimental.macros -import scala.language.reflectiveCalls -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import gopher._ -import gopher.util._ -import scala.concurrent._ -import scala.annotation.unchecked._ - - -/** - * async arround go. - * - * Basicly go is - * 1. translate await-like exressions inside inline functions to calls of appropriative async functions. - * (or show error if not found). - *``` - * x.foreach{ x => p; await(x); .. } - *``` - * become - *``` - * await( transform-defer( x.foreachAsync{ x => async(p; await(x); ..) }) ) - *``` - * (note, that channel.read macroses are expanded to await-s on this point) - * - * 2. transform defer calls if defer statement is found inside go: - *``` - * asnyc{ p .. defer(x) .. 
} - *``` - * become (reallity is a little complext, here is just idea) - *``` - * { val d = new Defers(); async{ p .. d.defer(x) .. }.onComplete(d.tryProcess) } - *``` - */ -object GoAsync -{ - - //TODO: add handling of try/catch and operations inside collections. - - def goImpl[T:c.WeakTypeTag](c:Context)(body:c.Expr[T])(ec:c.Expr[ExecutionContext]):c.Expr[Future[T]] = - { - import c.universe._ - val ttype = c.weakTypeOf[T] - val nbody = GoAsync.transformAsyncBody[T](c)(body.tree) - val asyncNBody = applyAsync[T](c)(nbody)(ec) - val r = if (containsDefer(c)(body)) { - val defers = TermName(c.freshName) - val promise = TermName(c.freshName) - // asyn transform wantstyped tree on entry, so we must substitute 'defers' to untyped - // values after it, no before. - q""" - gopher.goasync.GoAsync.transformDeferMacro[${ttype}]( - {implicit val ${defers} = new Defers[${c.weakTypeOf[T]}]() - val ${promise} = Promise[${c.weakTypeOf[T]}]() - ${asyncNBody}.onComplete( x => - ${promise}.complete(${defers}.tryProcess(x)) - )(${ec}) - ${promise}.future - } - ) - """ - } else { - q"${asyncNBody}" - } - c.Expr[Future[T]](r) - } - - def goScopeImpl[T:c.WeakTypeTag](c:Context)(body:c.Expr[T]):c.Expr[T] = - { - import c.universe._ - if (containsDefer(c)(body)) { - val nbody = transformDefer[T](c)(body.tree) - c.Expr[T](q"""{implicit val defered = new gopher.Defers[${c.weakTypeOf[T]}]() - defered.processResult(gopher.Defers.controlTry(${c.untypecheck(nbody)})) - }""") - } else { - body - } - } - - def containsDefer[T:c.WeakTypeTag](c:Context)(body:c.Expr[T]):Boolean = - { - import c.universe._ - val findDefer = new Traverser { - var found = false - override def traverse(tree:Tree):Unit = - { - if (!found) { - tree match { - case q"gopher.`package`.defer(..${args})" => found = true - case _ => super.traverse(tree) - } - } - } - } - findDefer traverse body.tree - findDefer.found - } - - def transformDeferMacro[T](body:Future[T]):Future[T] = macro transformDeferMacroImpl[T] - - def transformDeferMacroImpl[T:c.WeakTypeTag](c:Context)(body:c.Expr[Future[T]]):c.Expr[Future[T]] = - { - c.Expr[Future[T]](c.untypecheck(transformDefer[T](c)(body.tree))) - } - - def transformDefer[T:c.WeakTypeTag](c:Context)(body:c.Tree):c.Tree = - { - import c.universe._ - val transformer = new Transformer { - override def transform(tree:Tree):Tree = - tree match { - case q"gopher.`package`.defer(..${args})" => - q"implicitly[gopher.Defers[${weakTypeOf[T]}]].defer(..${args map (transform(_))} )" - case q"$gopher.`package`.recover[$tps](..${args})" => - q"implicitly[gopher.Defers[${weakTypeOf[T]}]].recover(..${args map (transform(_))} )" - case _ => - super.transform(tree) - } - } - transformer.transform(body) - } - - def transformAsyncBody[T:c.WeakTypeTag](c:Context)(body:c.Tree):c.Tree = - { - import c.universe._ - var found = false - var transformed = false - val transformer = new Transformer { - override def transform(tree:Tree):Tree = - { - - // if subtree was transformed, try to transform again origin tree, - // because await can be lifted-up from previous level. - def transformAgainIfNested(tree: Tree):Tree = - { - val prevTransformed = transformed - transformed = false - val nested = super.transform(tree) - if (transformed) { - transform(nested) - }else{ - transformed = prevTransformed - nested - } - } - - tree match { - case q"${f1}(${a}=>${b})(..$a2)" => - // TODO: cache in tree. - found = findAwait(c)(b) - if (found) { - // this can be implicit parameters of inline apply. 
- // whe can distinguish first from second by looking at f1 shape. - val isTwoParams = if (!(tree.symbol eq null)) { - if (tree.symbol.isMethod) { - tree.symbol.asMethod.paramLists.size == 2 - } else if (tree.symbol eq NoSymbol) { - // untyped, hope for the best - true - } else false - } else true - if (isTwoParams) { - transformed = true - transformInlineHofCall1(c)(f1,a,b,a2) - } else { - super.transform(tree) - } - }else{ - // TODO: think, may-be try to transform first b instead nested [?] - transformAgainIfNested(tree) - } - case q"${f1}(${a}=>${b})" => - found = findAwait(c)(b) - if (found) { - transformed = true - transformInlineHofCall1(c)(f1,a,b,List()) - } else { - // TODO: think, may-be try to transform first b instead nested [?] - transformAgainIfNested(tree) - } - case _ => - super.transform(tree) - } - } - - } - - var r = transformer.transform(body) - - r - } - - // handle things like: - // q"${fun}(${param}=>${body})($implicitParams)" => - def transformInlineHofCall1(c:Context)(fun:c.Tree,param:c.Tree,body:c.Tree,implicitParams:List[c.Tree]):c.Tree = - { - import c.universe._ - val btype = body.tpe - val nb = body - val anb = atPos(body.pos){ - val nnb = transformAsyncBody(c)(nb) - val ec = c.inferImplicitValue(c.weakTypeOf[ExecutionContext]) - // untypechack is necessory, because async-transform corrupt - // symbols owners inside body and we change scope of param - // [scala-2.11.8] - MacroUtil.cleanUntypecheck(c)( - q"(${param})=>scala.async.Async.async[$btype](${nnb})($ec)" - ) - } - val ar = atPos(fun.pos) { - val uar = if (implicitParams.isEmpty) { - q"gopher.asyncApply1(${fun})(${anb})" - } else { - q"gopher.goasync.AsyncApply.apply1i(${fun})(${anb},${implicitParams})" - } - if (true) { - // typecheck is necessory - // 1. to prevent runnint analysis of async over internal awaits in anb as on - // enclosing async instead those applied from asyncApply - // 2. 
to expand macroses here, to prevent error during expanding macroses - // in next typecheck - c.typecheck(uar) - } else { - uar - } - } - //typecheck with macros disabled is needed for compiler, - //to set symbol 'await', because async macro discovered - //awaits by looking at symbols - val r = c.typecheck(q"scala.async.Async.await(${ar})",withMacrosDisabled=true) - r - } - - def findAwait(c:Context)(body:c.Tree): Boolean = - { - import c.universe._ - var found: Boolean = false - val transformer = new Transformer { - - override def transform(tree:Tree):Tree = - { - if (found) - tree - else { - tree match { - case q"(scala.async.Async.await[${w}]($r)):${w1}"=> - found = true - tree - case q"scala.async.Async.await[${w}]($r)"=> - found = true - tree - case q"(scala.async.Async.async[${w}]($r)):${w1}"=> - //TODO: add test to test-case - tree - case q"(${a}=>${b})" => - // don't touch nested functions - tree - //super.transform(tree) - case _ => - super.transform(tree) - } - } - } - - } - transformer.transform(body) - found - } - - - def applyAsync[T:c.WeakTypeTag](c:Context)(nbody:c.Tree)(ec:c.Expr[ExecutionContext]):c.Tree = - { - import c.universe._ - nbody match { - case q"scala.async.Async.await[$t](..${x})" => q"${x.head}" - case _ => q"scala.async.Async.async($nbody)($ec)" - } - - } - -} - diff --git a/src/main/scala/gopher/package.scala b/src/main/scala/gopher/package.scala deleted file mode 100644 index f29b2df5..00000000 --- a/src/main/scala/gopher/package.scala +++ /dev/null @@ -1,196 +0,0 @@ - -import scala.language.experimental.macros -import scala.language.implicitConversions - -import scala.concurrent._ -import gopher.channels._ -import gopher.goasync._ - -/** - * Provides scala API for 'go-like' CSP channels. - * - * - * == Overview == - * - * see readme for quick introduction. - * - * == Usage == - * - * At first you must receive gopherApi as Akka extension: - *{{{ - * import gopher._ - * - * ..... - * val gopherApi = Gopher(actorSystem) - *}}} - * - * Then you can use CPS channels with blocling operations inside go clauses: - *{{{ - * val channel = gopherApi.makeChannel[Long] - * val n = 10000 - * val producer = go { - * @volatile var(x,y) = (0L,1L) - * for( s <- gopherApi.select.forever) { - * case z: channel.write if (z==x) => - * x = y - * y = x+z - * if (x > n) { - * channel.close - * implicitly[FlowTermination[Unit]].doExit() - * } - * } - * } - * val consumer = for((c,i) <- channel.zip(1 to n)) { - * Console.println(s"fib(\${i})=\${c}") - * } - * Await.ready(consumer, 10 seconds) - *}}} - * - * and defer/recover in go/goScope - * - *{{{ - * goScope{ - * val f = openFile(myFileName) - * defer{ - * if (! 
recover{case ex:FileNotFoundException => Console.println("invalid fname")}) { - * f.close() - * } - * } - * } - *}}} - * - *@see [[GopherAPI]] - *@see [[channels.Channel]] - *@see [[channels.Input]] - *@see [[channels.Output]] - *@see [[channels.SelectorBuilder]] - *@see [[channels.SelectFactory]] - *@author Ruslan Shevchenko - */ -package object gopher { - - - - // - // magnetic arguments for selector-builder unsugared API - // - - implicit def toAsyncFullReadSelectorArgument[A,B]( - f: ContRead[A,B] => Option[ContRead.In[A] => Future[Continuated[B]]] - ): ReadSelectorArgument[A,B] = AsyncFullReadSelectorArgument(f) - - implicit def toAsyncNoOptionReadSelectorArgument[A,B]( - f: ContRead[A,B] => (ContRead.In[A]=> Future[Continuated[B]]) - ): ReadSelectorArgument[A,B] = AsyncNoOptionReadSelectorArgument(f) - - implicit def toAsyncNoGenReadSelectorArgument[A,B]( - f: ContRead[A,B] => (A => Future[Continuated[B]]) - ): ReadSelectorArgument[A,B] = AsyncNoGenReadSelectorArgument(f) - - implicit def toAsyncPairReadSelectorArgument[A,B]( - f: (A, ContRead[A,B]) => Future[Continuated[B]] - ): ReadSelectorArgument[A,B] = AsyncPairReadSelectorArgument(f) - - implicit def toSyncReadSelectorArgument[A,B]( - f: ContRead[A,B] => (ContRead.In[A] => Continuated[B]) - ):ReadSelectorArgument[A,B] = SyncReadSelectorArgument(f) - - implicit def toSyncPairReadSelectorArgument[A,B]( - f: (A, ContRead[A,B]) => Continuated[B] - ):ReadSelectorArgument[A,B] = SyncPairReadSelectorArgument(f) - - - - implicit def toAsyncFullWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => Option[(A,Future[Continuated[B]])] - ):WriteSelectorArgument[A,B] = AsyncFullWriteSelectorArgument(f) - - implicit def toAsyncNoOptWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => (A,Future[Continuated[B]]) - ):WriteSelectorArgument[A,B] = AsyncNoOptWriteSelectorArgument(f) - - implicit def toSyncWriteSelectorArgument[A,B]( - f: ContWrite[A,B] => (A,Continuated[B]) - ): WriteSelectorArgument[A,B] = SyncWriteSelectorArgument(f) - - implicit def toAsyncFullSkipSelectorArgument[A]( - f: Skip[A] => Option[Future[Continuated[A]]] - ):SkipSelectorArgument[A] = AsyncFullSkipSelectorArgument(f) - - implicit def toAsyncNoOptSkipSelectorArgument[A]( - f: Skip[A] => Future[Continuated[A]] - ):SkipSelectorArgument[A] = AsyncNoOptSkipSelectorArgument(f) - - implicit def toSyncSelectorArgument[A]( - f: Skip[A] => Continuated[A] - ):SkipSelectorArgument[A] = SyncSelectorArgument(f) - -// -// Time from time we forgott to set 'go' in selector builder. -// Let's transform one automatically -// TODO: make 'go' nilpotent before this. -// -// implicit def toFuture[A](sb:SelectorBuilder[A]):Future[A] = sb.go - - @scala.annotation.compileTimeOnly("FlowTermination methods must be used inside flow scopes (go, reading/writing/idle args)") - implicit def compileTimeFlowTermination[A]: FlowTermination[A] = ??? - - /** - * starts asyncronics execution of `body` in provided execution context. - * Inside go we can use `defer`/`recover` clauses and blocked read/write channel operations. - * - */ - def go[T](body: T)(implicit ec:ExecutionContext) : Future[T] = macro GoAsync.goImpl[T] - - /** - * provide access to using defer/recover inside body in the current thread of execution. - */ - def goScope[T](body: T): T = macro GoAsync.goScopeImpl[T] - - /** - * pseudostatement which can be used inside go/goScope block. - **/ - @scala.annotation.compileTimeOnly("defer/recover method usage outside go / goScope ") - def defer(x: =>Unit): Unit = ??? 
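  /*
   * A minimal usage sketch for the `defer`/`recover` pair declared above, based only on
   * the contract documented in this file (defer blocks run in reverse order at the end of
   * the enclosing go/goScope block; recover returns true only when it handled a pending
   * exception by substituting the block's result). The file name and fallback string are
   * hypothetical, used purely for illustration.
   *
   * {{{
   * import gopher._
   * import java.io._
   *
   * def firstLine(fileName: String): String =
   *   goScope {
   *     val in = new BufferedReader(new FileReader(fileName))
   *     defer {
   *       // on failure, recover substitutes the fallback value and reports true;
   *       // the reader is closed either way
   *       recover { case _: IOException => "<unreadable>" }
   *       in.close()
   *     }
   *     in.readLine()
   *   }
   * }}}
   */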
- - /** - * can be called only from defer block. If we in handling exception, try to apply f - * to exception and if it's applied - stop panic and return true, otherwise return false. - * - *@param f - partial function for recovering exception. - *@return true if exception was recovered, false otherwise - */ - @scala.annotation.compileTimeOnly("defer/recover method usage outside go / goScope ") - def recover[T](f: PartialFunction[Throwable, T]): Boolean = ??? - - /** - * sugar for reading value from future. - */ - implicit class FutureWithRead[T](f:Future[T]) - { - def read: T = macro InputMacro.read[T] - - def aread: Future[T] = f - } - - import scala.language.experimental.macros - import scala.reflect.macros.blackbox.Context - import scala.reflect.api._ - def awaitImpl[T](c:Context)(v:c.Expr[Future[T]]):c.Expr[T] = - { - import c.universe._ - c.Expr[T](q"scala.async.Async.await($v)") - } - - - def asyncApply1[A,B,C](hof:(A=>B)=>C)(nf:A=>Future[B]):Future[C] = - macro gopher.goasync.AsyncApply.impl1[A,B,C] - - import scala.collection.generic._ - implicit def toAsyncIterable[T](x:Iterable[T]): AsyncIterable[T] = new AsyncIterable[T](x) - implicit def toAsyncOption[T](x:Option[T]): AsyncOption[T] = new AsyncOption[T](x) - - - -} - diff --git a/src/main/scala/gopher/transputers/ReplicateTransputer.scala b/src/main/scala/gopher/transputers/ReplicateTransputer.scala deleted file mode 100644 index 0765c476..00000000 --- a/src/main/scala/gopher/transputers/ReplicateTransputer.scala +++ /dev/null @@ -1,285 +0,0 @@ -package gopher.transputers - -import gopher._ -import gopher.channels._ -import gopher.util._ -import scala.language.experimental.macros -import scala.reflect.macros.whitebox.Context -import scala.reflect.api._ -import scala.concurrent._ -import scala.annotation._ -import scala.language.higherKinds -import async.Async._ - -import scala.collection.mutable.ArraySeq - -trait PortAdapter[P[_],A] -{ - def apply(x:P[A], n:Int, api: GopherAPI): (IndexedSeq[P[A]],Option[ForeverSelectorBuilder=>Unit]) -} - -class SharePortAdapter[P[_],A] extends PortAdapter[P,A] -{ - def apply(x:P[A], n:Int, api: GopherAPI): (IndexedSeq[P[A]],Option[ForeverSelectorBuilder=>Unit]) - = ((1 to n) map (_ => x) ,None) -} - -class DuplicatePortAdapter[A](buffLen: Int = 1) extends PortAdapter[Input,A] -{ - def apply(x:Input[A], n:Int, api: GopherAPI): (IndexedSeq[Input[A]],Option[ForeverSelectorBuilder=>Unit]) - = { - val upApi = api - val newPorts = (1 to n) map (_ => api.makeChannel[A](buffLen)) - def f(selector:ForeverSelectorBuilder): Unit = - selector.readingWithFlowTerminationAsync(x, - (ec:ExecutionContext, ft: FlowTermination[Unit], a: A) => async{ - var i = 0 - var fl:List[Future[A]]=Nil - while(iInt, buffLen: Int = 1) extends PortAdapter[Input,A] -{ - def apply(x:Input[A], n:Int, api: GopherAPI): (IndexedSeq[Input[A]],Option[ForeverSelectorBuilder=>Unit]) = - { - val newPorts = (1 to n) map (_ => api.makeChannel[A](buffLen)) - val sf: (ForeverSelectorBuilder=>Unit) = _.readingWithFlowTerminationAsync(x, - (ec:ExecutionContext, ft: FlowTermination[Unit], a: A) => - newPorts(f(a) % n).awrite(a).map(_ => ())(ec) - ) - (newPorts, Some(sf)) - } -} - - -class AggregatePortAdapter[A](f: Seq[A]=>A, buffLen:Int = 1) extends PortAdapter[Output,A] -{ - - def apply(x:Output[A], n:Int, api: GopherAPI): (IndexedSeq[Output[A]],Option[ForeverSelectorBuilder=>Unit]) = - { - val newPorts = (1 to n) map (_ => api.makeChannel[A](buffLen)) - val sf: (ForeverSelectorBuilder=>Unit) = _.readingWithFlowTerminationAsync(newPorts(0), 
- (ec:ExecutionContext, ft: FlowTermination[Unit], a: A) => async{ - val data = new ArraySeq[A](n) - data(0) = a - val i=1 - while(iReplicated[X] is transputer which keep n instances of X - * where ports of replicated consumer are connected to appropriative ports of instances in parallel. - * - *@see gopher.GopherAPI#replicate - */ -abstract class ReplicatedTransputer[T<:Transputer, Self](api: GopherAPI, n: Int) extends ParTransputer(api,List()) -{ - - thisReplicatedTransputer: Self => - - class InPortWithAdapter[A](in:Input[A]) extends InPort[A](in) - { - var adapter: PortAdapter[Input, A] = new SharePortAdapter[Input,A] - def owner: Self = thisReplicatedTransputer - } - - class OutPortWithAdapter[A](out:Output[A]) extends OutPort[A](out) - { - var adapter: PortAdapter[Output, A] = new SharePortAdapter[Output,A] - def owner: Self = thisReplicatedTransputer - } - - - class SelectorRunner(configFun: ForeverSelectorBuilder => Unit ) extends SelectTransputer - { - - selectorInit = ()=>configFun(this) - selectorInit() - - def api = thisReplicatedTransputer.api - def recoverFactory = () => new SelectorRunner(configFun) - } - - def init(): Unit - - - override def onStart():Unit= - { init() } - - override def onRestart(prev:Transputer):Unit= - { init() } - - - def replicated: Seq[T] - = replicatedInstances - - protected var replicatedInstances: Seq[T] = Seq() - - protected def replicatePorts():IndexedSeq[ForeverSelectorBuilder=>Unit] - - protected final def formChilds(selectorFuns:IndexedSeq[ForeverSelectorBuilder=>Unit]):Unit = { - childs = (selectorFuns map(new SelectorRunner(_))) ++ replicatedInstances - for(x <- childs) x.parent = Some(this) - } - -} - - - - -object PortAdapters -{ - - implicit class DistributeInput[G <: ReplicatedTransputer[_,_], A](in: G#InPortWithAdapter[A]) - { - def distribute(f: A=>Int): G = - { in.adapter = new DistributePortAdapter(f) - in.owner.asInstanceOf[G] - } - } - - - implicit class ShareInput[G <: ReplicatedTransputer[_,_],A](in: G#InPortWithAdapter[A]) - { - def share(): G = - { in.adapter = new SharePortAdapter[Input,A]() - in.owner.asInstanceOf[G] - } - } - - - implicit class ShareOutput[G <: ReplicatedTransputer[_,_], A](out: G#OutPortWithAdapter[A]) - { - def share(): G = - { out.adapter = new SharePortAdapter[Output,A] - out.owner.asInstanceOf[G] - } - } - - - implicit class DuplicateInput[G <: ReplicatedTransputer[_,_],A](in: G#InPortWithAdapter[A]) - { - def duplicate(): G = - { in.adapter = new DuplicatePortAdapter[A] - in.owner.asInstanceOf[G] - } - } - - - -} - - - -object Replicate -{ - - /** - * macro for GopherAPI.replicate - */ - def replicateImpl[T<:Transputer:c.WeakTypeTag](c:Context)(n:c.Expr[Int]):c.Expr[Transputer] = - { - import c.universe._ - - def portDefs[P:TypeTag](portWithAdapterName:String,portConstructorName:String):List[Tree] = - { - val portWithAdapterType = TypeName(portWithAdapterName) - val portConstructor = TermName(portConstructorName) - val ports = ReflectUtil.retrieveValSymbols[P](c.universe)(weakTypeOf[T]) - for(p <- ports) yield { - val getter = p.getter.asMethod - if (getter.returnType.typeArgs.length!=1) { - c.abort(p.pos, "assumed {In|Out}Port[A], have type ${getter.returnType} with typeArgs length other then 1") - } - val ta = getter.returnType.typeArgs.head - val name = TermName(getter.name.toString) - q"val ${name}: ${portWithAdapterType}[${ta}] = new ${portWithAdapterType}[${ta}](${portConstructor}().v)" - } - } - - def replicatePort(p:TermName):Tree= - q"""{ val (replicatedPorts,optSelectorFun) = 
${p}.adapter(${p}.v,n,api) - for((r,e) <- (replicatedPorts zip replicatedInstances)) { - e.${p}.connect(r) - } - selectorFuns = selectorFuns ++ optSelectorFun - } - """ - - def retrieveValNames[P:TypeTag]:List[TermName] = - ReflectUtil.retrieveValSymbols[P](c.universe)(weakTypeOf[T]) map (x=>TermName(x.getter.name.toString)) - - def replicatePorts():List[Tree] = - { - (retrieveValNames[Transputer#InPort[_]] ++ retrieveValNames[Transputer#OutPort[_]]) map (replicatePort(_)) - } - - - val className = TypeName(c.freshName("Replicated"+weakTypeOf[T].typeSymbol.name)) - var stats = List[c.Tree]() ++ ( - portDefs[Transputer#InPort[_]]("InPortWithAdapter","InPort") - ++ - portDefs[Transputer#OutPort[_]]("OutPortWithAdapter","OutPort") - ) - - val retval = c.Expr[Transputer](q""" - { - class ${className}(api:GopherAPI,n:Int) extends ReplicatedTransputer[${weakTypeOf[T]},${className}](api,n) - { - type Self = ${className} - - def init(): Unit = - { - replicatedInstances = (1 to n) map (i => { - val x = api.makeTransputer[${weakTypeOf[T]}] - x.replicaNumber = i - x - }) - formChilds(replicatePorts) - } - - def replicatePorts():IndexedSeq[ForeverSelectorBuilder=>Unit] = - { - var selectorFuns = IndexedSeq[ForeverSelectorBuilder=>Unit]() - - ..${replicatePorts()} - - selectorFuns - } - - - ..${stats} - - } - new ${className}(${c.prefix},${n.tree}) - } - """ - ) - retval - } - - -} diff --git a/src/main/scala/gopher/transputers/TransputerSupervisor.scala b/src/main/scala/gopher/transputers/TransputerSupervisor.scala deleted file mode 100644 index 41f3b7ba..00000000 --- a/src/main/scala/gopher/transputers/TransputerSupervisor.scala +++ /dev/null @@ -1,86 +0,0 @@ -package gopher.transputers - -import gopher._ -import akka.actor._ -import scala.util._ - - -/** - * one actor, which perform operations for starting/stopping - **/ -class TransputerSupervisor(api: GopherAPI) extends Actor with ActorLogging -{ - import TransputerSupervisor._ - - implicit def ec = api.executionContext - - def receive = { - case Start(t) => log.debug(s"starting ${t}") - t.goOnce onComplete { - case scala.util.Success(x) => - api.transputerSupervisorRef ! Stop(t) - case scala.util.Failure(ex) => - api.transputerSupervisorRef ! Failure(t,ex) - } - case Failure(t,ex) => - handleFailure(t,ex) - case Stop(t) => log.debug(s"${t} stopped") - if (!t.flowTermination.isCompleted) { - t.flowTermination.doExit(()) - } - case Escalate(t,ex) => - t.flowTermination.doThrow(ex) - } - - - def handleFailure(t: Transputer, ex: Throwable) = - { - import SupervisorStrategy.{Resume,Restart,Stop,Escalate} - if (t.recoveryStatistics.failure(ex,t.recoveryLimits,System.nanoTime)) { - escalate(t, new Transputer.TooManyFailures(t)) - } else if (t.recoveryFunction.isDefinedAt(ex)) { - t.recoveryFunction(ex) match { - case Resume => log.debug(s"${t} failed with ${ex.getMessage()}, resume execution") - log.debug("caused by {}",ex) - t.beforeResume() - self ! Start(t) - case Restart => log.debug(s"${t} failed with ${ex.getMessage()}, restart") - log.debug("caused by {}",ex) - val nt = t.recoverFactory() - nt.copyPorts(t) - nt.copyState(t) - nt.beforeRestart(t) - self ! Start(nt) - case Stop => self ! TransputerSupervisor.Stop(t) - case Escalate => log.debug(s"escalate from ${t} : ${ex}") - escalate(t,ex) - } - } else { - escalate(t,ex) - } - } - - def escalate(t: Transputer, ex: Throwable): Unit = - { - self ! Escalate(t, ex) - t.parent match { - case Some(p) => self ! 
Failure(p,ex) - case None => // root escalate, acccordint to akka rules: throw to supervisor of all system. - log.error(s"transputer exception escalated to root: ${ex.getMessage}") - throw ex; - } - } - -} - - -object TransputerSupervisor -{ - sealed trait Message - case class Start(t: Transputer) extends Message - case class Failure(t: Transputer,ex: Throwable) extends Message - case class Stop(t: Transputer) extends Message - case class Escalate(t: Transputer, ex: Throwable) extends Message -} - - diff --git a/src/main/scala/gopher/transputers/package.scala b/src/main/scala/gopher/transputers/package.scala deleted file mode 100644 index 72531352..00000000 --- a/src/main/scala/gopher/transputers/package.scala +++ /dev/null @@ -1,12 +0,0 @@ -package gopher - -/** - * transputers implementations - * - *@see gopher.transputers.TransputerSupervisor - *@see gopher.transputers.ReplicatedTransputer - **/ -package object transputers -{ - -} diff --git a/src/main/scala/gopher/util/ASTUtilImpl.scala b/src/main/scala/gopher/util/ASTUtilImpl.scala deleted file mode 100644 index 6d168a73..00000000 --- a/src/main/scala/gopher/util/ASTUtilImpl.scala +++ /dev/null @@ -1,21 +0,0 @@ -package gopher.util - -import scala.reflect.macros.blackbox.Context - - -trait ASTUtilImpl -{ - val c: Context - import c.universe._ - - def parseGuardInSelectorCaseDef(name: c.TermName, guard:c.Tree): c.Tree = - { - guard match { - case Apply(Select(Ident(`name`),TermName("$eq$eq")),List(expression)) => - expression - case _ => - c.abort(guard.pos, s"expected ${name}== in select guard") - } - } - -} diff --git a/src/main/scala/gopher/util/Effected.scala b/src/main/scala/gopher/util/Effected.scala deleted file mode 100644 index 7f86a71c..00000000 --- a/src/main/scala/gopher/util/Effected.scala +++ /dev/null @@ -1,62 +0,0 @@ -package gopher.util - -import java.util.concurrent.atomic._ - -trait Effected[T] -{ - - def apply(f:T=>T): Unit - - @inline def <<=(f:T=>T): Unit = apply(f) - - def replace(x: T): Unit = apply(_ => x) - - @inline def :=(x:T): Unit = replace(x) - - protected def current: T -} - - -class SinglethreadedEffected[T](initValue:T) extends Effected[T] -{ - - override def apply(f: T=>T): Unit = - { v=f(v) } - - override def replace(x: T): Unit = - { v=x } - - override def current = v - - protected[this] var v = initValue -} - -class MultithreadedEffected[T](initValue:T) extends Effected[T] -{ - - override def apply(f: T=>T): Unit = - { - setv(f(v.get)) - } - - override def replace(x:T) - { - setv(x) - } - - protected[this] def setv(x: =>T):Unit = - { - var success = false; - while(!success) { - val prev = v.get() - val next = x - success = v.compareAndSet(prev,next) - } - } - - override def current = v.get - - protected[this] val v = new AtomicReference[T](initValue) -} - - diff --git a/src/main/scala/gopher/util/IntIndexedReverse.scala b/src/main/scala/gopher/util/IntIndexedReverse.scala deleted file mode 100644 index 5e1abe6e..00000000 --- a/src/main/scala/gopher/util/IntIndexedReverse.scala +++ /dev/null @@ -1,78 +0,0 @@ -package gopher.util - - - -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer - -case class CounterRecord[T](var value: T, var counter: Int = 0) - -class IntIndexedCounterReverse[T <: AnyRef](n:Int) { - - val values = new Array[CounterRecord[T]](n) - val backIndex = new mutable.WeakHashMap[T,CounterRecord[T]]() - - def put(i: Int, v:T):CounterRecord[T] = - { - var cr = values(i) - if ((cr eq null) || !(cr.value eq v)) { - backIndex.get(v) match { - case None => cr = 
new CounterRecord[T](v,0) - backIndex.put(v,cr) - case Some(backed) => cr = backed - } - values(i) = cr - } - cr - } - - def get(i: Int): Option[CounterRecord[T]] = - Option(values(i)) - - /** - * Search for index of v in T by reference. - * @param v - value to search - * @return index or -1 if not found - */ - def refIndexOf(v:T):Int = - { - var i=0 - var r = -1 - while(i < values.length && r == -1) { - if (values(i).value eq v) { - r = i - } - i += 1 - } - r - } - - def foreach(f:CounterRecord[T] => Unit): Unit = - { - var i=0 - while(iUnit):Unit = - { - var i = 0 - while(i < values.length) { - val current = values(i) - if (!(current eq null)) { - f(i,current) - } - i += 1 - } - } - - - def getBackIndex(v:T):Option[CounterRecord[T]] = - backIndex.get(v) - -} diff --git a/src/main/scala/gopher/util/MacroUtil.scala b/src/main/scala/gopher/util/MacroUtil.scala deleted file mode 100644 index 19a01a1b..00000000 --- a/src/main/scala/gopher/util/MacroUtil.scala +++ /dev/null @@ -1,127 +0,0 @@ -package gopher.util - -import scala.annotation._ -import scala.reflect.macros.blackbox.Context -import scala.reflect.api._ -import scala.language.reflectiveCalls - - -object MacroUtil -{ - - /** - * short representation of tree, suitable for show in - * error messages. - */ - def shortString(c:Context)(x:c.Tree):String = - { - val raw = c.universe.showRaw(x) - if (raw.length > SHORT_LEN) { - raw.substring(0,raw.length-3)+"..." - } else { - raw - } - } - - def skipAnnotation(c:Context)(x: c.Tree):c.Tree = - { - import c.universe._ - x match { - case Annotated(_,arg) => arg - case _ => x - } - } - - def hasAwait(c:Context)(x: c.Tree):Boolean = - { - import c.universe._ - val findAwait = new Traverser { - var found = false - override def traverse(tree:Tree):Unit = - { - if (!found) { - tree match { - case Apply(TypeApply(Select(obj,TermName("await")),objType), args) => - if (obj.tpe =:= typeOf[scala.async.Async.type]) { - found=true - } else super.traverse(tree) - case _ => super.traverse(tree) - } - } - } - } - findAwait.traverse(x) - findAwait.found - } - - /** - * bug in async/scala-2.12.x - * async/types generate in state-machine next chunk of code: - *``` - * val result: scala.concurrent.Promise[Int] = Promise.apply[Int](); - * def result: scala.concurrent.Promise[Int] = stateMachine$macro$1041.this.result; - * val execContext: scala.concurrent.ExecutionContext = .. - * def execContext: scala.concurrent.Promise[Int] = stateMachine$macro$1041.this.execContext; - *``` - * when we attempt untype/type code again, it is not compiled. 
- *So, we need to remove result and execContext DefDefs - **/ - def removeAsyncStateMachineResultDefDef(c:Context)(tree: c.Tree):c.Tree = - { - import c.universe._ - - val outsideStm = new Transformer { - - override def transform(tree:Tree):Tree = - tree match { - case ClassDef(mods,name,tparams,impl) - if (name.toString.startsWith("stateMachine$")) => - impl match { - case Template(parents,self,body) => - ClassDef(mods,name,tparams, - Template(parents,self,removeResultDefDef(body,Nil))) - //case _ => // impossible, throw - } - case _ => super.transform(tree) - } - - @tailrec - def removeResultDefDef(body:List[Tree],acc:List[Tree]):List[Tree] = - { - body match { - case Nil => acc.reverse - case head::tail => - val (rest,nacc) = head match { - case DefDef(mods,name,tparams,vparamss,tpt,rsh) - if (name.toString == "result" || - name.toString == "execContext" ) => (tail,acc) - case _ => (tail, transform(head)::acc) - } - removeResultDefDef(rest,nacc) - } - } - - } - - val retval = outsideStm.transform(tree) - retval - } - - def cleanUntypecheck(c:Context)(tree:c.Tree):c.Tree = - { - if (isScala2_11) { - c.untypecheck(tree) - } else if (isScala2_12_0) { - removeAsyncStateMachineResultDefDef(c)(c.untypecheck(tree)) - } else { - c.untypecheck(tree) - } - } - - val isScala2_11 = util.Properties.versionNumberString.startsWith("2.11.") - - val isScala2_12_0 = util.Properties.versionNumberString.startsWith("2.12.0") - - - final val SHORT_LEN = 80 -} diff --git a/src/main/scala/gopher/util/ReflectUtil.scala b/src/main/scala/gopher/util/ReflectUtil.scala deleted file mode 100644 index 54711070..00000000 --- a/src/main/scala/gopher/util/ReflectUtil.scala +++ /dev/null @@ -1,37 +0,0 @@ -package gopher.util - -import scala.reflect._ -import scala.reflect.api._ - -object ReflectUtil -{ - - - def retrieveValSymbols[T:u.TypeTag](u:Universe)(ownerType:u.Type): List[u.TermSymbol] = - { - val r1 = ownerType.members.filter(_.isTerm).map(_.asTerm).filter(x => x.isVal) - val signatures = r1.map(_.typeSignature) - val ut = u.typeOf[T] - val checkResults = signatures.map( _ <:< u.typeOf[T]) - val retval = ownerType.members.filter(_.isTerm).map(_.asTerm).filter{ x => - if (x.isVal) { - // in scala 2.12 getter method type, scala 2.11 - type - val r = x.typeSignature match { - case u.NullaryMethodType(rt) => rt <:< u.typeOf[T] // for scala-2.12 - case _ => (x.typeSignature <:< u.typeOf[T]) // for scala-2.11 - } - r - } else false - }.toList - retval - } - - - def retrieveVals[T:ru.TypeTag,O:ClassTag](ru:Universe)(mirror: ru.ReflectiveMirror, o:O): List[T] = - { - val im = mirror.reflect(o); - retrieveValSymbols(ru)(im.symbol.typeSignature) map (im.reflectField(_).get.asInstanceOf[T]) - } - - -} diff --git a/src/test/resources/application.conf b/src/test/resources/application.conf deleted file mode 100644 index b91330ac..00000000 --- a/src/test/resources/application.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka { - //loglevel = DEBUG - loglevel = OFF -} - diff --git a/src/test/scala/example/BetterSieveSuite.scala b/src/test/scala/example/BetterSieveSuite.scala deleted file mode 100644 index 98a3b225..00000000 --- a/src/test/scala/example/BetterSieveSuite.scala +++ /dev/null @@ -1,65 +0,0 @@ -package example - -import gopher._ -import gopher.channels._ -import CommonTestObjects.gopherApi._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits.global - -import scala.language.postfixOps - -import org.scalatest._ - -/** - * more 'scala-like' sieve - **/ -object 
BetterSieve -{ - - def generate(n:Int, quit:Promise[Boolean]):Input[Int] = - { - val channel = makeChannel[Int]() - channel.awriteAll(2 to n) foreach (_ => quit success true) - channel - } - - /** - * flatFold modify channel with each read - */ - def filter(in:Input[Int]):Input[Int] = - in.flatFold{ (s,prime) => s.filter( _ % prime != 0) } - - def primes(n:Int, quit: Promise[Boolean]):Input[Int] = - filter(generate(n,quit)) - -} - -class BetterSieveSuite extends FunSuite -{ - - test("last prime before 1000") { - - val quit = Promise[Boolean]() - val quitInput = futureInput(quit.future) - - val pin = Sieve.primes(1000,quit) - - var lastPrime=0; - val future = select.forever { - case p: pin.read => - if (false) { - System.err.print(p) - System.err.print(" ") - } - lastPrime=p - case q: quitInput.read => - //System.err.println() - CurrentFlowTermination.exit(()); - } - Await.ready(future, 10 seconds) - assert( lastPrime == 997) - } - -} - diff --git a/src/test/scala/example/Bingo.scala b/src/test/scala/example/Bingo.scala deleted file mode 100644 index da8f8374..00000000 --- a/src/test/scala/example/Bingo.scala +++ /dev/null @@ -1,71 +0,0 @@ -package examples - -import gopher._ -import gopher.channels._ -import gopher.tags._ -import org.scalatest._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import akka.actor._ - - -trait Bingo extends SelectTransputer -{ - - val inX = InPort[Int]() - val inY = InPort[Int]() - val out = OutPort[Boolean]() - - loop { - case x: inX.read => - val y = inY.read - //Console.println(s"Bingo checker, received ${x}, ${y}") - out.write(x==y) - } - - recover { - case ex: ChannelClosedException => SupervisorStrategy.Stop - } - -} - -trait Acceptor extends SelectTransputer -{ - - val inA = InPort[Boolean]() - - var nBingos = 0 - var nPairs = 0 - - loop { - case x: inA.read => - // Console.println(s"acceptor: ${nPairs} ${nBingos} ${x}") - if (x) { - nBingos += 1 - } - nPairs += 1 - } - -} - -class BingoSuite extends FunSuite -{ - - test("bingo process wit identical input must return same") { - val inX = gopherApi.iterableInput(1 to 100) - val inY = gopherApi.iterableInput(1 to 100) - val bingo = gopherApi.makeTransputer[Bingo] - val acceptor = gopherApi.makeTransputer[Acceptor] - bingo.inX connect inX - bingo.inY connect inY - bingo.out >~~> acceptor.inA - val w = (bingo + acceptor).start() - Await.ready(w,10 seconds) - assert(acceptor.nBingos == acceptor.nPairs) - } - - def gopherApi = CommonTestObjects.gopherApi - -} - diff --git a/src/test/scala/example/BroadcasterSuite.scala b/src/test/scala/example/BroadcasterSuite.scala deleted file mode 100644 index e3870915..00000000 --- a/src/test/scala/example/BroadcasterSuite.scala +++ /dev/null @@ -1,151 +0,0 @@ -package example.broadcast - -/** - * code from - * Concurrent Idioms #1: Broadcasting values in Go with linked channels. 
- * https://rogpeppe.wordpress.com/2009/12/01/concurrent-idioms-1-broadcasting-values-in-go-with-linked-channels/ - */ - -import scala.concurrent.{Channel=>_,_} -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits.global -import scala.language.postfixOps -import scala.async.Async._ - -import gopher._ -import gopher.channels._ -import CommonTestObjects.gopherApi._ - -import org.scalatest._ - - -class Broadcaster[A] -{ - import Broadcaster._ - - val listenc: Channel[Channel[Channel[Message[A]]]] = makeChannel() - val sendc: Channel[A] = makeChannel() - val quitc: Channel[Boolean] = makeChannel() - - val process = select.afold(makeChannel[Message[A]](1)) { (last,s) => - s match { - case v: sendc.read @unchecked => - val next = makeChannel[Message[A]](1) - last <~ ValueMessage(next,v) - next - case r: listenc.read @unchecked => - r <~ last - last - case q: quitc.read => - CurrentFlowTermination.exit(last) - } - } - - - - def alisten(): Future[Receiver[A]] = go { - val c = makeChannel[Channel[Message[A]]]() - listenc <~ c - new Receiver(c.read) - } - -} - - -object Broadcaster { - - import language.experimental.macros - import scala.reflect.macros.blackbox.Context - import scala.reflect.api._ - - class Receiver[A](initChannel: Channel[Message[A]]) - { - val current = makeEffectedChannel(initChannel) - - /** - * return Some(a) when broadcaster is not closed; None when closed. - * (this is logic from original Go example, where - * 'T' in Go is equilend to Option[T] in Scala [Go nil ~ Scala None]) - * In real life, interface will be better. - **/ - def aread():Future[Option[A]] = go { - val b = current.read - current.write(b) - b match { - case ValueMessage(ch,v) => - current := ch - Some(v) - case EndMessage => - None - } - } - - def read():Option[A] = macro Receiver.readImpl[A] - - } - - object Receiver - { - def readImpl[A](c:Context)():c.Expr[Option[A]]= - { - import c.universe._ - awaitImpl[Option[A]](c)(c.Expr[Future[Option[A]]](q"${c.prefix}.aread()")) - } - } - - sealed trait Message[+A] - case class ValueMessage[A](ch: Channel[Message[A]],v:A) extends Message[A] - case object EndMessage extends Message[Nothing] - -} - - -class BroadcaseSuite extends FunSuite -{ - - def listen[A](r: Broadcaster.Receiver[A],out:Output[A]): Future[Unit] = go { - var finish = false; - while(!finish) { - val x = await(r.aread) - x match { - case Some(m) => out.write(m) - case None => finish = true - } - } - (); - } - - def doBroadcast(out:Channel[Int]): Unit = go { - - val b = new Broadcaster[Int]() - - val r1 = await(b.alisten()) - val l1 = listen(r1,out) - val r2 = await(b.alisten()) - val l2 = listen(r2,out) - - b.sendc.write(1) - - val r3 = await(b.alisten()) - val l3 = listen(r3,out) - - b.sendc.write(2) - - b.quitc.write(true) - - Thread.sleep(500) - out.close() - } - - test("broadcast") { - val channel = makeChannel[Int]() - doBroadcast(channel); - val fsum = channel.afold(0){ (s,n) => s+n } - val sum = Await.result(fsum,10 seconds) - assert(sum==8) - } - -} - - - diff --git a/src/test/scala/example/CopyFile.scala b/src/test/scala/example/CopyFile.scala deleted file mode 100644 index dd44aa4a..00000000 --- a/src/test/scala/example/CopyFile.scala +++ /dev/null @@ -1,25 +0,0 @@ -package example - -import java.io._ -import gopher._ - -object CopyFile { - - def main(args:Array[String]):Unit = - { - if (args.length < 3) { - System.err.println("usage: copy in out"); - } - copy(new File(args(1)), new File(args(2))) - } - - def copy(inf: File, outf: File): Long = - goScope { - 
val in = new FileInputStream(inf) - defer{ in.close() } - val out = new FileOutputStream(outf); - defer{ out.close() } - out.getChannel() transferFrom(in.getChannel(), 0, Long.MaxValue) - } - -} diff --git a/src/test/scala/example/FibonaccyAsyncSuite.scala b/src/test/scala/example/FibonaccyAsyncSuite.scala deleted file mode 100644 index 32e62e3c..00000000 --- a/src/test/scala/example/FibonaccyAsyncSuite.scala +++ /dev/null @@ -1,79 +0,0 @@ -package example - -import org.scalatest._ -import gopher._ -import gopher.channels._ -import akka.actor._ -import gopher.tags._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits._ - - - -object FibonaccyAsync { - - def fibonacci(ch: Output[Long], quit: Input[Int]): Unit = { - var (x,y) = (0L,1L) - gopherApi.select.forever.writing(ch, y){ _ => - val z = x - x = y - y = z + y - }.reading(quit){ - x => - implicitly[FlowTermination[Unit]].doExit(()) - }.go - } - - def run(n:Int, acceptor: Long => Unit ): Unit = - { - val c = gopherApi.makeChannel[Long](1); - val quit = gopherApi.makeChannel[Int](1); - - var last=0L - /* - // error in compiler [scala-2.11.2] - //TODO: debug to small example and send pr - */ - /* - c.zip(1 to n).foreach{ a => - val (x,i) = a - //Console.print("%d, %d\n".format(i,x)) - last = x - } flatMap { x => quit.awrite(1) } - */ - val receiver = c.zip(1 to n).map{ case (x,i) => - // don't show, I trust you ;) - //Console.print("%d, %d\n".format(i,x)) - last = x - (i,x) - }.atake(n) flatMap { - x => - quit.awrite(1) - } - - fibonacci(c,quit) - - Await.ready(receiver, 10 seconds) - - acceptor(last) - - } - - lazy val gopherApi = channels.CommonTestObjects.gopherApi - -} - - -class FibonaccyAsyncSuite extends FunSuite -{ - - test("async fibonaccy must be processed up to 50") { - var last:Long = 0; - FibonaccyAsync.run(50, { last = _ } ) - assert(last != 0) - } - -} - diff --git a/src/test/scala/example/FibonaccyAsyncUnsugaredSuite.scala b/src/test/scala/example/FibonaccyAsyncUnsugaredSuite.scala deleted file mode 100644 index e0e97f55..00000000 --- a/src/test/scala/example/FibonaccyAsyncUnsugaredSuite.scala +++ /dev/null @@ -1,87 +0,0 @@ -package example - -import gopher._ -import gopher.channels._ -import scala.language._ -import scala.async.Async._ -import scala.concurrent._ -import scala.concurrent.duration._ -import org.scalatest._ -import ExecutionContext.Implicits.global - -import gopher.tags._ - -class FibonaccyAsyncUnsugaredSuite extends FunSuite { - - - object Fibonaccy { - - // illustrate usage of internal low-level API - // - def fibonacci(c: Output[Long], quit: Input[Int]): Future[Unit] = { - - @volatile var (x,y) = (0L,1L) - - val selector = new Selector[Unit](gopherApi) - - selector.addWriter(c, - ((cont:ContWrite[Long,Unit]) => Some{ - (x, async{ - val z=x - x=y - y=z+y - cont} - ) - } - ) - ) - selector.addReader(quit, - ((cont:ContRead[Int,Unit]) => Some{ (in:ContRead.In[Int]) => - Future successful Done((),cont.flowTermination) - } - ) - ) - selector.run - } - - def run(max:Int, acceptor: Long => Unit ): Unit = - { - val c = gopherApi.makeChannel[Long](); - val quit = gopherApi.makeChannel[Int](); - - val selector = new Selector[Long](gopherApi) - selector.addReader(c zip (1 to max), - (cont:ContRead[(Long,Int),Long]) => Some(ContRead.liftIn(cont){ in => - val (n,i) = in - //Console.println(s"received:${i}:${n} from channel ${cont.channel}") - Future successful { - if (i >= max) - Done(n,cont.flowTermination) - else - cont - } - }) - 
) - val consumer = selector.run - - val producer = fibonacci(c,quit) - - acceptor(Await.result(consumer, 10 seconds)) - - } - - - } - - test("fibonaccy must be processed up to 50") { - var last:Long = 0; - Fibonaccy.run(50, last = _ ) - assert(last != 0) - } - - val gopherApi = CommonTestObjects.gopherApi - -} - - - diff --git a/src/test/scala/example/FibonaccySuite.scala b/src/test/scala/example/FibonaccySuite.scala deleted file mode 100644 index 1e369bb2..00000000 --- a/src/test/scala/example/FibonaccySuite.scala +++ /dev/null @@ -1,64 +0,0 @@ -package example - -import gopher._ -import gopher.channels._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import org.scalatest._ -import gopher.tags._ - -/* - * code from go tutorial: http://tour.golang.org/#66 -* -*/ - -object Fibonaccy { - - import scala.concurrent.ExecutionContext.Implicits.global - - def fibonacci(c: Output[Long], quit: Input[Int]): Future[Unit] = go { - var (x,y) = (0L,1L) - for(s <- gopherApi.select.forever) { - s match { - case z: c.write if (z == x) => - x = y - y = z+y - case q: quit.read => - implicitly[FlowTermination[Unit]].doExit(()) - } - } - } - - def run(n:Int, acceptor: Long => Unit ): Future[Unit] = - { - val c = gopherApi.makeChannel[Long](1); - val quit = gopherApi.makeChannel[Int](1); - val r = go { - // for loop in go with async inside - for( i<- 1 to n) { - val x: Long = (c ?) - //Console.println(s"received: ${i}, ${x}") - acceptor(x) - } - quit <~ 0 - } - - fibonacci(c,quit) - } - - def gopherApi = CommonTestObjects.gopherApi - -} - -class FibonaccySuite extends FunSuite -{ - - test("fibonaccy must be processed up to 50") { - @volatile var last:Long = 0; - Await.ready( Fibonaccy.run(50, last = _ ), 10 seconds ) - assert(last != 0) - } - -} diff --git a/src/test/scala/gopher/channels/AsyncSelectSuite.scala b/src/test/scala/gopher/channels/AsyncSelectSuite.scala deleted file mode 100644 index 48fa5a6d..00000000 --- a/src/test/scala/gopher/channels/AsyncSelectSuite.scala +++ /dev/null @@ -1,153 +0,0 @@ -package gopher.channels - -import org.scalatest._ -import gopher._ -import gopher.tags._ -import akka.actor._ -import scala.concurrent._ -import scala.concurrent.duration._ - -class AsyncSelectSuite extends FunSuite { - - - val MAX_N=100 - - test("async base: channel write, select read") { - - - val channel = gopherApi.makeChannel[Int](10) - - channel.awriteAll(1 to MAX_N) - - var sum = 0; - - val consumer = gopherApi.select.loop.onRead(channel){ - (a:Int, cont:ContRead[Int,Unit]) => sum = sum + a - if (a < MAX_N) { - cont - } else { - Done((),cont.flowTermination) - } - }.go - - //val consumer = go { - // for(s <- select) { - // s match { - // case `channel` ~> (i:Int) => - // //System.err.println("received:"+i) - // sum = sum + i - // if (i==1000) s.shutdown() - // } - // } - // sum - //} - - Await.ready(consumer, 10.second) - - val xsum = (1 to MAX_N).sum - assert(xsum == sum) - - } - - test("async base: select write, select read") { - - val channel = gopherApi.makeChannel[Int](10) - - var sum=0 - var curA=0 - val process = gopherApi.select.loop. 
- onRead(channel){ - (a:Int, cont:ContRead[Int,Unit]) => sum = sum + a - //System.err.println("received:"+a) - if (a < MAX_N) { - cont - } else { - Done((),cont.flowTermination) - } - }.onWrite(channel){ - cont:ContWrite[Int,Unit] => - curA = curA+1 - if (curA < MAX_N) { - (curA, cont) - } else { - (curA,Done((),cont.flowTermination)) - } - }.go - - Await.ready(process, 10000.second) - - assert(curA == MAX_N) - - } - - test("async base: select read, default action") { - - val channel = gopherApi.makeChannel[Int](10) - - val consumer = channel.atake(100) - - var i = 1 - var d = 1 - val process = gopherApi.select.loop[Int].onWrite(channel) { - cont:ContWrite[Int,Int] => i=i+1 - (i,cont) - }.onIdle{ - cont:Skip[Int] => - if (i < 100) { - d=d+1 - cont - } else { - Done(d,cont.flowTermination) - } - }.go - - Await.ready(process, 10.second) - - assert(consumer.isCompleted) - assert(process.isCompleted) - assert(i>100) - - } - - test("async base: catch exception in read") { - val ERROR_N = 10 - var lastReaded = 0 - val channel = gopherApi.makeChannel[Int](10) - val process = gopherApi.select.loop. - onRead(channel){ - (a:Int, cont:ContRead[Int,Unit]) => lastReaded=a - if (a == ERROR_N) { - throw new IllegalStateException("qqq") - } - cont - }.go - - channel.awriteAll(1 to MAX_N) - - Await.ready(process, 10000.second) - - intercept[IllegalStateException]{ - Await.result(process, 10000.second) - } - - } - - test("async base: catch exception in idle") { - val process = gopherApi.select.loop.onIdle( - (cont: Skip[Int]) => - if (true) { - throw new IllegalStateException("qqq") - } else cont - ).go - - Await.ready(process, 10000.second) - - assert(process.value.get.isFailure) - - - } - - def actorSystem = CommonTestObjects.actorSystem - def gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/ChannelCleanupSuite.scala b/src/test/scala/gopher/channels/ChannelCleanupSuite.scala deleted file mode 100644 index 563906aa..00000000 --- a/src/test/scala/gopher/channels/ChannelCleanupSuite.scala +++ /dev/null @@ -1,79 +0,0 @@ -package gopher.channels - - -import org.scalatest._ -import scala.concurrent.{Channel=>_,_} -import scala.concurrent.duration._ -import gopher._ -import gopher.tags._ -import java.lang.ref._ - - -import scala.async.Async._ -import scala.concurrent.ExecutionContext.Implicits.global - -object CleanupFlags -{ - @volatile var v1 = 0 -} - -class CleanedObject(val v: Int) -{ - override protected def finalize():Unit = - { - CleanupFlags.v1 = 1 - super.finalize() - } -} - -class ChannelCleanupSuite extends FunSuite -{ - - - // This test is run, but JVM ntot guarantie this. 
- // so, it can - test("unused channel-actor must be cleanuped during gc") { - - val cleanedObjectRq = new ReferenceQueue[CleanedObject](); - var weakRef: WeakReference[CleanedObject] = null; - - def createChannel(): Channel[CleanedObject] = - { - val channel = gopherApi.makeChannel[CleanedObject](100) - var obj = new CleanedObject(1) - weakRef = new WeakReference(obj, cleanedObjectRq) - val producer = channel.awrite(obj) - obj = null - channel - } - - var ch = createChannel() - ch = null; - var quit=false; - var nTryes = 0 - if (cleanedObjectRq.poll() == null) { - while(!quit) { - val x = (cleanedObjectRq.remove(100L) != null) - // when we have not null, object in channel is garbage collected - // this can be never done, when we have enought memory, so - // look at finalizer - quit=(CleanupFlags.v1 == 1) - System.gc(); - System.runFinalization() - Thread.sleep(100) - nTryes += 1 - //assert(nTryes < 100) - if (nTryes >= 100) { - cancel("Test to finalization is canceled, but it is not guarantued by JVM specs ") - } - } - } - - // System.err.println("CleanupFlags.v1="+CleanupFlags.v1) - - } - - - def gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/ChannelCloseSuite.scala b/src/test/scala/gopher/channels/ChannelCloseSuite.scala deleted file mode 100644 index 80bd83bf..00000000 --- a/src/test/scala/gopher/channels/ChannelCloseSuite.scala +++ /dev/null @@ -1,75 +0,0 @@ -package gopher.channels - - -import org.scalatest._ -import scala.concurrent._ -import scala.concurrent.duration._ -import gopher._ -import gopher.tags._ - - -import scala.async.Async._ -import scala.concurrent.ExecutionContext.Implicits.global - -class ChannelCloseSuite extends FunSuite -{ - - - test("writing after close is impossile") { - - val channel = gopherApi.makeChannel[Int](100) - - channel.close - - val producer = channel.awriteAll(1 to 1000) - - Await.ready(producer, 10.second) - - assert(producer.isCompleted) - assert(producer.value.get.isFailure) - } - - test("in async we must see throw") { - - val channel = gopherApi.makeChannel[Int](100) - channel.close - @volatile var catched = false - @volatile var notCatched = false - val p = async { - channel.write(1) - notCatched=true - } - try { - Await.result(p, 10.second) - } catch { - case ex: ChannelClosedException => - catched = true - } - assert(!notCatched) - assert(catched) - - } - - test("after close we can read but not more, than was send") { - val channel = gopherApi.makeChannel[Int](100) - @volatile var q = 0 - val p = async { - channel <~ 1 - channel.close - q = channel.read - } - Await.result(p, 10.second) - assert(q==1) - val afterClose = async{ - val a = channel.read - q = 2 - } - Await.ready(afterClose, 10.second) - assert(q != 2) - } - - - - def gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/CommonTestObjects.scala b/src/test/scala/gopher/channels/CommonTestObjects.scala deleted file mode 100644 index abbe51ae..00000000 --- a/src/test/scala/gopher/channels/CommonTestObjects.scala +++ /dev/null @@ -1,15 +0,0 @@ -package gopher.channels - -import org.scalatest._ -import gopher._ -import gopher.tags._ -import akka.actor._ -import scala.concurrent._ -import scala.concurrent.duration._ - -object CommonTestObjects { - - lazy val actorSystem = ActorSystem.create("system") - lazy val gopherApi = Gopher(actorSystem) - -} diff --git a/src/test/scala/gopher/channels/DuppedChannelsSuite.scala b/src/test/scala/gopher/channels/DuppedChannelsSuite.scala deleted file mode 100644 index 
7822e8a2..00000000 --- a/src/test/scala/gopher/channels/DuppedChannelsSuite.scala +++ /dev/null @@ -1,73 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util._ - -import org.scalatest._ -import org.scalatest.concurrent._ - -import scala.concurrent.ExecutionContext.Implicits.global - -class DuppedChannelsSuite extends FunSuite with Waiters { - - - test("duped input must show two") { - val w = new Waiter - val ch = gopherApi.makeChannel[String]() - val dupped = ch.dup - ch.awrite("1") - val r1 = dupped._1.aread map { - x => w{ assert(x=="1") } - w.dismiss() - } - val r2 = dupped._2.aread map { - x => w{ assert(x=="1") } - w.dismiss() - } - w.await(timeout(10 seconds),Dismissals(2)) - } - - - test("output is blocked by both inputs") { - val ch = gopherApi.makeChannel[Int]() - val aw=ch.awriteAll(1 to 100) - val (in1, in2) = ch.dup - val at1 = in1.atake(100) - intercept[TimeoutException] { - Await.ready(aw, 1 second) - } - assert(!aw.isCompleted) - assert(!at1.isCompleted) - val at2 = in2.atake(100) - Await.ready(at2, 1 second) - assert(aw.isCompleted) - } - - test("on closing of main stream dupped outputs also closed.") { - val ch = gopherApi.makeChannel[Int](1) - val (in1, in2) = ch.dup - val f1 = go { - ch.write(1) - ch.close() - } - Await.ready(f1, 1 second) - val w = new Waiter - in1.aread map { x => w(assert(x==1)); w.dismiss() } onComplete { - case Failure(ex) => w( throw ex ) - case Success(_) => - in1.aread.failed.foreach{ ex => w(assert(ex.isInstanceOf[ChannelClosedException])); - w.dismiss() - } - } - w.await(timeout(10 seconds),Dismissals(2)) - } - - def gopherApi = CommonTestObjects.gopherApi - - -} diff --git a/src/test/scala/gopher/channels/FlowTerminationSuite.scala b/src/test/scala/gopher/channels/FlowTerminationSuite.scala deleted file mode 100644 index 940158ac..00000000 --- a/src/test/scala/gopher/channels/FlowTerminationSuite.scala +++ /dev/null @@ -1,76 +0,0 @@ -package gopher.channels - - -import org.scalatest._ -import scala.concurrent._ -import scala.concurrent.duration._ -import gopher._ -import scala.language.postfixOps - -import scala.concurrent.ExecutionContext.Implicits.global - -class FlowTerminationSuite extends FunSuite -{ - - - test("flowTermination covariance assignment") { - - val fUnit = PromiseFlowTermination[Unit]() - // val fAny: FlowTermination[Any] = fUnit - implicit val f_ : FlowTermination[_] = fUnit - - val qq = implicitly[FlowTermination[_]] - - } - - - test("select with queue type") { - import gopherApi._ - - val channel = makeChannel[Int](100) - - val producer = channel.awriteAll(1 to 1000) - - var sum = 0; - val consumer = Future { - val sc = new Selector[Unit](gopherApi) - def f(self: ContRead[Int,Unit]):Option[ContRead.In[Int]=>Future[Continuated[Unit]]] = - { - Some { - case ContRead.Value(a) => sum = sum + a - if (a == 1000) sc.doExit(()) - Future successful self - case ContRead.Failure(e) => Future failed e - case _ => - Future successful self - } - } - sc.addReader(channel,f) - Await.ready(sc.run, 10.second) - } - - Await.ready(consumer, 10.second) - - } - - test("not propagate signals after exit") { - - import gopherApi._ - val channel = makeChannel[Int](100) - var sum = 0 - val f = select.forever{ - case x: channel.read => sum += x - select.shutdown() - } - val f2 = channel.awrite(1) - Await.result(f, 1 second) - assert(sum == 1) - val f3 = channel.awrite(2) - Thread.sleep(1000) - 
assert(sum == 1) - - } - - val gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/FoldSelectSuite.scala b/src/test/scala/gopher/channels/FoldSelectSuite.scala deleted file mode 100644 index 1166d9a0..00000000 --- a/src/test/scala/gopher/channels/FoldSelectSuite.scala +++ /dev/null @@ -1,104 +0,0 @@ -package gopher.channels - - -import gopher._ -import gopher.channels._ -import gopher.tags._ - -import org.scalatest._ - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - - -class FoldSelectSuite extends FunSuite -{ - - lazy val gopherApi = CommonTestObjects.gopherApi - import gopherApi._ - - import scala.concurrent.ExecutionContext.Implicits.global - - test("fold-over-selector with changed read",Now) { - //for(i <- 1 to 100) { - //pending // we debug next now - val in = makeChannel[Int]() - val out = makeChannel[Int]() - var r0 = IndexedSeq[Int]() - val generator = go { - select.fold(in){ (ch,s) => - s match { - case p:ch.read => - r0 = r0 :+ p - out.write(p) - ch.filter{ _ % p != 0 } - } - } - } - generator.failed.foreach{ _.printStackTrace() } - //in.awriteAll(2 to Int.MaxValue) - go { - for(i <- 2 to Int.MaxValue) { - in.write(i) - } - } - - val read = go { - for(i <- 1 to 100) yield { - val x = out.read - x - } - } - - //val read = scala.async.Async.async(scala.async.Async.await((1 to 100).mapAsync(i=>out.aread))) - //val read = (1 to 100).mapAsync(i=>out.aread) - - //val r = Await.result(read,1 second) - val r = Await.result(read,2 seconds) - if (r.last != 541 || r(18)!=67 ) { - System.err.println(s"r0=$r0") - System.err.println(s"r1=$r") - } - //assert(r.last === 29) - //assert(r(0) === 2) - //assert(r(2) === 3) - assert(r(18) === 67) - assert(r.last === 541) - //} - } - - test("fold-over-selector with swap read") { - //pending - - val in1 = makeChannel[Int]() - val in2 = makeChannel[Int]() - val quit = makeChannel[Boolean]() - - val generator = go { - select.fold((in1,in2,0)){ case ((in1,in2,n),s) => - s match { - case x:in1.read => - if (x >= 100) { - select.exit((in1, in2, n)) - } else { - (in2, in1, n + x) - } - case x:in2.read => - (in2,in1,n-x) - } - } - } - - - in1.awriteAll(1 to 101) - - val r = Await.result(generator, 1 second) - - // 0 + 1 - 2 + 3 - 4 + 5 - 6 ... 
+99 - 100 + 101 - // - 1 2 -2 3 - 3 +50 - 50 - assert(r._3 == - 50) - - } - -} diff --git a/src/test/scala/gopher/channels/IOTimeoutsSuite.scala b/src/test/scala/gopher/channels/IOTimeoutsSuite.scala deleted file mode 100644 index 6bf0a7dc..00000000 --- a/src/test/scala/gopher/channels/IOTimeoutsSuite.scala +++ /dev/null @@ -1,177 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util._ - -import org.scalatest._ -import org.scalatest.concurrent._ - -import gopher._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits.global - -class IOTimeoutsSuite extends FunSuite with Waiters { - - - test("messsaged from timeouts must be appear during reading attempt from empty channel") { - val ch = gopherApi.makeChannel[String]() - val (chReady, chTimeout) = ch.withInputTimeouts(300 milliseconds) - val w = new Waiter() - val f = gopherApi.select.once { - case x: chReady.read => 1 - case x: chTimeout.read => 2 - } - Await.ready(f map ( x => { w( assert(x == 2) ); w.dismiss() } ), 1 second ) - w.await() - } - - test("when we have value, we have no timeouts") { - val ch = gopherApi.makeChannel[String]() - ch.awrite("qqq") - val (chReady, chTimeout) = ch.withInputTimeouts(300 milliseconds) - val w = new Waiter() - val f = gopherApi.select.once { - case x: chReady.read => 1 - case x: chTimeout.read => 2 - } - Await.ready(f map ( x => { w( assert(x == 1) ); w.dismiss() } ), 1 second ) - w.await() - } - - - test("on input close it's timeout channel also must close") { - val w = new Waiter() - val ch = gopherApi.makeChannel[String](1) - Await.ready(ch.awrite("qqq"), 1 second) - val (chReady, chTimeout) = ch.withInputTimeouts(300 milliseconds) - ch.close() - // now must read one element - val f1 = gopherApi.select.once { - case x: chReady.read => 1 - case x: chTimeout.read => 2 - } - Await.ready(f1 map ( x => { w( x == 1); w.dismiss() } ), 1 second ) - // now receive stream-closed exception - val f2 = chReady.aread - f2 onComplete { x => w(assert(x.isFailure && x.failed.get.isInstanceOf[ChannelClosedException])) - w.dismiss() - } - Await.ready(f2, 1 second) - val f3 = chTimeout.aread - f3 onComplete { x => w(assert(x.isFailure && x.failed.get.isInstanceOf[ChannelClosedException])) - w.dismiss() - } - Await.ready(f3, 5 seconds) - w.await(dismissals=Dismissals(3)) - - } - - - test("messsaged from timeouts must be appear during attempt to write to filled unbuffered channel") { - val ch = gopherApi.makeChannel[Int]() - val (chReady, chTimeout) = ch.withOutputTimeouts(150 milliseconds) - @volatile var count = 1 - val f = gopherApi.select.forever { - case x: chReady.write if (x==count) => - {}; - count += 1 // will newer called, since we have no reader - case t: chTimeout.read => - implicitly[FlowTermination[Unit]].doExit(count) - } - Await.ready(f, 1 second) - assert(count==1) - } - - test("messsaged from timeouts must be appear during attempt to write to filled buffered channel") { - val ch = gopherApi.makeChannel[Int](1) - val (chReady, chTimeout) = ch.withOutputTimeouts(150 milliseconds) - @volatile var count = 1 - val f = gopherApi.select.forever { - case x: chReady.write if (x==count) => - {}; - count += 1 - case t: chTimeout.read => - implicitly[FlowTermination[Unit]].doExit(count) - } - Await.ready(f, 1 second) - assert(count==2) - } - - test("when we have where to write -- no timeouts") { - val ch 
= gopherApi.makeChannel[Int](1) - val (chReady, chTimeout) = ch.withOutputTimeouts(300 milliseconds) - val f = gopherApi.select.once { - case x: chReady.write if (x==1) => 1 - case t: chTimeout.read => 2 - } - val r = Await.result(f, 1 second) - assert(r == 1) - } - - test("on output close it's timeout channel also must close") { - val ch = gopherApi.makeChannel[Int](1) - val (chReady, chTimeout) = ch.withOutputTimeouts(300 milliseconds) - val w = new Waiter() - val f1 = chReady.awrite(1) - f1 onComplete { - case Success(x) => w{assert(x==1) }; w.dismiss() - case Failure(th) => w{ throw th }; w.dismiss() - } - Await.ready(f1, 1 second) - ch.close() - val f2 = chReady.awrite(2) - f2 onComplete { x => w(assert(x.isFailure && x.failed.get.isInstanceOf[ChannelClosedException])) - w.dismiss() - } - w.await(dismissals=Dismissals(2)) - } - - test("during 'normal' processing timeouts are absent") { - val ch = gopherApi.makeChannel[Int]() - val (chInputReady, chInputTimeout) = ch.withInputTimeouts(300 milliseconds) - val (chOutputReady, chOutputTimeout) = ch.withOutputTimeouts(300 milliseconds) - @volatile var count = 0 - @volatile var count1 = 0 - @volatile var wasInputTimeout = false - @volatile var wasOutputTimeout = false - val maxCount = 100 - val fOut = gopherApi.select.forever { - case x: chOutputReady.write if (x==count) => - if (count == maxCount) { - implicitly[FlowTermination[Unit]].doExit(()) - } else { - count += 1 - } - case t: chOutputTimeout.read => - {}; - wasOutputTimeout = true - } - val fIn = gopherApi.select.forever { - case x: chInputReady.read => - count1 = x - if (x == maxCount) { - implicitly[FlowTermination[Unit]].doExit(()) - } - case t: chInputTimeout.read => - {}; - wasInputTimeout = true - } - Await.ready(fOut, 1 second) - Await.ready(fIn, 1 second) - assert(count == maxCount) - assert(count1 == maxCount) - assert(!wasInputTimeout) - assert(!wasOutputTimeout) - } - - def gopherApi = CommonTestObjects.gopherApi - - -} - diff --git a/src/test/scala/gopher/channels/InputOpsSuite.scala b/src/test/scala/gopher/channels/InputOpsSuite.scala deleted file mode 100644 index d9577832..00000000 --- a/src/test/scala/gopher/channels/InputOpsSuite.scala +++ /dev/null @@ -1,335 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util._ - -import org.scalatest._ -import org.scalatest.concurrent._ - -import scala.concurrent.ExecutionContext.Implicits.global - -class InputOpsSuite extends FunSuite with Waiters { - - - test("map operation for input") { - val w = new Waiter - val ch = gopherApi.makeChannel[String]() - ch.awriteAll(List("AAA","123","1234","12345")) - val mappedCh = ch map (_.reverse) - val r = mappedCh.atake(4) map { l => - w{ assert(l(0) == "AAA") } - w{ assert(l(1) == "321") } - w{ assert(l(2) == "4321") } - w{ assert(l(3) == "54321") } - w.dismiss() - } - w.await(timeout(10 seconds)) - } - - test("filter operation for input") { - val w = new Waiter - val ch = gopherApi.makeChannel[String]() - ch.awriteAll(List("qqq", "AAA","123","1234","12345")) - val filteredCh = ch filter (_.contains("A")) - filteredCh.aread map { x => w{ assert(x == "AAA") } } onComplete{ case Success(x) => w.dismiss() - case Failure(ex) => w(throw ex) - } - w.await(timeout(10 seconds)) - } - - - test("zip operation for two simple inputs") { - val w = new Waiter - val ch1 = gopherApi.makeChannel[String]() - ch1.awriteAll(List("qqq", 
"AAA","123","1234","12345")) - val ch2 = gopherApi.makeChannel[Int]() - ch2.awriteAll(List(1, 2, 3, 4, 5, 6)) - val zipped = ch1 zip ch2 - val r1 = Await.result(zipped.aread, 10 seconds) - assert(r1 == ("qqq", 1)) - val r2 = Await.result(zipped.aread, 10 seconds) - assert(r2 == ("AAA", 2)) - val r3 = Await.result(zipped.aread, 10 seconds) - assert(r3 == ("123", 3)) - val r4 = Await.result(zipped.aread, 10 seconds) - assert(r4 == ("1234", 4)) - val r5 = Await.result(zipped.aread, 10 seconds) - assert(r5 == ("12345", 5)) - } - - test("zip operation from two finite channels") { - val ch1 = Input.asInput(List(1,2),gopherApi) - val ch2 = Input.asInput(List(1,2,3,4,5,6),gopherApi) - val zipped = ch1 zip ch2 - val r1 = Await.result(zipped.aread, 10 seconds) - assert(r1 == (1, 1)) - val r2 = Await.result(zipped.aread, 10 seconds) - assert(r2 == (2, 2)) - intercept[ChannelClosedException] { - val r3 = Await.result(zipped.aread, 10 seconds) - } - } - - test("take from zip") { - val ch1 = Input.asInput(List(1,2,3,4,5),gopherApi) - val ch2 = Input.asInput(List(1,2,3,4,5,6),gopherApi) - val zipped = ch1 zip ch2 - val at = zipped.atake(5) - var ar = Await.result(at, 10 seconds) - assert(ar(0)==(1,1)) - assert(ar(4)==(5,5)) - } - - test("taking from iterator-input") { - val ch1 = Input.asInput(List(1,2,3,4,5),gopherApi) - val at = ch1.atake(5) - var ar = Await.result(at, 10 seconds) - assert(ar(4)==5) - } - - test("zip with self will no dup channels, but generate (odd, even) pairs. It's a feature, not a bug") { - val ch = gopherApi.makeChannel[Int]() - val zipped = ch zip ch - val ar1 = zipped.aread - ch.awriteAll(List(1,2,3,4,5,6,7,8)) - assert( Set((1,2),(2,1)) contains Await.result(ar1, 10 seconds) ) - val ar2 = zipped.aread - assert( Set((3,4),(4,3)) contains Await.result(ar2, 10 seconds) ) - val ar3 = zipped.aread - assert( Set((5,6),(6,5)) contains Await.result(ar3, 10 seconds) ) - } - - test("reading from Q1|Q2") { - - val ch1 = gopherApi.makeChannel[Int]() - val ch2 = gopherApi.makeChannel[Int]() - - val ar1 = (ch1 | ch2).aread - ch1.awrite(1) - - val r1 = Await.result(ar1, 10 seconds) - assert(r1==1) - - val ar2 = (ch1 | ch2).aread - ch2.awrite(2) - val r2 = Await.result(ar2, 10 seconds) - assert(r2==2) - - } - - test("simultanuos reading from Q1|Q2") { - - val w = new Waiter() - - val ch1 = gopherApi.makeChannel[Int]() - val ch2 = gopherApi.makeChannel[Int]() - - val ar1 = (ch1 | ch2).aread - val ar2 = (ch1 | ch2).aread - - ch1.awrite(1) - ch2.awrite(2) - - val r1 = Await.result(ar1, 10 seconds) - val r2 = Await.result(ar2, 10 seconds) - - if (r1 == 1) { - assert(r2 == 2) - } else { - assert(r2 == 1) - } - - val ar3 = (ch1 | ch2).aread - intercept[TimeoutException] { - val r3 = Await.result(ar3, 300 milliseconds) - } - - } - - test("reflexive or Q|Q") { - val ch = gopherApi.makeChannel[Int]() - val aw1 = ch.awrite(1) - val ar1 = (ch | ch).aread - val r1 = Await.result(ar1, 10 seconds) - assert(r1==1) - val ar2 = (ch | ch).aread - intercept[TimeoutException] { - val r2_1 = Await.result(ar2, 300 milliseconds) - } - val aw2 = ch.awrite(3) - val r2 = Await.result(ar2, 10 seconds) - assert(r2==3) - } - - test("two items read from Q1|Q2") { - val ch1 = gopherApi.makeChannel[Int]() - val ch2 = gopherApi.makeChannel[Int]() - val aw1 = ch1.awrite(1) - val aw2 = ch2.awrite(2) - val chOr = (ch1 | ch2) - val ar1 = chOr.aread - val ar2 = chOr.aread - val r1 = Await.result(ar1, 10 seconds) - val r2 = Await.result(ar2, 10 seconds) - assert( ((r1,r2)==(1,2)) ||((r1,r2)==(2,1)) ) - } - - test("atake read from 
Q1|Q2") { - val ch1 = gopherApi.makeChannel[Int]() - val ch2 = gopherApi.makeChannel[Int]() - - val aw1 = ch1.awriteAll(1 to 2) - val aw2 = ch2.awriteAll(1 to 2) - val at = (ch1 | ch2).atake(4) - val r = Await.result(at, 10 seconds) - } - - test("awrite/take ") { - val ch = gopherApi.makeChannel[Int]() - val aw = ch.awriteAll(1 to 100) - val at = ch.atake(100) - val r = Await.result(at, 10 seconds) - } - - test("Input foreach on closed stream must do nothing ") { - val ch = gopherApi.makeChannel[Int]() - @volatile var flg = false - val f = go { for(s <- ch) { - flg = true - } } - ch.close() - val r = Await.result(f, 10 seconds) - assert(!flg) - } - - test("Input foreach on stream with 'N' elements inside must run N times ") { - val w = new Waiter - val ch = gopherApi.makeChannel[Int]() - @volatile var count = 0 - val f = go { for(s <- ch) { - count += 1 - } } - val ar = ch.awriteAll(1 to 10) - ar.onComplete{ case _ => { ch.close(); w.dismiss() } } - f.onComplete{ case _ => w{ assert(count == 10) }; w.dismiss() } - // Too many awaits. - w.await(timeout(10 seconds), dismissals(2)) - } - - test("Input afold on stream with 'N' elements inside ") { - val ch = gopherApi.makeChannel[Int]() - val f = ch.afold(0)((s,e)=>s+1) - val ar = ch.awriteAll(1 to 10) - ar.onComplete{ case _ => ch.close() } - val r = Await.result(f,10 seconds) - assert(r==10) - } - - test("forech with mapped closed stream") { - def one(i:Int) = { - val w = new Waiter - val ch = gopherApi.makeChannel[Int]() - val mapped = ch map (_ * 2) - @volatile var count = 0 - val f = go { for(s <- mapped) { - // error in compiler - //assert((s % 2) == 0) - if ((s%2)!=0) { - throw new IllegalStateException("numbers in mapped channel must be odd") - } - count += 1 - } } - val ar = ch.awriteAll(1 to 10) - ar.onComplete{ case _ => { ch.close(); w.dismiss() } } - f.onComplete{ case _ => { w{assert(count == 10)}; w.dismiss() } } - w.await(timeout(10 seconds), dismissals(2)) - } - for(i <- 1 to 10) one(i) - } - - test("forech with filtered closed stream") { - val w = new Waiter - val ch = gopherApi.makeChannel[Int]() - val filtered = ch filter (_ %2 == 0) - @volatile var count = 0 - val f = go { for(s <- filtered) { - count += 1 - } } - val ar = ch.awriteAll(1 to 10) - ar.onComplete{ case _ => { ch.close(); w.dismiss() } } - f.onComplete{ case _ => { w{assert(count == 5)}; w.dismiss() } } - w.await(timeout(10 seconds), dismissals(2)) - } - -/* - test("channel fold with async operation inside") { - val ch1 = gopherApi.makeChannel[Int](10) - val ch2 = gopherApi.makeChannel[Int](10) - val fs = go { - val sum = ch1.fold(0){ (s,n) => - val n1 = ch2.read - //s+(n1+n2) -- stack overflow in 2.11.8 compiler. TODO: submit bug - s+(n+n1) - } - sum - } - go { - ch1.writeAll(1 to 10) - ch2.writeAll(1 to 10) - ch1.close() - } - val r = Await.result(fs, 10 seconds) - assert(r==110) - } -*/ - - - test("append for finite stream") { - val w = new Waiter - val ch1 = gopherApi.makeChannel[Int](10) - val ch2 = gopherApi.makeChannel[Int](10) - val appended = ch1 append ch2 - var sum = 0 - var prev = 0 - var monotonic = true - val f = go { for(s <- appended) { - // bug in compiler 2.11.7 - //w{assert(prev < s)} - //if (prev >= s) w{assert(false)} - if (prev >= s) monotonic=false - prev = s - sum += s - } } - val a1 = ch1.awriteAll(1 to 10) - val a2 = ch2.awriteAll((1 to 10)map(_*100)) - // it works, but for buffered channeld onComplete can be scheduled before. 
So, <= instead == - a1.onComplete{ case _ => { w{assert(sum <= 55)}; ch1.close(); w.dismiss() } } - a2.onComplete{ case _ => { w{assert(sum <= 5555)}; w{assert(monotonic)}; w.dismiss() } } - w.await(timeout(10 seconds), dismissals(2)) - assert(sum<=5555) - assert(monotonic) - } - - test("append for empty stream") { - val w = new Waiter - val ch1 = gopherApi.makeChannel[Int]() - val ch2 = gopherApi.makeChannel[Int]() - val appended = ch1 append ch2 - val f = appended.atake(10).map(_.sum) - f.onComplete{ case Success(x) => { w{assert(x==55)}; w.dismiss() } - case Failure(_) => { w{assert(false)}; w.dismiss() } - } - ch1.close() - val a2 = ch2.awriteAll(1 to 10) - w.await(timeout(10 seconds), dismissals(1)) - } - - def gopherApi = CommonTestObjects.gopherApi - - -} diff --git a/src/test/scala/gopher/channels/MacroSelectSuite.scala b/src/test/scala/gopher/channels/MacroSelectSuite.scala deleted file mode 100644 index 86bb03c7..00000000 --- a/src/test/scala/gopher/channels/MacroSelectSuite.scala +++ /dev/null @@ -1,341 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ - -import org.scalatest._ - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -class MacroSelectSuite extends FunSuite -{ - - import scala.concurrent.ExecutionContext.Implicits.global - - - test("select emulation with macroses") { - - val channel = gopherApi.makeChannel[Int](100) - - go { - var i = 1 - while(i <= 1000) { - channel <~ i - i+=1 - } - //TODO: implement for in goas preprocessor to async - //for( i <- 1 to 1000) - // channel <~ i - } - - var sum = 0; - val consumer = go { - for(s <- gopherApi.select.forever) { - s match { - case i: channel.read => - //System.err.println("received:"+i) - sum = sum + i - if (i==1000) - implicitly[FlowTermination[Unit]].doExit(()) - } - } - sum - } - - Await.ready(consumer, 5.second) - - val xsum = (1 to 1000).sum - assert(xsum == sum) - - - } - - - test("select with run-once") { - import gopherApi._ - val channel1 = makeChannel[Int](100) - val channel2 = makeChannel[Int](100) - - val g = go { - var nWrites=0; - for(s <- select.once) - s match { - case x: channel1.write if (x==1) => { {}; nWrites = nWrites + 1 } - case x: channel2.write if (x==1) => { {}; nWrites = nWrites + 1 } - } - - @volatile var nReads=0; - for(s <- select.once) - s match { - case x: channel1.read => { {}; nReads = nReads + 1 } - case x: channel2.read => { {}; nReads = nReads + 1 } - } - - } - - Await.ready(g, 10 seconds) - - } - - test("select from futureInput") { - import gopherApi._ - val channel = makeChannel[Int](100) - val future = Future successful 10 - val fu = futureInput(future) - var res = 0 - val r = select.forever{ - case x: channel.read => - Console.println(s"readed from channel: ${x}") - case x: fu.read => - //Console.println(s"readed from future: ${x}") - res=x - implicitly[FlowTermination[Unit]].doExit(()) - // syntax for using channels/futures in cases without - // setting one in stable identifers. 
- case x: Int if (x==future.read) => - {}; - res=x - } - Await.ready(r, 10 seconds) - assert(res==10) - } - - test("select syntax with read/writes in guard") { - import gopherApi._ - val channel1 = makeChannel[Int](100) - val channel2 = makeChannel[Int](100) - var res = 0 - val r = select.forever{ - case x: Int if (x==channel1.write(3)) => - Console.println(s"write to channel1: ${x} ") - case x: Int if (x==channel2.read) => - Console.println(s"readed from channel2: ${x}") - case x: Int if (x==(Future successful 10).read) => - res=x - implicitly[FlowTermination[Unit]].doExit(()) - } - Await.ready(r, 10 seconds) - assert(res==10) - } - - test("select syntax with @unchecked annotation") { - import gopherApi._ - val channel1 = makeChannel[List[Int]](100) - val channel2 = makeChannel[List[Int]](100) - var res = 0 - val r = select.once{ - case x: channel1.read @ unchecked => - {}; - res=1 - case x: List[Int] @ unchecked if (x==channel2.read) => - {}; - res=2 - } - channel1.awrite(List(1,2,3)) - Await.ready(r, 10 seconds) - assert(res==1) - } - - test("tuple in caseDef as one symbol") { - import gopherApi._ - val ch = makeChannel[(Int,Int)](100) - var res = 0 - val r = select.once{ - case xpair: ch.read @unchecked => - // fixed error in compiler: Can't find proxy - val (a,b)=xpair - res=1 - } - ch.awrite((1,1)) - Await.ready(r, 10 seconds) - assert(res==1) - } - - test("multiple readers for one write") { - import gopherApi._ - val ch = makeChannel[Int](10) - var x1 = 0 - var x2 = 0 - var x3 = 0 - var x4 = 0 - var x5 = 0 - val f1 = select.once{ - case x:ch.read => - {}; - x1=1 - } - val f2 = select.once{ - case x:ch.read => - {}; - x2=1 - } - val f3 = select.once{ - case x:ch.read => - {}; - x3=1 - } - val f4 = select.once{ - case x:ch.read => - {}; - x4=1 - } - val f5 = select.once{ - case x:ch.read => - {}; - x5=1 - } - Await.ready(ch.awrite(1),1 second) - val fr = Future.firstCompletedOf(List(f1,f2,f3,f4,f5)) - Await.ready(fr, 1 second) - ch.close() - Await.ready(Future.sequence(List(f1,f2,f3,f4,f5)),1 second) - assert(x1+x2+x3+x4+x5==1) - } - - test("fold over selector") { - import gopherApi._ - for(i <- 1 to 100) { - val ch = makeChannel[Int](10) - val back = makeChannel[Int]() - val quit = Promise[Boolean]() - val r = select.afold(0){ (x,s) => - s match { - case a:ch.read => back <~ a - x+a - case q:Boolean if (q==quit.future.read) => CurrentFlowTermination.exit(x) - } - } - ch.awriteAll(1 to 10) - back.aforeach{ x => - if (x==10) { - quit success true - } - } - val sum = Await.result(r, 1 second) - assert(sum==(1 to 10).sum) - } - } - - test("fold over selector with idle") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val ch2 = makeChannel[Int](10) - ch1.awrite(1) - val sf = select.afold((0,0,0)){ case ((n1,n2,nIdle),s) => - s match { - case x:ch1.read => - val nn1 = n1+1 - if (nn1 > 100) { - CurrentFlowTermination.exit((nn1,n2,nIdle)) - }else{ - ch2.write(x) - (nn1,n2,nIdle) - } - case x:ch2.read => - ch1.write(x) - (n1,n2+1,nIdle) - case _ => - (n1,n2,nIdle+1) - - } - } - val (n1,n2,ni) = Await.result(sf, 10 seconds) - assert (n1+n2+ni > 100) - val sf2 = select.afold((0,0)){ case ((n1,nIdle),s) => - s match { - case x:ch1.read => - (n1+1,nIdle) - case _ => - val nni = nIdle+1 - if (nni > 3) { - CurrentFlowTermination.exit((n1,nni)) - } else { - (n1,nni) - } - } } - val (n21,n2i) = Await.result(sf2, 10 seconds) - assert(n2i>3) - } - - test("amap over selector") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val ch2 = makeChannel[Int](10) - val quit = 
Promise[Boolean]() - val out = select.amap { - case x:ch1.read => x*2 - case x:ch2.read => - //System.err.println(s"received:${x}") - x*3 - case q:Boolean if (q==quit.future.read) => - //System.err.println("received quit") - CurrentFlowTermination.exit(1) - } - ch1.awriteAll(1 to 10) - ch2.awriteAll(100 to 110) - val f = out.afold(0){ - case (s,x) => //System.err.println(s"in afold ${x}") - s+x } - Thread.sleep(1000) - quit success true - val x = Await.result(f, 10 seconds) - assert(x > 3000) - } - - test("generic channel make") { - val ch1 = gopherApi.make[Channel[Int]]() - val ch2 = gopherApi.make[Channel[Int]](1) - // yet not supported by compiler. - //val ch3 = gopherApi.make[Channel[Int]](capacity=3) - val f1 = ch1.awrite(1) - val f2 = ch2.awrite(2) - val x = Await.result(ch1.aread, 10 seconds) - assert(x==1) - } - - test("input afold") { - import gopherApi._ - val ch1 = makeChannel[Int]() - ch1.awriteAll(1 to 10) map { _ => ch1.close() } - val f = ch1.afold(0){ case (s,x) => s+x } - val x = Await.result(f, 10 seconds) - assert(x==55) - } - - test("map over selector") { - import gopherApi._ - val ch1 = gopherApi.make[Channel[Int]]() - val ch2 = gopherApi.make[Channel[Int]](1) - val f1 = ch1.awrite(1) - val f2 = ch2.awrite(2) - val chs = for(s <- select) yield { - s match { - case x:ch1.read => x*3 - case x:ch2.read => x*5 - } - } - val fs1 = chs.aread - val fs2 = chs.aread - val s1 = Await.result(fs1, 1 second) - val s2 = Await.result(fs2, 1 second) - assert(s1==3 || s1==10) - } - - test("one-time channel make") { - import gopherApi._ - val ch = gopherApi.make[OneTimeChannel[Int]]() - val f1 = ch.awrite(1) - val f2 = ch.awrite(2) - val x = Await.result(ch.aread, 10 seconds) - val x2 = Await.result(f2.failed, 10 seconds) - assert(x==1) - assert(x2.isInstanceOf[ChannelClosedException]) - } - - - lazy val gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/SchedulerStartupTest.scala b/src/test/scala/gopher/channels/SchedulerStartupTest.scala deleted file mode 100644 index 9726f06f..00000000 --- a/src/test/scala/gopher/channels/SchedulerStartupTest.scala +++ /dev/null @@ -1,31 +0,0 @@ -package gopher.channels - -import org.scalatest._ -import gopher._ -import gopher.tags._ -import akka.actor._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import CommonTestObjects._ - -class SchedulerStartupTest extends FunSuite { - - - test("scheduler-allocated task mast start") { - - val scheduler = actorSystem.scheduler - val p = Promise[Int]() - //System.err.println("scheduler:"+scheduler) - val cancelable = scheduler.schedule( - 100 milliseconds, - 500 milliseconds - ){ - if (!p.isCompleted) p success 0 - }(ExecutionContext.Implicits.global) - val x = Await.result(p.future, 3000 milliseconds) - assert(x==0) - - } - -} diff --git a/src/test/scala/gopher/channels/SelectSuite.scala b/src/test/scala/gopher/channels/SelectSuite.scala deleted file mode 100644 index acf0f703..00000000 --- a/src/test/scala/gopher/channels/SelectSuite.scala +++ /dev/null @@ -1,281 +0,0 @@ -package gopher.channels - - -import org.scalatest._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import gopher._ -import gopher.tags._ - - -class SelectSuite extends FunSuite -{ - - - test("basic select with reading syntax sugar") { - - val channel = gopherApi.makeChannel[Int](100) - - val producer = channel.awriteAll(1 to 1000) - - @volatile var sum = 0; - val consumer = gopherApi.select.forever.reading(channel){ i => 
- sum = sum+i - if (i==1000) { - implicitly[FlowTermination[Unit]].doExit(()) - } else { - } - }.go - - - - Await.ready(consumer, 10.second) - - val xsum = (1 to 1000).sum - assert(xsum == sum) - } - - test("basic select with 'apply' reading syntax sugar") { - - val channel = gopherApi.makeChannel[Int](100) - val producer = channel.awriteAll(1 to 1000) - - @volatile var sum = 0; - val consumer = gopherApi.select.forever.reading(channel) { i => - sum = sum+i - if (i==1000) gopherApi.currentFlow.exit(()) - }.go - - Await.ready(consumer, 1000.second) - val xsum = (1 to 1000).sum - assert(xsum == sum) - - } - - - test("basic select with async reading form oter stream in apply") { - - val channel1 = gopherApi.makeChannel[Int](100) - val channel2 = gopherApi.makeChannel[Int](100) - - val producer1 = channel1.awriteAll(1 to 1000) - val producer2_1 = channel2.awriteAll(1 to 10) - - @volatile var sum = 0; - val consumer = gopherApi.select.forever.reading(channel1) { i1 => - val i2 = channel2.read - sum = sum+i1 + i2 - if (i1==1000) gopherApi.currentFlow.exit(()) - }.go - - assert(consumer.isCompleted == false, "consumer must not be complete after reading first stream" ) - assert(producer1.isCompleted == false) - - val producer2_2 = channel2.awriteAll(1 to 1000) - - Await.ready(consumer, 1000.second) - - assert(consumer.isCompleted) - - } - - - - test("basic select write with apply") { - - val channel = gopherApi.makeChannel[Int](1) - - @volatile var x = 1 - @volatile var y = 1 - val producer = gopherApi.select.forever.writing(channel,x) { _ => - var z = x + y - x=y - y=z - if (z > 1000) { - channel.close() - gopherApi.currentFlow.exit(()) - } - }.go - - @volatile var last = 0 - channel.aforeach{ i=> - //System.out.printn(i) - last=i - } - - Await.ready(producer, 1000.second) - - assert(producer.isCompleted) - //assert(consumer.isCompleted) - assert(last!=0) - - } - - test("basic select idlle with apply") { - - @volatile var x = 0 - val selector = gopherApi.select.forever.idle{ - if (x >= 10) { - gopherApi.currentFlow.exit(()) - } else { - x=x+1 - } - }.go - - - Await.ready(selector, 10.second) - assert(selector.isCompleted) - assert(x==10) - - } - - test("basic compound select with apply") { - - import scala.concurrent.ExecutionContext.Implicits.global - - val channel1 = gopherApi.makeChannel[Int](1) - val channel2 = gopherApi.makeChannel[Int](1) - val channel3 = gopherApi.makeChannel[Int](1) - val channel4 = gopherApi.makeChannel[Int](1) - - val producer = channel1.awriteAll(1 to 1000) - - @volatile var x=0 - @volatile var nw=0 - @volatile var q = false - @volatile var ch1s=0 - - val selector = gopherApi.select.forever.reading(channel1) { i => - // read ch1 in selector - channel4.awrite(i) - ch1s=i - }.reading(channel2) { i => - {}; // workarround for https://issues.scala-lang.org/browse/SI-8846 - x=i - //Console.println(s"reading from ch2, i=${i}") - }.writing(channel3,x) { x => - {}; // workarround for https://issues.scala-lang.org/browse/SI-8846 - nw=nw+1 - //Console.println(s"writing ${x} to ch3, nw=${nw}") - }.idle { - //Console.println(s"idle, exiting") - {}; - q=true - gopherApi.currentFlow.exit(()) - }.go - - for(c <- channel4.async) channel2.write(c) - - Await.ready(selector, 10.second) - assert(selector.isCompleted) - assert(q==true) - - } - - - test("basic compound select with for syntax") { - - import scala.concurrent.ExecutionContext.Implicits.global - import scala.async.Async._ - - val channel1 = gopherApi.makeChannel[Int](1) - val channel2 = gopherApi.makeChannel[Int](1) - val 
channel3 = gopherApi.makeChannel[Int](1) - val channel4 = gopherApi.makeChannel[Int](1) - - val producer = channel1.awriteAll(1 to 1000) - - @volatile var q = false - - val selector = async { - @volatile var x=0 - @volatile var nw=0 - @volatile var ch1s=0 - - //pending - // for syntax will be next: - for(s <- gopherApi.select.forever) - s match { - case ir: channel1.read => - channel4.awrite(ir) - ch1s=ir - case iw: channel3.write if (iw==(x+1)) => - {}; nw = nw+1 - case _ => {}; q=true - implicitly[FlowTermination[Unit]].doExit(()) - } - - } - - for(c <- channel4.async) channel2.write(c) - - Await.ready(selector, 10.second) - assert(selector.isCompleted) - assert(q==true) - - } - - - test("basic select.once with reading syntax sugar") { - - val channel1 = gopherApi.makeChannel[String](1) - val channel2 = gopherApi.makeChannel[String](1) - val selector = (gopherApi.select.once.reading(channel1)(x=>x) - .reading(channel2)(x=>x) - ).go - channel2.awrite("A") - assert(Await.result(selector, 10.second)=="A") - - } - - test("basic select.once with writing syntax sugar") { - val channel1 = gopherApi.makeChannel[Int](100) - val channel2 = gopherApi.makeChannel[Int](100) - @volatile var s:Int = 0 - val selector = (gopherApi.select.once.writing(channel1,s){q:Int =>"A"} - .writing(channel2,s){s=>"B"} - ).go - // hi, Captain Obvious - assert(Set("A","B") contains Await.result(selector, 10.second) ) - channel1.close() - channel2.close() - } - - test("basic select.once with idle syntax sugar") { - val ch = gopherApi.makeChannel[String](1) - val selector = (gopherApi.select.once[String].reading(ch)(x=>x) - .idle("IDLE") - ).go - assert(Await.result(selector, 10.second)=="IDLE") - ch.close() - } - - - test("basic select.foreach with partial-function syntax sugar") { - val info = gopherApi.makeChannel[Long](1) - val quit = gopherApi.makeChannel[Int](2) - @volatile var (x,y)=(0L,1L) - val writer = gopherApi.select.forever{ - case z:info.write if (z==x) => - x = y - y = y + x - case q:quit.read => - implicitly[FlowTermination[Unit]].doExit(()) - } - @volatile var sum=0L - val reader = gopherApi.select.forever{ - case z:info.read => sum += z - if (sum > 100000) { - quit.write(1) - implicitly[FlowTermination[Unit]].doExit(()) - } - } - Await.ready(writer, 10.second) - Await.ready(reader, 10.second) - assert(sum > 100000) - } - - def gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/SelectTimeoutSuite.scala b/src/test/scala/gopher/channels/SelectTimeoutSuite.scala deleted file mode 100644 index 21cae005..00000000 --- a/src/test/scala/gopher/channels/SelectTimeoutSuite.scala +++ /dev/null @@ -1,112 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ - -import org.scalatest._ - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import akka.util.Timeout - -class SelectTimeoutSuite extends FunSuite -{ - - import scala.concurrent.ExecutionContext.Implicits.global - - - test("select with constant timeout which not fire") { - //pending - import gopherApi._ - val ch1 = makeChannel[Int](10) - val r = select.amap { - case x:ch1.read => - //System.err.println(s"readed ${x}") - x - case y:select.timeout if (y==500.milliseconds) => - //System.err.println(s"timeout ${y}") - -1 - } - val f1 = ch1.awrite(1) - val x = Await.result(r.aread, 10 seconds) - assert(x==1) - } - - test("select with constant timeout which fire") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val r = select.amap { - 
case x:ch1.read => - //System.err.println(s"readed ${x}") - x - case x:select.timeout if (x==500.milliseconds) => - //System.err.println(s"timeout ${x}") - -1 - } - val x = Await.result(r.aread, 10 seconds) - assert(x == -1) - } - - test("timeout in select.forever") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val ch2 = makeChannel[Int]() - val chS = makeChannel[String](10) - var s = 0 - implicit val timeout = Timeout(100 milliseconds) - val f = select.forever{ - case x: ch1.read => - chS.write("1") - case x: ch2.read => - chS.write("2") - case x:select.timeout => - s += 1 - chS.write("t") - if (s > 2) select.exit(()) - } - val x = Await.result(f, 10 seconds) - assert(s > 2) - } - - test("timeout in select.fold") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val f = select.afold(0) { (state,sl) => - sl match { - case x: ch1.read => state+1 - case x: select.timeout if (x == 100.milliseconds) => - select.exit(state+10) - } - } - ch1.awrite(1) - val x = Await.result(f, 10 seconds) - assert(x==11) - } - - test("timeout in select.once") { - import gopherApi._ - implicit val timeout = Timeout(100 milliseconds) - val ch1 = makeChannel[Int](10) - var x = 0 - val f = go { - for(s <- select.once) { - s match { - case y: ch1.read => info("ch1 readed") - x=1 - case y: select.timeout => - info("ch2 readed") - x=10 - } - } - } - Await.ready(f, 10 seconds) - assert(x==10) - - } - - lazy val gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala b/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala deleted file mode 100644 index f5c65c08..00000000 --- a/src/test/scala/gopher/channels/UnbufferedSelectSuite.scala +++ /dev/null @@ -1,88 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ - - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import org.scalatest._ -import org.scalatest.concurrent._ - -class UnbufferedSelectSuite extends FunSuite with Waiters -{ - - import scala.concurrent.ExecutionContext.Implicits.global - - - test("write without read must block ") { - import gopherApi._ - for(i <- 0 until 100) { - val channel1 = makeChannel[Int](0) - val w1 = channel1.awrite(1) - - assert(!w1.isCompleted) - - val r1 = channel1.aread - - Await.ready(w1, 10 seconds) - Await.ready(r1, 10 seconds) - - assert(w1.isCompleted) - assert(r1.isCompleted) - - val rd = Await.result(r1, 10 seconds) - assert(rd==1) - } - - } - - test("fold over selector with one-direction flow") { - import gopherApi._ - for(i <- 1 to 100) { - val ch = makeChannel[Int](0) - val quit = Promise[Boolean]() - val r = select.afold(0){ (x,s) => - s match { - case a:ch.read => x+a - case q:Boolean if (q==quit.future.read) => CurrentFlowTermination.exit(x) - } - } - ch.awriteAll(1 to 10) onComplete { _ => quit success true } - val sum = Await.result(r, 3 second) - assert(sum==(1 to 10).sum) - } - } - - test("append for finite unbuffered stream") { - val w = new Waiter - val ch1 = gopherApi.makeChannel[Int](0) - val ch2 = gopherApi.makeChannel[Int](0) - val appended = ch1 append ch2 - var sum = 0 - var prev = 0 - var monotonic = true - val f = go { for(s <- appended) { - // bug in compiler 2.11.7 - //w{assert(prev < s)} - //if (prev >= s) w{assert(false)} - if (prev >= s) monotonic=false - prev = s - sum += s - } } - val a1 = ch1.awriteAll(1 to 10) - val a2 = ch2.awriteAll((1 to 10)map(_*100)) - // it works, but for buffered channeld onComplete can be scheduled 
before. So, <= instead == - a1.onComplete{ case _ => { w{assert(sum == 55)}; ch1.close(); w.dismiss() } } - a2.onComplete{ case _ => { w{assert(sum == 5555)}; w{assert(monotonic)}; w.dismiss() } } - w.await(timeout(10 seconds), dismissals(2)) - assert(sum==5555) - assert(monotonic) - } - - lazy val gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/hofasyn/FibbonacyAsyncLoopSuite.scala b/src/test/scala/gopher/hofasyn/FibbonacyAsyncLoopSuite.scala deleted file mode 100644 index 6477158d..00000000 --- a/src/test/scala/gopher/hofasyn/FibbonacyAsyncLoopSuite.scala +++ /dev/null @@ -1,66 +0,0 @@ -package gopher.hofasyn - -import gopher._ -import gopher.channels._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.async.Async._ - -import org.scalatest._ -import gopher.tags._ - - -/* -* code from go tutorial: http://tour.golang.org/#66 -* -*/ -object FibonaccyL { - - import scala.concurrent.ExecutionContext.Implicits.global - - def fibonacci(c: Output[Long], quit: Input[Int]): Future[Unit] = - go { - var (x,y) = (0L,1L) - for(s <- gopherApi.select.forever) { - s match { - case z: c.write if (z == x) => - x = y - y = z+y - case q: quit.read => - implicitly[FlowTermination[Unit]].doExit(()) - } - } - } - - def run(n:Int, acceptor: Long => Unit ): Future[Unit] = - { - val c = gopherApi.makeChannel[Long](1); - val quit = gopherApi.makeChannel[Int](1); - go { - for(i <-1 to n) { - val xLLFind = c.read - //Console.println(s"received: ${i}, ${xLLFind}") - acceptor(xLLFind) - } - //System.err.println("sending quit") - quit <~ 0 - } - - fibonacci(c,quit) - } - - def gopherApi = CommonTestObjects.gopherApi - -} - -class FibonaccyAsyncLoopSuite extends FunSuite -{ - - test("fibonaccy must be processed up to 50") { - @volatile var last:Long = 0; - Await.ready( FibonaccyL.run(50, last = _ ), 10 seconds ) - assert(last != 0) - } - -} diff --git a/src/test/scala/gopher/hofasyn/HofAsyncSuite.scala b/src/test/scala/gopher/hofasyn/HofAsyncSuite.scala deleted file mode 100644 index a4ebdcf3..00000000 --- a/src/test/scala/gopher/hofasyn/HofAsyncSuite.scala +++ /dev/null @@ -1,145 +0,0 @@ -package gopher.channels - -import gopher._ -import gopher.channels._ -import gopher.tags._ - -import org.scalatest._ - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -class HofAsyncSuite extends FunSuite -{ - - import scala.concurrent.ExecutionContext.Implicits.global - - - test("select emulation with macroses") { - - val channel = gopherApi.makeChannel[Int](100) - - go { - for( i <- 1 to 1000) - channel <~ i - } - - var sum = 0; - val consumer = go { - for(s <- gopherApi.select.forever) { - s match { - case i: channel.read => - //System.err.println("received:"+i) - sum = sum + i - if (i==1000) - implicitly[FlowTermination[Unit]].doExit(()) - } - } - sum - } - - Await.ready(consumer, 5.second) - - val xsum = (1 to 1000).sum - assert(xsum == sum) - - } - - - test("test async operations inside map") { - val channel = gopherApi.makeChannel[Int](100) - channel.awriteAll(1 to 100) - val fresult = go{ - for(i <- 1 to 100) yield channel.read - } - val result = Await.result(fresult, 5.second) - assert(result(0)==1) - assert(result(1)==2) - assert(result(99)==100) - } - - - test("write to channel in options.foreach") { - val channel = gopherApi.makeChannel[Int](100) - val optChannel = Some(channel) - val f1 = go { - optChannel.foreach{ _.write(1) } - } - val f2 = go { - optChannel.map{ _.read } - } - val r2 = 
Await.result(f2, 5.second) - assert(r2.isDefined) - assert(r2 === Some(1) ) - } - - test("nested option foreach") { - val a:Option[Int] = Some(1) - val b:Option[Int] = Some(3) - val channel = gopherApi.makeChannel[Int](10) - val fin = go { - for (xa <- a; - xb <- b) channel.write(xa+xb) - } - val fout = channel.aread - val r = Await.result(fout, 5.second) - assert(r == 4) - } - - test("option flatMap") { - val channel = gopherApi.makeChannel[Int](10) - val map = Map(1->Map(2->channel)) - val fout = go { - for (x <- map.get(1); - ch <- x.get(2)) yield ch.read - } - val fin = channel.awrite(1) - val r = Await.result(fout, 5.second) - assert(r == Some(1)) - } - - test("channels foreach ") { - val channels = gopherApi.makeChannel[Channel[Int]](10) - val fin = go { - for(ch <- channels) { - ch.awrite(1) - } - } - val ch = gopherApi.makeChannel[Int](10) - channels.awrite(ch) - val fout = ch.aread - val r = Await.result(fout, 5.second) - assert(r == 1) - } - - test("lift inside select") { - import gopherApi._ - val ch1 = makeChannel[Int](10) - val ch2 = makeChannel[Int](10) - val quit = makeChannel[Boolean]() - val fin = go{ - for(s <- select.forever) - s match { - case x: ch1.read => - //System.err.println(s"received $x") - for(i <- 1 to x) { - //System.err.println(s"writing ${i*x}") - ch2.write(i*x) - } - case y: select.timeout if (y==(500.milliseconds)) => - System.err.println(s"timeout $y") - case z: quit.read => - select.exit(()) - } - } - val fout = ch1.awrite(2) - val x1 = ch2.aread - val rx1 = Await.result(x1, 1 minute) - quit.awrite(true) - assert(rx1 == 2) - } - - lazy val gopherApi = CommonTestObjects.gopherApi - -} diff --git a/src/test/scala/gopher/internal/FoldParseSuite.scala b/src/test/scala/gopher/internal/FoldParseSuite.scala deleted file mode 100644 index 26d38409..00000000 --- a/src/test/scala/gopher/internal/FoldParseSuite.scala +++ /dev/null @@ -1,85 +0,0 @@ -package gopher.internal - -import gopher._ -import gopher.channels._ -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import org.scalatest._ -import gopher.tags._ - - -object FoldData { - - import scala.concurrent.ExecutionContext.Implicits.global - - def foldWithCase(c: Output[Long], quit: Input[Int]): Future[(Long,Long,Long)] = - gopherApi.select.afold((0L,1L,2L)) { - case ((x,y,z), s) => s match { - case v: c.write if (v==x) => - (y,z,y+z) - case q: quit.read => - CurrentFlowTermination.exit((x,y,z)) - } - } - - def foldWithCaseWithoutGuard(c: Output[Long], quit: Input[Int]): Future[(Long,Long,Long)] = - gopherApi.select.afold((0L,1L,2L)) { - case ((x,y,z), s) => s match { - case x: c.write => - (y,z,y+z) - case q: quit.read => - CurrentFlowTermination.exit((x,y,z)) - } - } - - - def foldWithoutCase(c: Output[Long], quit: Input[Int]): Future[Long] = - gopherApi.select.afold(1L) { (x,s) => - s match { - case v: c.write if (v==x) => x+1 - case q: quit.read => CurrentFlowTermination.exit(x) - } - } - - - def run1(n:Int, acceptor: Long => Unit ): Future[(Long,Long,Long)] = - { - val c = gopherApi.makeChannel[Long](1); - val quit = gopherApi.makeChannel[Int](1); - val r = go { - // for loop in go with async insied yet not supported - var i = 1 - while(i <= n) { - val x: Long = (c ?) 
- //Console.println(s"received: ${i}, ${x}") - acceptor(x) - i += 1 - } - quit <~ 0 - } - - foldWithCase(c,quit) - - - } - - - def gopherApi = CommonTestObjects.gopherApi - -} - -class FoldParseSuite extends FunSuite -{ - - test("fold must be parsed") { - @volatile var last:Long = 0; - Await.ready( FoldData.run1(50, last = _ ), 10 seconds ) - assert(last != 0) - } - - test("case var must shadow text") { - } - -} diff --git a/src/test/scala/gopher/scope/DefersSuite.scala b/src/test/scala/gopher/scope/DefersSuite.scala deleted file mode 100644 index af956d5c..00000000 --- a/src/test/scala/gopher/scope/DefersSuite.scala +++ /dev/null @@ -1,85 +0,0 @@ -package gopher.scope - -import scala.annotation.tailrec -import scala.util._ -import scala.reflect.runtime.universe.{Try => _, _} -import scala.io._ -import gopher._ - -import org.scalatest._ - -trait Source -{ - def name(): String - def lines(): Iterator[String] - def close(): Unit -} - -object TestParser -{ - - def parseCsv(source: Source): Either[String, Seq[Seq[Double]]] = - withDefer[Either[String,Seq[Seq[Double]]]]{ d => - d.defer{ - if (!d.recover { - case ex: Throwable => Left(ex.getMessage) - }) - source.close() - } - val retval:Either[String,Seq[Seq[Double]]] = Right{ - for( (line, nLine) <- source.lines.toList zip Stream.from(1) ) yield withDefer[Seq[Double]] { d => - line.split(",") map { s=> - d.defer{ - d.recover{ - case ex: NumberFormatException => - throw new RuntimeException(s"parse error in line ${nLine} file ${source.name} ") - } - } - s.toDouble - } - }.toSeq - } - retval - } - -} - - -class DefersSuite extends FunSuite -{ - - test("Defers.parseCsv: reading from unexistent file will return failure with FileNotFoundException") { - - val s = new Source { - def name()="unexistent.txt" - def lines()=source.getLines - def close()=source.close() - lazy val source = scala.io.Source.fromFile(name) - } - TestParser.parseCsv(s) match { - case Right(x) => assert(false,"unexistent source parsed") - case Left(s) => assert(s.contains("file")) - } - - } - - test("Defers.parseCsv: error in second string must be discovered") { - val s = new Source { - def name()="internal" - def lines()=Seq( - "1,3,4,5,6.0,8,9", - "3,4,5,6,xxxx7.0,8,9" - ).iterator - def close(): Unit = {} - } - - TestParser.parseCsv(s) match { - case Right(x) => assert(false,"source with error parsed") - case Left(s) => assert(s.contains("2")) - } - - } - - -} - diff --git a/src/test/scala/gopher/scope/GoWithDeferSuite.scala b/src/test/scala/gopher/scope/GoWithDeferSuite.scala deleted file mode 100644 index e5d048f4..00000000 --- a/src/test/scala/gopher/scope/GoWithDeferSuite.scala +++ /dev/null @@ -1,50 +0,0 @@ -package gopher.scope - - -import org.scalatest.FunSuite -import gopher._ -import gopher.tags._ - -import scala.language._ -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.concurrent.ExecutionContext.Implicits.global - -class GoWithDeferSuite extends FunSuite { - - - test("2.1. goWithDefer: simple statement with defer must be processed") { - @volatile var x = 0 - val f = go { - defer{ x = 2 } - x = 1 - } - Await.ready(f, 1 second) - assert(x === 2) - } - - test("2.2. typed go with recover") { - var x = 0 - val s = go{ defer{ recover{case ex: Throwable => "CCC"} } ; throw new RuntimeException("AA-go"); "4" } - Await.ready(s, 1 second) - assert(Await.result(s, 1 second)=="CCC") - } - - test("2.2. 
go with defer and while") { - var x = 0; - var f:Future[Unit] = go { - defer{ x=3; } - var n=4; - while(n > 0) { - n = n-1; - } - } - Await.ready(f, 1 second) - assert(x === 3) - } - - // TODO: go with select. - -} - - diff --git a/src/test/scala/gopher/scope/ScopeMacroSuite.scala b/src/test/scala/gopher/scope/ScopeMacroSuite.scala deleted file mode 100644 index c35f43fb..00000000 --- a/src/test/scala/gopher/scope/ScopeMacroSuite.scala +++ /dev/null @@ -1,71 +0,0 @@ -package gopher.scope - - -import org.scalatest.FunSuite -import gopher._ -import gopher.tags._ - - - -class ScopeMacroSuite extends FunSuite { - - - test("1. goScope: simple statement with defer must be processed") { - var x = 0 - goScope { - defer{ x = 2 } - x = 1 - } - assert(x === 2) - } - - test("2. typed goScope") { - var x = 0 - val s = goScope{ defer{ recover{case ex: Throwable => "CCC"} } ; throw new RuntimeException("AA"); "4" } - assert(s=="CCC") - } - - test("3. defered code must be called when non-local return") { - - var deferCalled = false - - def testFun(): Int = - goScope { - defer{ - deferCalled=true - } - if (true) { - return 34; - } - 42 - } - - val q = testFun() - - assert(q==34) - assert(deferCalled) - - } - - test("4. non-local return must not be catched in recover") { - - var thCatched = false - - def testFun(): Int = - goScope { - defer{ recover{ case th: Throwable => { thCatched=true; 5 } } } - if (true) { - return 10; - } - 11 - } - - val q = testFun() - assert(q==10) - assert(!thCatched) - - } - -} - - diff --git a/src/test/scala/gopher/scope/SimpleStatementSuite.scala b/src/test/scala/gopher/scope/SimpleStatementSuite.scala deleted file mode 100644 index 627d4ce3..00000000 --- a/src/test/scala/gopher/scope/SimpleStatementSuite.scala +++ /dev/null @@ -1,57 +0,0 @@ -package gopher.scope - -import org.scalatest.FunSuite -import gopher._ -import gopher.tags._ - - -class SimpleStatementSuite extends FunSuite -{ - - test("withDefer: simple statement without defer must be processed") { - var x = 0 - withDefer[Unit] { d=> - x = 1 - } - assert(x === 1) - } - - test("withDefers: simple statement with defer must be processed") { - var x = 0 - withDefer[Unit] { d => - d.defer{ x = 2 } - x = 1 - } - assert(x === 2) - } - - test("withDefers: simple statement with panic must be processed") { - var x = 0 - withDefer[Unit] { d=> - d.defer { - d.recover{ - case ex: Throwable => - x=1 - } - } - if (x==0) { - throw new IllegalStateException("x==0"); - } - } - assert(x==1) - } - - test("withDefers: recover must resturn value") { - val x = withDefer[Int] { d=> - val retval = 3; - d.defer { d.recover { - case ex: IllegalStateException => 5 - }} - throw new IllegalStateException("Be-Be-Be") - retval - } - assert(x==5) - } - -} - diff --git a/src/test/scala/gopher/tags/Gen.scala b/src/test/scala/gopher/tags/Gen.scala deleted file mode 100644 index 2eb9692d..00000000 --- a/src/test/scala/gopher/tags/Gen.scala +++ /dev/null @@ -1,5 +0,0 @@ -package gopher.tags - -import org.scalatest._ - -object Gen extends Tag("Gen") diff --git a/src/test/scala/gopher/tags/Now.scala b/src/test/scala/gopher/tags/Now.scala deleted file mode 100644 index 14250192..00000000 --- a/src/test/scala/gopher/tags/Now.scala +++ /dev/null @@ -1,6 +0,0 @@ -package gopher.tags - -import org.scalatest.Tag - -object Now extends Tag("Now") - diff --git a/src/test/scala/gopher/transputers/ReplicateSuite.scala b/src/test/scala/gopher/transputers/ReplicateSuite.scala deleted file mode 100644 index e2cfcaf0..00000000 --- 
a/src/test/scala/gopher/transputers/ReplicateSuite.scala +++ /dev/null @@ -1,182 +0,0 @@ -package gopher.transputers - -import scala.language._ -import gopher._ -import gopher.channels._ -import gopher.util._ -import gopher.tags._ -import org.scalatest._ -import scala.concurrent._ -import scala.concurrent.duration._ -import akka.actor._ - -sealed trait ControlMessage -case class SetMaxWords(n:Int) extends ControlMessage -case class SetMaxUsers(n:Int) extends ControlMessage -case class SendTopWords(userId: Long, nWords:Int) extends ControlMessage -case object Clear extends ControlMessage -case object Stop extends ControlMessage - -sealed trait OverflowMessage -case object UsersOverflow extends OverflowMessage -case class WordsOverflow(userId: Long) extends OverflowMessage - -trait WordCountTestTransputer extends SelectTransputer -{ - - val inS = InPort[(Long,String)]() - val control = InPort[ControlMessage]() - - val topOut = OutPort[(Long,Seq[(String,Int)])]() - val overflows = OutPort[OverflowMessage]() - - var data = Map[Long,Map[String,Int]]() - var maxWords : Int = 100 - var maxUsers : Int = 100 - - loop { - case x : inS.read @unchecked => - val (id, word) = x - val nWords = updateData(id, word ) - if (data.size > maxUsers) { - overflows.write(UsersOverflow) - } - if (nWords > maxWords) { - overflows.write(WordsOverflow(id)) - } - case c: control.read => - c match { - case SetMaxWords(n) => maxWords=n - case SetMaxUsers(n) => maxUsers=n - case SendTopWords(userId, nWords) => - topOut.write((userId,topNWords(userId, nWords))) - case Clear => data = Map() - case Stop => stop() - } - - } - - def updateData(userId: Long, word: String): Int = - data.get(userId) match { - case Some(m) => val newM = updateWordCount(m,word) - data = data.updated(userId, newM) - newM.size - case None => data = data.updated(userId,Map(word -> 1)) - 1 - } - - def updateWordCount(m:Map[String,Int],w:String): Map[String,Int] = - m.updated(w, - m.get(w) match { - case Some(n) => n+1 - case None => 1 - } - ) - - def topNWords(userId:Long, nWords: Int): Seq[(String,Int)] = - data.get(userId) match { - case Some(m) => m.toSeq.sortBy{ case (w1,n1) => -n1 }.take(nWords) - case None => List() - } - -} - -trait TestDupper extends SelectTransputer with TransputerLogging -{ - - val in = InPort[Int]() - - val out = OutPort[Int]() - - @volatile var nProcessedMessages = 0 - - loop { - case x: in.read => - log.info(s"testDupper, replica: ${replica} received ${x} from ${in}") - // TODO: implement gopherApi.time.wait - Thread.sleep(1000) - out.write(x) - nProcessedMessages += 1 - } - -} - - - -class ReplicateSuite extends FunSuite -{ - - test(" define replication of TestDupper with port adapters") { - val r = gopherApi.replicate[TestDupper](10) - import PortAdapters._ - ( r.in.distribute( (_ % 37 ) ). - out.share() - ) - val inChannel = gopherApi.makeChannel[Int](10); - val outChannel = gopherApi.makeChannel[Int](10); - r.in.connect(inChannel) - r.out.connect(outChannel) - val f0 = r.start() - import scala.concurrent.ExecutionContext.Implicits.global - var r1=0 - var r2=0 - val beforeF1 = System.currentTimeMillis - val f1 = go{ - inChannel.write(1) - inChannel.write(2) - r1 = outChannel.read - r2 = outChannel.read - } - Await.ready(f1, 10 seconds) - assert(f1.isCompleted) - assert(r.replicated.map(_.nProcessedMessages).sum == 2) - assert(r.replicated.forall(x => x.nProcessedMessages == 0 || x.nProcessedMessages == 1)) - r.stop() - } - - - test("WordCount must be replicated and accessbke via *! 
ports side") { - //pending - import PortAdapters._ - val nReplics = 2 - val t = gopherApi.replicate[WordCountTestTransputer](nReplics).inS.distribute{ case(id,w) => id.toInt }.control.duplicate() - val ft = t.start() - val topIn: Input[(Long,Seq[(String,Int)])] = t.topOut.*! - @volatile var nReceived = 0 - val of = gopherApi.select.forever { - case x: topIn.read @ unchecked => - //Console.println("received:"+x) - nReceived = nReceived + 1 - if (nReceived == nReplics) { - implicitly[FlowTermination[Unit]].doExit(()) - } - case o: OverflowMessage if (o==(t.overflows*!).read) => - Console.println("overflow received:"+o) - } - val outS: Output[(Long,String)] = t.inS.*! - // stack overflow in compiler. [2.11.4] - //val fw = go { - // outS.writeAll( "Some nontrivial sentence with more than one word".split(" ").toList map ((11L,_)) ) - //} - import scala.concurrent.ExecutionContext.Implicits.global - val fw = outS.awriteAll( - "Some nontrivial sentence with more than one word".split(" ").toList map ((1L,_)) - ) flatMap ( _ => - outS.awriteAll( "And in next text word 'word' will be one of top words".split(" ").toList map ((1L,_)) - ) flatMap ( _ => - outS.awriteAll( "One image is worse than thousand words".split(" ").toList map ((1L,_)) - ) ) ) flatMap { _ => - t.control.*! awrite SendTopWords(1L, 3) - } - Await.ready(fw, 10 seconds) - t.stop() - Await.ready(ft, 10 seconds) - Await.ready(of, 10 seconds) - assert(nReceived == nReplics) - } - - - def gopherApi = CommonTestObjects.gopherApi - -} - diff --git a/src/test/scala/gopher/transputers/TransputerRestartSuite.scala b/src/test/scala/gopher/transputers/TransputerRestartSuite.scala deleted file mode 100644 index 7d265b43..00000000 --- a/src/test/scala/gopher/transputers/TransputerRestartSuite.scala +++ /dev/null @@ -1,154 +0,0 @@ -package gopher.transputers - -import scala.language._ -import gopher._ -import gopher.channels._ -import gopher.tags._ -import org.scalatest._ -import scala.concurrent._ -import scala.concurrent.duration._ -import akka.actor._ - -class MyException extends RuntimeException("AAA") - -trait BingoWithRecover extends SelectTransputer with TransputerLogging -{ - - val inX = InPort[Int]() - val inY = InPort[Int]() - val out = OutPort[Boolean]() - val fin = OutPort[Boolean]() - - var exReaction: SupervisorStrategy.Directive = SupervisorStrategy.Restart - var throwAlways: Boolean = false - - override def copyState(prev: Transputer):Unit = - { - val bingoPrev = prev.asInstanceOf[BingoWithRecover] - exReaction = bingoPrev.exReaction - throwAlways = bingoPrev.throwAlways - } - - - recover { - case ex: ChannelClosedException => - SupervisorStrategy.Stop - case ex: MyException => - SupervisorStrategy.Restart - } - - loop { - case x: inX.read => - val y = inY.read - //Console.println(s"Bingo checker, received ${x}, ${y} ") - out.write(x==y) - if (x==2) { - throw new MyException() - } else if (x > 2 && throwAlways) { - throw new MyException() - } - if (x==100) { - fin.write(true) - } - } - -} - - -trait Acceptor1 extends SelectTransputer -{ - - val inA = InPort[Boolean]() - - var nBingos = 0 - var nPairs = 0 - - loop { - case x: inA.read => - //Console.println(s"acceptor: ${nPairs} ${nBingos} ${x}") - if (x) { - nBingos += 1 - } - nPairs += 1 - } - -} - -class TransputerRestartSuite extends FunSuite -{ - - test("bingo restore with the same connectons") { - val inX = gopherApi.iterableInput(1 to 100) - val inY = gopherApi.iterableInput(1 to 100) - val bingo = gopherApi.makeTransputer[BingoWithRecover] - bingo.exReaction = 
SupervisorStrategy.Restart - val acceptor = gopherApi.makeTransputer[Acceptor1] - val fin = gopherApi.makeChannel[Boolean]() - bingo.inX connect inX - bingo.inY connect inY - bingo.out >~~> acceptor.inA - bingo.fin connect fin - (bingo + acceptor).start() - val w = fin.aread - Await.ready(w,10 seconds) - assert(acceptor.nBingos == acceptor.nPairs) - } - - test("bingo resume") { - val inX = gopherApi.iterableInput(1 to 100) - val inY = gopherApi.iterableInput(1 to 100) - val bingo = gopherApi.makeTransputer[BingoWithRecover] - bingo.exReaction = SupervisorStrategy.Resume - val acceptor = gopherApi.makeTransputer[Acceptor1] - val fin = gopherApi.makeChannel[Boolean]() - bingo.inX connect inX - bingo.inY connect inY - bingo.out >~~> acceptor.inA - bingo.fin connect fin - (bingo + acceptor).start() - val w = fin.aread - Await.ready(w,10 seconds) - assert(acceptor.nBingos == acceptor.nPairs) - } - - test("bingo - too many failures with restart") { - val inX = gopherApi.iterableInput(1 to 100) - val inY = gopherApi.iterableInput(1 to 100) - val bingo = gopherApi.makeTransputer[BingoWithRecover] - bingo.exReaction = SupervisorStrategy.Restart - bingo.throwAlways = true - val acceptor = gopherApi.makeTransputer[Acceptor1] - val fin = gopherApi.makeChannel[Boolean]() - bingo.inX connect inX - bingo.inY connect inY - bingo.out >~~> acceptor.inA - bingo.fin connect fin - val w = (bingo + acceptor).start() - intercept[Transputer.TooManyFailures] { - Await.result(w,10 seconds) - } - } - - test("bingo - too many failures with resume") { - val inX = gopherApi.iterableInput(1 to 100) - val inY = gopherApi.iterableInput(1 to 100) - val bingo = gopherApi.makeTransputer[BingoWithRecover] - bingo.exReaction = SupervisorStrategy.Resume - bingo.throwAlways = true - val acceptor = gopherApi.makeTransputer[Acceptor1] - val fin = gopherApi.makeChannel[Boolean]() - bingo.inX connect inX - bingo.inY connect inY - bingo.out >~~> acceptor.inA - bingo.fin connect fin - val w = (bingo + acceptor).start() - intercept[Transputer.TooManyFailures] { - Await.result(w,10 seconds) - } - } - - - def gopherApi = CommonTestObjects.gopherApi - -} -
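
The suites removed above all revolve around one 0.99.x-era idiom: feed a channel with `awriteAll`, consume it inside a `select.forever` loop, and leave the loop through `FlowTermination`. As an orientation aid for readers skimming this diff, the sketch below condenses that idiom into a single function. It uses only calls that appear verbatim in the deleted tests; the `GopherAPI` parameter name and the `sumViaSelect` helper are assumptions standing in for the shared `CommonTestObjects.gopherApi` instance the suites use.

~~~ scala
import gopher._
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

// Hypothetical helper condensing the produce / select.forever / FlowTermination
// pattern shared by the deleted suites. `gopherApi` is assumed to be a 0.99.x
// GopherAPI instance (the tests obtain theirs from CommonTestObjects).
def sumViaSelect(gopherApi: GopherAPI): Int = {
  val channel = gopherApi.makeChannel[Int](100)   // buffered channel, capacity 100
  channel.awriteAll(1 to 1000)                    // asynchronous producer

  var sum = 0
  val consumer = gopherApi.select.forever {       // returns a Future completed by doExit
    case i: channel.read =>                       // fires for every value read from `channel`
      sum += i
      if (i == 1000)                              // last element: leave the forever loop
        implicitly[FlowTermination[Unit]].doExit(())
  }

  Await.ready(consumer, 10.seconds)
  sum                                             // == (1 to 1000).sum == 500500
}
~~~

A caller would assert on it the same way the removed suites do, e.g. `assert(sumViaSelect(CommonTestObjects.gopherApi) == (1 to 1000).sum)`.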